diff --git a/.idea/.gitignore b/.idea/.gitignore
deleted file mode 100644
index e7e9d11..0000000
--- a/.idea/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# Default ignored files
-/workspace.xml
diff --git a/.idea/SI_Projekt.iml b/.idea/SI_Projekt.iml
deleted file mode 100644
index 0e4e9fa..0000000
--- a/.idea/SI_Projekt.iml
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
deleted file mode 100644
index 105ce2d..0000000
--- a/.idea/inspectionProfiles/profiles_settings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index 6649a8c..0000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index 48744ac..0000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 288b36b..0000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/venv/bin/activate b/venv/bin/activate
deleted file mode 100644
index 678ec96..0000000
--- a/venv/bin/activate
+++ /dev/null
@@ -1,76 +0,0 @@
-# This file must be used with "source bin/activate" *from bash*
-# you cannot run it directly
-
-deactivate () {
-    # reset old environment variables
-    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
-        PATH="${_OLD_VIRTUAL_PATH:-}"
-        export PATH
-        unset _OLD_VIRTUAL_PATH
-    fi
-    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
-        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
-        export PYTHONHOME
-        unset _OLD_VIRTUAL_PYTHONHOME
-    fi
-
-    # This should detect bash and zsh, which have a hash command that must
-    # be called to get it to forget past commands.  Without forgetting
-    # past commands the $PATH changes we made may not be respected
-    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
-        hash -r
-    fi
-
-    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
-        PS1="${_OLD_VIRTUAL_PS1:-}"
-        export PS1
-        unset _OLD_VIRTUAL_PS1
-    fi
-
-    unset VIRTUAL_ENV
-    if [ ! "$1" = "nondestructive" ] ; then
-        # Self destruct!
-        unset -f deactivate
-    fi
-}
-
-# unset irrelevant variables
-deactivate nondestructive
-
-VIRTUAL_ENV="/home/marcin/PycharmProjects/SI_Projekt/venv"
-export VIRTUAL_ENV
-
-_OLD_VIRTUAL_PATH="$PATH"
-PATH="$VIRTUAL_ENV/bin:$PATH"
-export PATH
-
-# unset PYTHONHOME if set
-# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
-# could use `if (set -u; : $PYTHONHOME) ;` in bash
-if [ -n "${PYTHONHOME:-}" ] ; then
-    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
-    unset PYTHONHOME
-fi
-
-if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
-    _OLD_VIRTUAL_PS1="${PS1:-}"
-    if [ "x(venv) " != x ] ; then
-        PS1="(venv) ${PS1:-}"
-    else
-        if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
-            # special case for Aspen magic directories
-            # see http://www.zetadev.com/software/aspen/
-            PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
-        else
-            PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
-        fi
-    fi
-    export PS1
-fi
-
-# This should detect bash and zsh, which have a hash command that must
-# be called to get it to forget past commands.  Without forgetting
-# past commands the $PATH changes we made may not be respected
-if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
-    hash -r
-fi
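Note: the deleted activate script hard-codes the absolute path /home/marcin/PycharmProjects/SI_Projekt/venv, which is why a committed venv is useless on any other machine. A minimal sketch of recreating the deleted environment locally with the standard library instead of versioning it (the directory name "venv" matches the deleted tree; with_pip and symlinks settings are assumptions that mirror what the deleted files show):

    # recreate_venv.py - rebuild the environment this diff removes
    import venv

    # EnvBuilder writes bin/activate, bin/activate.csh, bin/activate.fish
    # and the python -> python3.x symlinks seen in the deletions below.
    builder = venv.EnvBuilder(with_pip=True, symlinks=True)
    builder.create("venv")
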
diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh
deleted file mode 100644
index 76ed6b5..0000000
--- a/venv/bin/activate.csh
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file must be used with "source bin/activate.csh" *from csh*.
-# You cannot run it directly.
-# Created by Davide Di Blasi .
-# Ported to Python 3.3 venv by Andrew Svetlov 
-
-alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
-
-# Unset irrelevant variables.
-deactivate nondestructive
-
-setenv VIRTUAL_ENV "/home/marcin/PycharmProjects/SI_Projekt/venv"
-
-set _OLD_VIRTUAL_PATH="$PATH"
-setenv PATH "$VIRTUAL_ENV/bin:$PATH"
-
-
-set _OLD_VIRTUAL_PROMPT="$prompt"
-
-if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
-    if ("venv" != "") then
-        set env_name = "venv"
-    else
-        if (`basename "VIRTUAL_ENV"` == "__") then
-            # special case for Aspen magic directories
-            # see http://www.zetadev.com/software/aspen/
-            set env_name = `basename \`dirname "$VIRTUAL_ENV"\``
-        else
-            set env_name = `basename "$VIRTUAL_ENV"`
-        endif
-    endif
-    set prompt = "[$env_name] $prompt"
-    unset env_name
-endif
-
-alias pydoc python -m pydoc
-
-rehash
- if test -n "(venv) " - printf "%s%s" "(venv) " (set_color normal) - else - # ...Otherwise, prepend env - set -l _checkbase (basename "$VIRTUAL_ENV") - if test $_checkbase = "__" - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) - else - printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) - end - end - - # Restore the return status of the previous command. - echo "exit $old_status" | . - _old_fish_prompt - end - - set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" -end diff --git a/venv/bin/easy_install b/venv/bin/easy_install deleted file mode 100755 index 4824897..0000000 --- a/venv/bin/easy_install +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install' -__requires__ = 'setuptools==40.8.0' -import re -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')() - ) diff --git a/venv/bin/easy_install-3.7 b/venv/bin/easy_install-3.7 deleted file mode 100755 index fc582e5..0000000 --- a/venv/bin/easy_install-3.7 +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7' -__requires__ = 'setuptools==40.8.0' -import re -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')() - ) diff --git a/venv/bin/f2py b/venv/bin/f2py deleted file mode 100755 index d6993d6..0000000 --- a/venv/bin/f2py +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys - -from numpy.f2py.f2py2e import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/f2py3 b/venv/bin/f2py3 deleted file mode 100755 index d6993d6..0000000 --- a/venv/bin/f2py3 +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys - -from numpy.f2py.f2py2e import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/f2py3.7 b/venv/bin/f2py3.7 deleted file mode 100755 index d6993d6..0000000 --- a/venv/bin/f2py3.7 +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# -*- coding: utf-8 -*- -import re -import sys - -from numpy.f2py.f2py2e import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip b/venv/bin/pip deleted file mode 100755 index 5dcb028..0000000 --- a/venv/bin/pip +++ /dev/null @@ -1,12 +0,0 @@ -#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip' -__requires__ = 'pip==19.0.3' -import re -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', 
diff --git a/venv/bin/pip b/venv/bin/pip
deleted file mode 100755
index 5dcb028..0000000
--- a/venv/bin/pip
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python
-# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
-__requires__ = 'pip==19.0.3'
-import re
-import sys
-from pkg_resources import load_entry_point
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(
-        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
-    )
diff --git a/venv/bin/pip3 b/venv/bin/pip3
deleted file mode 100755
index 9177fc0..0000000
--- a/venv/bin/pip3
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python
-# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
-__requires__ = 'pip==19.0.3'
-import re
-import sys
-from pkg_resources import load_entry_point
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(
-        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
-    )
diff --git a/venv/bin/pip3.7 b/venv/bin/pip3.7
deleted file mode 100755
index 0643220..0000000
--- a/venv/bin/pip3.7
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/home/marcin/PycharmProjects/SI_Projekt/venv/bin/python
-# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
-__requires__ = 'pip==19.0.3'
-import re
-import sys
-from pkg_resources import load_entry_point
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(
-        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
-    )
diff --git a/venv/bin/python b/venv/bin/python
deleted file mode 120000
index 940bee3..0000000
--- a/venv/bin/python
+++ /dev/null
@@ -1 +0,0 @@
-python3.7
\ No newline at end of file
diff --git a/venv/bin/python3 b/venv/bin/python3
deleted file mode 120000
index 940bee3..0000000
--- a/venv/bin/python3
+++ /dev/null
@@ -1 +0,0 @@
-python3.7
\ No newline at end of file
diff --git a/venv/bin/python3.7 b/venv/bin/python3.7
deleted file mode 120000
index f097b0e..0000000
--- a/venv/bin/python3.7
+++ /dev/null
@@ -1 +0,0 @@
-/usr/bin/python3.7
\ No newline at end of file
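Note: the three entries above are symlinks (mode 120000), chaining python -> python3.7 -> /usr/bin/python3.7, which is another reason the tree cannot be committed portably. A small standard-library sketch of detecting such a PEP 405 environment at runtime; nothing here is project-specific:

    # venv_check.py - is this interpreter running from a venv?
    import sys

    def in_venv() -> bool:
        # In a venv, sys.prefix points at venv/ while base_prefix points at
        # the interpreter the symlink resolves to (e.g. /usr/bin/python3.7).
        return sys.prefix != getattr(sys, "base_prefix", sys.prefix)

    print(sys.executable, in_venv())
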
diff --git a/venv/include/site/python3.7/pygame/_camera.h b/venv/include/site/python3.7/pygame/_camera.h
deleted file mode 100644
index 68ae989..0000000
--- a/venv/include/site/python3.7/pygame/_camera.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-    pygame - Python Game Library
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-*/
-
-#ifndef _CAMERA_H
-#define _CAMERA_H
-
-#include "_pygame.h"
-#include "camera.h"
-
-#endif
-
diff --git a/venv/include/site/python3.7/pygame/_pygame.h b/venv/include/site/python3.7/pygame/_pygame.h
deleted file mode 100644
index 68962fc..0000000
--- a/venv/include/site/python3.7/pygame/_pygame.h
+++ /dev/null
@@ -1,864 +0,0 @@
-/*
-    pygame - Python Game Library
-    Copyright (C) 2000-2001  Pete Shinners
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-    Pete Shinners
-    pete@shinners.org
-*/
-
-#ifndef _PYGAME_H
-#define _PYGAME_H
-
-/** This header file includes all the definitions for the
- ** base pygame extensions. This header only requires
- ** SDL and Python includes. The reason for functions
- ** prototyped with #define's is to allow for maximum
- ** python portability. It also uses python as the
- ** runtime linker, which allows for late binding. For more
- ** information on this style of development, read the Python
- ** docs on this subject.
- ** http://www.python.org/doc/current/ext/using-cobjects.html
- **
- ** If using this to build your own derived extensions,
- ** you'll see that the functions available here are mainly
- ** used to help convert between python objects and SDL objects.
- ** Since this library doesn't add a lot of functionality to
- ** the SDL libarary, it doesn't need to offer a lot either.
- **
- ** When initializing your extension module, you must manually
- ** import the modules you want to use. (this is the part about
- ** using python as the runtime linker). Each module has its
- ** own import_xxx() routine. You need to perform this import
- ** after you have initialized your own module, and before
- ** you call any routines from that module. Since every module
- ** in pygame does this, there are plenty of examples.
- **
- ** The base module does include some useful conversion routines
- ** that you are free to use in your own extension.
- **
- ** When making changes, it is very important to keep the
- ** FIRSTSLOT and NUMSLOT constants up to date for each
- ** section. Also be sure not to overlap any of the slots.
- ** When you do make a mistake with this, it will result
- ** is a dereferenced NULL pointer that is easier to diagnose
- ** than it could be :]
- **/
-#if defined(HAVE_SNPRINTF) /* defined in python.h (pyerrors.h) and SDL.h \
-                              (SDL_config.h) */
-#undef HAVE_SNPRINTF /* remove GCC redefine warning */
-#endif
-
-// This must be before all else
-#if defined(__SYMBIAN32__) && defined(OPENC)
-#include 
-
-#if defined(__WINS__)
-void *
-_alloca(size_t size);
-#define alloca _alloca
-#endif
-#endif
-
-#define PG_STRINGIZE_HELPER(x) #x
-#define PG_STRINGIZE(x) PG_STRINGIZE_HELPER(x)
-#define PG_WARN(desc) message(__FILE__ "(" PG_STRINGIZE(__LINE__) "): WARNING: " #desc)
-
-/* This is unconditionally defined in Python.h */
-#if defined(_POSIX_C_SOURCE)
-#undef _POSIX_C_SOURCE
-#endif
-
-#include 
-
-/* the version macros are defined since version 1.9.5 */
-#define PG_MAJOR_VERSION 1
-#define PG_MINOR_VERSION 9
-#define PG_PATCH_VERSION 6
-#define PG_VERSIONNUM(MAJOR, MINOR, PATCH) (1000*(MAJOR) + 100*(MINOR) + (PATCH))
-#define PG_VERSION_ATLEAST(MAJOR, MINOR, PATCH) \
-    (PG_VERSIONNUM(PG_MAJOR_VERSION, PG_MINOR_VERSION, PG_PATCH_VERSION) >= \
-     PG_VERSIONNUM(MAJOR, MINOR, PATCH))
-
-/* Cobjects vanish in Python 3.2; so we will code as though we use capsules */
-#if defined(Py_CAPSULE_H)
-#define PG_HAVE_CAPSULE 1
-#else
-#define PG_HAVE_CAPSULE 0
-#endif
-#if defined(Py_COBJECT_H)
-#define PG_HAVE_COBJECT 1
-#else
-#define PG_HAVE_COBJECT 0
-#endif
-#if !PG_HAVE_CAPSULE
-#define PyCapsule_New(ptr, n, dfn) PyCObject_FromVoidPtr(ptr, dfn)
-#define PyCapsule_GetPointer(obj, n) PyCObject_AsVoidPtr(obj)
-#define PyCapsule_CheckExact(obj) PyCObject_Check(obj)
-#endif
-
-/* Pygame uses Py_buffer (PEP 3118) to exchange array information internally;
- * define here as needed.
- */
-#if !defined(PyBUF_SIMPLE)
-typedef struct bufferinfo {
-    void *buf;
-    PyObject *obj;
-    Py_ssize_t len;
-    Py_ssize_t itemsize;
-    int readonly;
-    int ndim;
-    char *format;
-    Py_ssize_t *shape;
-    Py_ssize_t *strides;
-    Py_ssize_t *suboffsets;
-    void *internal;
-} Py_buffer;
-
-/* Flags for getting buffers */
-#define PyBUF_SIMPLE 0
-#define PyBUF_WRITABLE 0x0001
-/* we used to include an E, backwards compatible alias */
-#define PyBUF_WRITEABLE PyBUF_WRITABLE
-#define PyBUF_FORMAT 0x0004
-#define PyBUF_ND 0x0008
-#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
-#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
-#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
-#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
-#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
-
-#define PyBUF_CONTIG (PyBUF_ND | PyBUF_WRITABLE)
-#define PyBUF_CONTIG_RO (PyBUF_ND)
-
-#define PyBUF_STRIDED (PyBUF_STRIDES | PyBUF_WRITABLE)
-#define PyBUF_STRIDED_RO (PyBUF_STRIDES)
-
-#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_WRITABLE | PyBUF_FORMAT)
-#define PyBUF_RECORDS_RO (PyBUF_STRIDES | PyBUF_FORMAT)
-
-#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_WRITABLE | PyBUF_FORMAT)
-#define PyBUF_FULL_RO (PyBUF_INDIRECT | PyBUF_FORMAT)
-
-#define PyBUF_READ 0x100
-#define PyBUF_WRITE 0x200
-#define PyBUF_SHADOW 0x400
-
-typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
-typedef void (*releasebufferproc)(Py_buffer *);
-#endif /* #if !defined(PyBUF_SIMPLE) */
-
-/* Flag indicating a pg_buffer; used for assertions within callbacks */
-#ifndef NDEBUG
-#define PyBUF_PYGAME 0x4000
-#endif
-
-#define PyBUF_HAS_FLAG(f, F) (((f) & (F)) == (F))
-
-/* Array information exchange struct C type; inherits from Py_buffer
- *
- * Pygame uses its own Py_buffer derived C struct as an internal representation
- * of an imported array buffer. The extended Py_buffer allows for a
- * per-instance release callback,
- */
-typedef void (*pybuffer_releaseproc)(Py_buffer *);
-
-typedef struct pg_bufferinfo_s {
-    Py_buffer view;
-    PyObject *consumer; /* Input: Borrowed reference */
-    pybuffer_releaseproc release_buffer;
-} pg_buffer;
-
-/* Operating system specific adjustments
- */
-// No signal()
-#if defined(__SYMBIAN32__) && defined(HAVE_SIGNAL_H)
-#undef HAVE_SIGNAL_H
-#endif
-
-#if defined(HAVE_SNPRINTF)
-#undef HAVE_SNPRINTF
-#endif
-
-#ifdef MS_WIN32 /*Python gives us MS_WIN32, SDL needs just WIN32*/
-#ifndef WIN32
-#define WIN32
-#endif
-#endif
-
-/// Prefix when initializing module
-#define MODPREFIX ""
-/// Prefix when importing module
-#define IMPPREFIX "pygame."
-
-#ifdef __SYMBIAN32__
-#undef MODPREFIX
-#undef IMPPREFIX
-// On Symbian there is no pygame package. The extensions are built-in or in
-// sys\bin.
-#define MODPREFIX "pygame_"
-#define IMPPREFIX "pygame_"
-#endif
-
-#include 
-
-/* Pygame's SDL version macros:
- *   IS_SDLv1 is 1 if SDL 1.x.x, 0 otherwise
- *   IS_SDLv2 is 1 if at least SDL 2.0.0, 0 otherwise
- */
-#if (SDL_VERSION_ATLEAST(2, 0, 0))
-#define IS_SDLv1 0
-#define IS_SDLv2 1
-#else
-#define IS_SDLv1 1
-#define IS_SDLv2 0
-#endif
-
-/*#if IS_SDLv1 && PG_MAJOR_VERSION >= 2
-#error pygame 2 requires SDL 2
-#endif*/
-
-#if IS_SDLv2
-/* SDL 1.2 constants removed from SDL 2 */
-typedef enum {
-    SDL_HWSURFACE = 0,
-    SDL_RESIZABLE = SDL_WINDOW_RESIZABLE,
-    SDL_ASYNCBLIT = 0,
-    SDL_OPENGL = SDL_WINDOW_OPENGL,
-    SDL_OPENGLBLIT = 0,
-    SDL_ANYFORMAT = 0,
-    SDL_HWPALETTE = 0,
-    SDL_DOUBLEBUF = 0,
-    SDL_FULLSCREEN = SDL_WINDOW_FULLSCREEN,
-    SDL_HWACCEL = 0,
-    SDL_SRCCOLORKEY = 0,
-    SDL_RLEACCELOK = 0,
-    SDL_SRCALPHA = 0,
-    SDL_NOFRAME = SDL_WINDOW_BORDERLESS,
-    SDL_GL_SWAP_CONTROL = 0,
-    TIMER_RESOLUTION = 0
-} PygameVideoFlags;
-
-/* the wheel button constants were removed from SDL 2 */
-typedef enum {
-    PGM_BUTTON_LEFT = SDL_BUTTON_LEFT,
-    PGM_BUTTON_RIGHT = SDL_BUTTON_RIGHT,
-    PGM_BUTTON_MIDDLE = SDL_BUTTON_MIDDLE,
-    PGM_BUTTON_WHEELUP = 4,
-    PGM_BUTTON_WHEELDOWN = 5,
-    PGM_BUTTON_X1 = SDL_BUTTON_X1 + 2,
-    PGM_BUTTON_X2 = SDL_BUTTON_X2 + 2,
-    PGM_BUTTON_KEEP = 0x80
-} PygameMouseFlags;
-
-typedef enum {
-    SDL_NOEVENT = 0,
-    /* SDL 1.2 allowed for 8 user defined events. */
-    SDL_NUMEVENTS = SDL_USEREVENT + 8,
-    SDL_ACTIVEEVENT = SDL_NUMEVENTS,
-    PGE_EVENTBEGIN = SDL_NUMEVENTS,
-    SDL_VIDEORESIZE,
-    SDL_VIDEOEXPOSE,
-    PGE_KEYREPEAT,
-    PGE_EVENTEND
-} PygameEventCode;
-
-#define PGE_NUMEVENTS (PGE_EVENTEND - PGE_EVENTBEGIN)
-
-typedef enum {
-    SDL_APPFOCUSMOUSE,
-    SDL_APPINPUTFOCUS,
-    SDL_APPACTIVE
-} PygameAppCode;
-
-/* Surface flags: based on SDL 1.2 flags */
-typedef enum {
-    PGS_SWSURFACE = 0x00000000,
-    PGS_HWSURFACE = 0x00000001,
-    PGS_ASYNCBLIT = 0x00000004,
-
-    PGS_ANYFORMAT = 0x10000000,
-    PGS_HWPALETTE = 0x20000000,
-    PGS_DOUBLEBUF = 0x40000000,
-    PGS_FULLSCREEN = 0x80000000,
-    PGS_OPENGL = 0x00000002,
-    PGS_OPENGLBLIT = 0x0000000A,
-    PGS_RESIZABLE = 0x00000010,
-    PGS_NOFRAME = 0x00000020,
-    PGS_SHOWN = 0x00000040,  /* Added from SDL 2 */
-    PGS_HIDDEN = 0x00000080, /* Added from SDL 2 */
-
-    PGS_HWACCEL = 0x00000100,
-    PGS_SRCCOLORKEY = 0x00001000,
-    PGS_RLEACCELOK = 0x00002000,
-    PGS_RLEACCEL = 0x00004000,
-    PGS_SRCALPHA = 0x00010000,
-    PGS_PREALLOC = 0x01000000
-} PygameSurfaceFlags;
-
-typedef struct {
-    Uint32 hw_available:1;
-    Uint32 wm_available:1;
-    Uint32 blit_hw:1;
-    Uint32 blit_hw_CC:1;
-    Uint32 blit_hw_A:1;
-    Uint32 blit_sw:1;
-    Uint32 blit_sw_CC:1;
-    Uint32 blit_sw_A:1;
-    Uint32 blit_fill:1;
-    Uint32 video_mem;
-    SDL_PixelFormat *vfmt;
-    SDL_PixelFormat vfmt_data;
-    int current_w;
-    int current_h;
-} pg_VideoInfo;
-
-#endif /* IS_SDLv2 */
-/* macros used throughout the source */
-#define RAISE(x, y) (PyErr_SetString((x), (y)), (PyObject *)NULL)
-
-#ifdef WITH_THREAD
-#define PG_CHECK_THREADS() (1)
-#else /* ~WITH_THREAD */
-#define PG_CHECK_THREADS()                        \
-    (RAISE(PyExc_NotImplementedError,             \
-           "Python built without thread support"))
-#endif /* ~WITH_THREAD */
-
-#define PyType_Init(x) (((x).ob_type) = &PyType_Type)
-#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API"
-
-#ifndef MIN
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#endif
-
-#ifndef MAX
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
-#ifndef ABS
-#define ABS(a) (((a) < 0) ? -(a) : (a))
-#endif
-
-/* test sdl initializations */
-#define VIDEO_INIT_CHECK()                    \
-    if (!SDL_WasInit(SDL_INIT_VIDEO))         \
-    return RAISE(pgExc_SDLError, "video system not initialized")
-
-#define CDROM_INIT_CHECK()                    \
-    if (!SDL_WasInit(SDL_INIT_CDROM))         \
-    return RAISE(pgExc_SDLError, "cdrom system not initialized")
-
-#define JOYSTICK_INIT_CHECK()                 \
-    if (!SDL_WasInit(SDL_INIT_JOYSTICK))      \
-    return RAISE(pgExc_SDLError, "joystick system not initialized")
-
-/* BASE */
-#define VIEW_CONTIGUOUS 1
-#define VIEW_C_ORDER 2
-#define VIEW_F_ORDER 4
-
-#define PYGAMEAPI_BASE_FIRSTSLOT 0
-#if IS_SDLv1
-#define PYGAMEAPI_BASE_NUMSLOTS 19
-#else /* IS_SDLv2 */
-#define PYGAMEAPI_BASE_NUMSLOTS 23
-#endif /* IS_SDLv2 */
-#ifndef PYGAMEAPI_BASE_INTERNAL
-#define pgExc_SDLError ((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT])
-
-#define pg_RegisterQuit \
-    (*(void (*)(void (*)(void)))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 1])
-
-#define pg_IntFromObj \
-    (*(int (*)(PyObject *, int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 2])
-
-#define pg_IntFromObjIndex \
-    (*(int (*)(PyObject *, int, \
-               int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 3])
-
-#define pg_TwoIntsFromObj \
-    (*(int (*)(PyObject *, int *, \
-               int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 4])
-
-#define pg_FloatFromObj \
-    (*(int (*)(PyObject *, float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 5])
-
-#define pg_FloatFromObjIndex \
-    (*(int (*)(PyObject *, int, \
-               float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 6])
-
-#define pg_TwoFloatsFromObj \
-    (*(int (*)(PyObject *, float *, \
-               float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 7])
-
-#define pg_UintFromObj \
-    (*(int (*)(PyObject *, \
-               Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 8])
-
-#define pg_UintFromObjIndex \
-    (*(int (*)(PyObject *, int, \
-               Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 9])
-
-#define pgVideo_AutoQuit \
-    (*(void (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 10])
-
-#define pgVideo_AutoInit \
-    (*(int (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 11])
-
-#define pg_RGBAFromObj \
-    (*(int (*)(PyObject *, \
-               Uint8 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 12])
-
-#define pgBuffer_AsArrayInterface \
-    (*(PyObject * (*)(Py_buffer *)) \
-         PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 13])
-
-#define pgBuffer_AsArrayStruct \
-    (*(PyObject * (*)(Py_buffer *)) \
-         PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 14])
-
-#define pgObject_GetBuffer \
-    (*(int (*)(PyObject *, pg_buffer *, \
-               int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 15])
-
-#define pgBuffer_Release \
-    (*(void (*)(pg_buffer *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 16])
-
-#define pgDict_AsBuffer \
-    (*(int (*)(pg_buffer *, PyObject *, \
-               int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 17])
-
-#define pgExc_BufferError \
-    ((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 18])
-
-#if IS_SDLv2
-#define pg_GetDefaultWindow \
-    (*(SDL_Window * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 19])
-
-#define pg_SetDefaultWindow \
-    (*(void (*)(SDL_Window *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 20])
-
-#define pg_GetDefaultWindowSurface \
-    (*(PyObject * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 21])
-
-#define pg_SetDefaultWindowSurface \
-    (*(void (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 22])
-
-#endif /* IS_SDLv2 */
-
-#define import_pygame_base() IMPORT_PYGAME_MODULE(base, BASE)
-#endif
-
-/* RECT */
-#define PYGAMEAPI_RECT_FIRSTSLOT \
-    (PYGAMEAPI_BASE_FIRSTSLOT + PYGAMEAPI_BASE_NUMSLOTS)
-#define PYGAMEAPI_RECT_NUMSLOTS 4
-
-#if IS_SDLv1
-typedef struct {
-    int x, y;
-    int w, h;
-} GAME_Rect;
-#else
-typedef SDL_Rect GAME_Rect;
-#endif
-
-typedef struct {
-    PyObject_HEAD GAME_Rect r;
-    PyObject *weakreflist;
-} pgRectObject;
-
-#define pgRect_AsRect(x) (((pgRectObject *)x)->r)
-#ifndef PYGAMEAPI_RECT_INTERNAL
-#define pgRect_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0])
-#define pgRect_Type \
-    (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0])
-#define pgRect_New \
-    (*(PyObject * (*)(SDL_Rect *)) PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 1])
-#define pgRect_New4 \
-    (*(PyObject * (*)(int, int, int, int)) \
-         PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 2])
-#define pgRect_FromObject \
-    (*(GAME_Rect * (*)(PyObject *, GAME_Rect *)) \
-         PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 3])
-
-#define import_pygame_rect() IMPORT_PYGAME_MODULE(rect, RECT)
-#endif
-
-/* CDROM */
-#define PYGAMEAPI_CDROM_FIRSTSLOT \
-    (PYGAMEAPI_RECT_FIRSTSLOT + PYGAMEAPI_RECT_NUMSLOTS)
-#define PYGAMEAPI_CDROM_NUMSLOTS 2
-
-typedef struct {
-    PyObject_HEAD int id;
-} pgCDObject;
-
-#define pgCD_AsID(x) (((pgCDObject *)x)->id)
-#ifndef PYGAMEAPI_CDROM_INTERNAL
-#define pgCD_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0])
-#define pgCD_Type \
-    (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0])
-#define pgCD_New \
-    (*(PyObject * (*)(int)) PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 1])
-
-#define import_pygame_cd() IMPORT_PYGAME_MODULE(cdrom, CDROM)
-#endif
-
-/* JOYSTICK */
-#define PYGAMEAPI_JOYSTICK_FIRSTSLOT \
-    (PYGAMEAPI_CDROM_FIRSTSLOT + PYGAMEAPI_CDROM_NUMSLOTS)
-#define PYGAMEAPI_JOYSTICK_NUMSLOTS 2
-
-typedef struct {
-    PyObject_HEAD int id;
-} pgJoystickObject;
-
-#define pgJoystick_AsID(x) (((pgJoystickObject *)x)->id)
-
-#ifndef PYGAMEAPI_JOYSTICK_INTERNAL
-#define pgJoystick_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0])
-
-#define pgJoystick_Type \
-    (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0])
-#define pgJoystick_New \
-    (*(PyObject * (*)(int)) PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 1])
-
-#define import_pygame_joystick() IMPORT_PYGAME_MODULE(joystick, JOYSTICK)
-#endif
-
-/* DISPLAY */
-#define PYGAMEAPI_DISPLAY_FIRSTSLOT \
-    (PYGAMEAPI_JOYSTICK_FIRSTSLOT + PYGAMEAPI_JOYSTICK_NUMSLOTS)
-#define PYGAMEAPI_DISPLAY_NUMSLOTS 2
-
-typedef struct {
-#if IS_SDLv1
-    PyObject_HEAD SDL_VideoInfo info;
-#else
-    PyObject_HEAD pg_VideoInfo info;
-#endif
-} pgVidInfoObject;
-
-#define pgVidInfo_AsVidInfo(x) (((pgVidInfoObject *)x)->info)
-#ifndef PYGAMEAPI_DISPLAY_INTERNAL
-#define pgVidInfo_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0])
-
-#define pgVidInfo_Type \
-    (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0])
-
-#if IS_SDLv1
-#define pgVidInfo_New \
-    (*(PyObject * (*)(SDL_VideoInfo *)) \
-         PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1])
-#else
-#define pgVidInfo_New \
-    (*(PyObject * (*)(pg_VideoInfo *)) \
-         PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1])
-#endif
-
-#define import_pygame_display() IMPORT_PYGAME_MODULE(display, DISPLAY)
-#endif
-
-/* SURFACE */
-#define PYGAMEAPI_SURFACE_FIRSTSLOT \
-    (PYGAMEAPI_DISPLAY_FIRSTSLOT + PYGAMEAPI_DISPLAY_NUMSLOTS)
-#define PYGAMEAPI_SURFACE_NUMSLOTS 3
-typedef struct {
-    PyObject_HEAD SDL_Surface *surf;
-#if IS_SDLv2
-    int owner;
-#endif /* IS_SDLv2 */
-    struct pgSubSurface_Data *subsurface; /*ptr to subsurface data (if a
-                                           * subsurface)*/
-    PyObject *weakreflist;
-    PyObject *locklist;
-    PyObject *dependency;
-} pgSurfaceObject;
-#define pgSurface_AsSurface(x) (((pgSurfaceObject *)x)->surf)
-#ifndef PYGAMEAPI_SURFACE_INTERNAL
-#define pgSurface_Check(x) \
-    (PyObject_IsInstance((x), \
-                         (PyObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0]))
-#define pgSurface_Type \
-    (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0])
-#if IS_SDLv1
-#define pgSurface_New \
-    (*(PyObject * (*)(SDL_Surface *)) \
-         PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1])
-#else /* IS_SDLv2 */
-#define pgSurface_New2 \
-    (*(PyObject * (*)(SDL_Surface *, int)) \
-         PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1])
-#endif /* IS_SDLv2 */
-#define pgSurface_Blit \
-    (*(int (*)(PyObject *, PyObject *, SDL_Rect *, SDL_Rect *, \
-               int))PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 2])
-
-#define import_pygame_surface()                       \
-    do {                                              \
-        IMPORT_PYGAME_MODULE(surface, SURFACE);       \
-        if (PyErr_Occurred() != NULL)                 \
-            break;                                    \
-        IMPORT_PYGAME_MODULE(surflock, SURFLOCK);     \
-    } while (0)
-
-#if IS_SDLv2
-#define pgSurface_New(surface) pgSurface_New2((surface), 1)
-#define pgSurface_NewNoOwn(surface) pgSurface_New2((surface), 0)
-#endif /* IS_SDLv2 */
-
-#endif
-
-/* SURFLOCK */ /*auto import/init by surface*/
-#define PYGAMEAPI_SURFLOCK_FIRSTSLOT \
-    (PYGAMEAPI_SURFACE_FIRSTSLOT + PYGAMEAPI_SURFACE_NUMSLOTS)
-#define PYGAMEAPI_SURFLOCK_NUMSLOTS 8
-struct pgSubSurface_Data {
-    PyObject *owner;
-    int pixeloffset;
-    int offsetx, offsety;
-};
-
-typedef struct {
-    PyObject_HEAD PyObject *surface;
-    PyObject *lockobj;
-    PyObject *weakrefs;
-} pgLifetimeLockObject;
-
-#ifndef PYGAMEAPI_SURFLOCK_INTERNAL
-#define pgLifetimeLock_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 0])
-#define pgSurface_Prep(x) \
-    if (((pgSurfaceObject *)x)->subsurface) \
-    (*(*(void (*)( \
-        PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 1]))(x)
-
-#define pgSurface_Unprep(x) \
-    if (((pgSurfaceObject *)x)->subsurface) \
-    (*(*(void (*)( \
-        PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 2]))(x)
-
-#define pgSurface_Lock \
-    (*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 3])
-#define pgSurface_Unlock \
-    (*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 4])
-#define pgSurface_LockBy \
-    (*(int (*)(PyObject *, \
-               PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 5])
-#define pgSurface_UnlockBy \
-    (*(int (*)(PyObject *, \
-               PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 6])
-#define pgSurface_LockLifetime \
-    (*(PyObject * (*)(PyObject *, PyObject *)) \
-         PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 7])
-#endif
-
-/* EVENT */
-#define PYGAMEAPI_EVENT_FIRSTSLOT \
-    (PYGAMEAPI_SURFLOCK_FIRSTSLOT + PYGAMEAPI_SURFLOCK_NUMSLOTS)
-#if IS_SDLv1
-#define PYGAMEAPI_EVENT_NUMSLOTS 4
-#else /* IS_SDLv2 */
-#define PYGAMEAPI_EVENT_NUMSLOTS 6
-#endif /* IS_SDLv2 */
-
-typedef struct {
-    PyObject_HEAD int type;
-    PyObject *dict;
-} pgEventObject;
-
-#ifndef PYGAMEAPI_EVENT_INTERNAL
-#define pgEvent_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0])
-#define pgEvent_Type \
-    (*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0])
-#define pgEvent_New \
-    (*(PyObject * (*)(SDL_Event *)) \
-         PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 1])
-#define pgEvent_New2 \
-    (*(PyObject * (*)(int, PyObject *)) \
-         PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 2])
-#define pgEvent_FillUserEvent \
-    (*(int (*)(pgEventObject *, \
-               SDL_Event *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 3])
-#if IS_SDLv2
-#define pg_EnableKeyRepeat \
-    (*(int (*)(int, int))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 4])
-#define pg_GetKeyRepeat \
-    (*(void (*)(int *, int *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 5])
-#endif /* IS_SDLv2 */
-#define import_pygame_event() IMPORT_PYGAME_MODULE(event, EVENT)
-#endif
-
-/* RWOBJECT */
-/*the rwobject are only needed for C side work, not accessable from python*/
-#define PYGAMEAPI_RWOBJECT_FIRSTSLOT \
-    (PYGAMEAPI_EVENT_FIRSTSLOT + PYGAMEAPI_EVENT_NUMSLOTS)
-#define PYGAMEAPI_RWOBJECT_NUMSLOTS 6
-#ifndef PYGAMEAPI_RWOBJECT_INTERNAL
-#define pgRWops_FromObject \
-    (*(SDL_RWops * (*)(PyObject *)) \
-         PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 0])
-#define pgRWops_IsFileObject \
-    (*(int (*)(SDL_RWops *))PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 1])
-#define pg_EncodeFilePath \
-    (*(PyObject * (*)(PyObject *, PyObject *)) \
-         PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 2])
-#define pg_EncodeString \
-    (*(PyObject * (*)(PyObject *, const char *, const char *, PyObject *)) \
-         PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 3])
-#define pgRWops_FromFileObject \
-    (*(SDL_RWops * (*)(PyObject *)) \
-         PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 4])
-#define pgRWops_ReleaseObject \
-    (*(int (*)(SDL_RWops *)) \
-         PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 5])
-#define import_pygame_rwobject() IMPORT_PYGAME_MODULE(rwobject, RWOBJECT)
-
-#endif
-
-/* PixelArray */
-#define PYGAMEAPI_PIXELARRAY_FIRSTSLOT \
-    (PYGAMEAPI_RWOBJECT_FIRSTSLOT + PYGAMEAPI_RWOBJECT_NUMSLOTS)
-#define PYGAMEAPI_PIXELARRAY_NUMSLOTS 2
-#ifndef PYGAMEAPI_PIXELARRAY_INTERNAL
-#define PyPixelArray_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 0])
-#define PyPixelArray_New \
-    (*(PyObject * (*)) PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 1])
-#define import_pygame_pixelarray() IMPORT_PYGAME_MODULE(pixelarray, PIXELARRAY)
-#endif /* PYGAMEAPI_PIXELARRAY_INTERNAL */
-
-/* Color */
-#define PYGAMEAPI_COLOR_FIRSTSLOT \
-    (PYGAMEAPI_PIXELARRAY_FIRSTSLOT + PYGAMEAPI_PIXELARRAY_NUMSLOTS)
-#define PYGAMEAPI_COLOR_NUMSLOTS 4
-#ifndef PYGAMEAPI_COLOR_INTERNAL
-#define pgColor_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 0])
-#define pgColor_Type (*(PyObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT])
-#define pgColor_New \
-    (*(PyObject * (*)(Uint8 *)) PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 1])
-#define pgColor_NewLength \
-    (*(PyObject * (*)(Uint8 *, Uint8)) \
-         PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 3])
-
-#define pg_RGBAFromColorObj \
-    (*(int (*)(PyObject *, \
-               Uint8 *))PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 2])
-#define import_pygame_color() IMPORT_PYGAME_MODULE(color, COLOR)
-#endif /* PYGAMEAPI_COLOR_INTERNAL */
-
-/* Math */
-#define PYGAMEAPI_MATH_FIRSTSLOT \
-    (PYGAMEAPI_COLOR_FIRSTSLOT + PYGAMEAPI_COLOR_NUMSLOTS)
-#define PYGAMEAPI_MATH_NUMSLOTS 2
-#ifndef PYGAMEAPI_MATH_INTERNAL
-#define pgVector2_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 0])
-#define pgVector3_Check(x) \
-    ((x)->ob_type == \
-     (PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1])
-/*
-#define pgVector2_New \
-    (*(PyObject*(*)) PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1])
-*/
-#define import_pygame_math() IMPORT_PYGAME_MODULE(math, MATH)
-#endif /* PYGAMEAPI_MATH_INTERNAL */
-
-#define PG_CAPSULE_NAME(m) (IMPPREFIX m "." PYGAMEAPI_LOCAL_ENTRY)
-
-#define _IMPORT_PYGAME_MODULE(module, MODULE, api_root)                   \
-    {                                                                     \
-        PyObject *_module = PyImport_ImportModule(IMPPREFIX #module);     \
-                                                                          \
-        if (_module != NULL) {                                            \
-            PyObject *_c_api =                                            \
-                PyObject_GetAttrString(_module, PYGAMEAPI_LOCAL_ENTRY);   \
-                                                                          \
-            Py_DECREF(_module);                                           \
-            if (_c_api != NULL && PyCapsule_CheckExact(_c_api)) {         \
-                void **localptr = (void **)PyCapsule_GetPointer(          \
-                    _c_api, PG_CAPSULE_NAME(#module));                    \
-                                                                          \
-                if (localptr != NULL) {                                   \
-                    memcpy(api_root + PYGAMEAPI_##MODULE##_FIRSTSLOT,     \
-                           localptr,                                      \
-                           sizeof(void **) * PYGAMEAPI_##MODULE##_NUMSLOTS); \
-                }                                                         \
-            }                                                             \
-            Py_XDECREF(_c_api);                                           \
-        }                                                                 \
-    }
-
-#ifndef NO_PYGAME_C_API
-#define IMPORT_PYGAME_MODULE(module, MODULE) \
-    _IMPORT_PYGAME_MODULE(module, MODULE, PyGAME_C_API)
-#define PYGAMEAPI_TOTALSLOTS \
-    (PYGAMEAPI_MATH_FIRSTSLOT + PYGAMEAPI_MATH_NUMSLOTS)
-
-#ifdef PYGAME_H
-void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS] = {NULL};
-#else
-extern void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS];
-#endif
-#endif
-
-#if PG_HAVE_CAPSULE
-#define encapsulate_api(ptr, module) \
-    PyCapsule_New(ptr, PG_CAPSULE_NAME(module), NULL)
-#else
-#define encapsulate_api(ptr, module) PyCObject_FromVoidPtr(ptr, NULL)
-#endif
-
-#ifndef PG_INLINE
-#if defined(__clang__)
-#define PG_INLINE __inline__ __attribute__((__unused__))
-#elif defined(__GNUC__)
-#define PG_INLINE __inline__
-#elif defined(_MSC_VER)
-#define PG_INLINE __inline
-#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-#define PG_INLINE inline
-#else
-#define PG_INLINE
-#endif
-#endif
-
-/*last platform compiler stuff*/
-#if defined(macintosh) && defined(__MWERKS__) || defined(__SYMBIAN32__)
-#define PYGAME_EXPORT __declspec(export)
-#else
-#define PYGAME_EXPORT
-#endif
-
-
-#endif /* PYGAME_H */
diff --git a/venv/include/site/python3.7/pygame/_surface.h b/venv/include/site/python3.7/pygame/_surface.h
deleted file mode 100644
index 016aac0..0000000
--- a/venv/include/site/python3.7/pygame/_surface.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
-    pygame - Python Game Library
-    Copyright (C) 2000-2001  Pete Shinners
-    Copyright (C) 2007 Marcus von Appen
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-    Pete Shinners
-    pete@shinners.org
-*/
-
-#ifndef _SURFACE_H
-#define _SURFACE_H
-
-#include "_pygame.h"
-#include "surface.h"
-
-#endif
-
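Note: _pygame.h above shows pygame's late-binding scheme: each extension module publishes its C function table in a capsule stored under PYGAMEAPI_LOCAL_ENTRY ("_PYGAME_C_API"), and _IMPORT_PYGAME_MODULE memcpy's that table into the importer's slot array. A hedged Python-side peek at the same capsule; the attribute name is taken from the header, but treating it as a PyCapsule assumes a capsule-based (Python 3) pygame build:

    # capsule_peek.py - inspect the exported C API object from Python
    import pygame.base

    capsule = getattr(pygame.base, "_PYGAME_C_API")
    print(type(capsule).__name__)  # expected: "PyCapsule"
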
diff --git a/venv/include/site/python3.7/pygame/bitmask.h b/venv/include/site/python3.7/pygame/bitmask.h
deleted file mode 100644
index 1230497..0000000
--- a/venv/include/site/python3.7/pygame/bitmask.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
-    Bitmask 1.7 - A pixel-perfect collision detection library.
-
-    Copyright (C) 2002-2005 Ulf Ekstrom except for the bitcount
-    function which is copyright (C) Donald W. Gillies, 1992.
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#ifndef BITMASK_H
-#define BITMASK_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include 
-/* Define INLINE for different compilers.  If your compiler does not
-   support inlining then there might be a performance hit in
-   bitmask_overlap_area().
-*/
-#ifndef INLINE
-# ifdef __GNUC__
-#  define INLINE inline
-# else
-#  ifdef _MSC_VER
-#   define INLINE __inline
-#  else
-#   define INLINE
-#  endif
-# endif
-#endif
-
-#define BITMASK_W unsigned long int
-#define BITMASK_W_LEN (sizeof(BITMASK_W)*CHAR_BIT)
-#define BITMASK_W_MASK (BITMASK_W_LEN - 1)
-#define BITMASK_N(n) ((BITMASK_W)1 << (n))
-
-typedef struct bitmask
-{
-    int w,h;
-    BITMASK_W bits[1];
-} bitmask_t;
-
-/* Creates a bitmask of width w and height h, where
-   w and h must both be greater than or equal to 0.
-   The mask is automatically cleared when created.
- */
-bitmask_t *bitmask_create(int w, int h);
-
-/* Frees all the memory allocated by bitmask_create for m. */
-void bitmask_free(bitmask_t *m);
-
-/* Clears all bits in the mask */
-void bitmask_clear(bitmask_t *m);
-
-/* Sets all bits in the mask */
-void bitmask_fill(bitmask_t *m);
-
-/* Flips all bits in the mask */
-void bitmask_invert(bitmask_t *m);
-
-/* Counts the bits in the mask */
-unsigned int bitmask_count(bitmask_t *m);
-
-/* Returns nonzero if the bit at (x,y) is set.  Coordinates start at
-   (0,0) */
-static INLINE int bitmask_getbit(const bitmask_t *m, int x, int y)
-{
-    return (m->bits[x/BITMASK_W_LEN*m->h + y] & BITMASK_N(x & BITMASK_W_MASK)) != 0;
-}
-
-/* Sets the bit at (x,y) */
-static INLINE void bitmask_setbit(bitmask_t *m, int x, int y)
-{
-    m->bits[x/BITMASK_W_LEN*m->h + y] |= BITMASK_N(x & BITMASK_W_MASK);
-}
-
-/* Clears the bit at (x,y) */
-static INLINE void bitmask_clearbit(bitmask_t *m, int x, int y)
-{
-    m->bits[x/BITMASK_W_LEN*m->h + y] &= ~BITMASK_N(x & BITMASK_W_MASK);
-}
-
-/* Returns nonzero if the masks overlap with the given offset.
-   The overlap tests uses the following offsets (which may be negative):
-
-   +----+----------..
-   |A   | yoffset
-   |  +-+----------..
-   +--|B
-   |xoffset
-   |  |
-   :  :
-*/
-int bitmask_overlap(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
-
-/* Like bitmask_overlap(), but will also give a point of intersection.
-   x and y are given in the coordinates of mask a, and are untouched
-   if there is no overlap. */
-int bitmask_overlap_pos(const bitmask_t *a, const bitmask_t *b,
-                        int xoffset, int yoffset, int *x, int *y);
-
-/* Returns the number of overlapping 'pixels' */
-int bitmask_overlap_area(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
-
-/* Fills a mask with the overlap of two other masks. A bitwise AND. */
-void bitmask_overlap_mask (const bitmask_t *a, const bitmask_t *b, bitmask_t *c, int xoffset, int yoffset);
-
-/* Draws mask b onto mask a (bitwise OR).  Can be used to compose large
-   (game background?) mask from several submasks, which may speed up
-   the testing. */
-
-void bitmask_draw(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
-
-void bitmask_erase(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
-
-/* Return a new scaled bitmask, with dimensions w*h. The quality of the
-   scaling may not be perfect for all circumstances, but it should
-   be reasonable. If either w or h is 0 a clear 1x1 mask is returned. */
-bitmask_t *bitmask_scale(const bitmask_t *m, int w, int h);
-
-/* Convolve b into a, drawing the output into o, shifted by offset.  If offset
- * is 0, then the (x,y) bit will be set if and only if
- * bitmask_overlap(a, b, x - b->w - 1, y - b->h - 1) returns true.
- *
- * Modifies bits o[xoffset ... xoffset + a->w + b->w - 1)
- *               [yoffset ... yoffset + a->h + b->h - 1). */
-void bitmask_convolve(const bitmask_t *a, const bitmask_t *b, bitmask_t *o, int xoffset, int yoffset);
-
-#ifdef __cplusplus
} /* End of extern "C" { */
-#endif
-
-#endif
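Note: bitmask.h is the C engine behind pygame.mask. A hedged sketch of the same overlap test from Python, mirroring bitmask_overlap_pos(): the offset is b's position relative to a, and the first colliding bit is reported in a's coordinates. Mask sizes and the offset are illustrative; Mask.fill() assumes a pygame recent enough to provide it (it exists in the 1.9.x line this header belongs to):

    # mask_overlap.py - pixel-perfect overlap via pygame.mask
    import pygame.mask

    a = pygame.mask.Mask((16, 16))
    b = pygame.mask.Mask((16, 16))
    a.fill()
    b.fill()
    # First overlapping point in a's coordinates, or None if disjoint.
    print(a.overlap(b, (8, 8)))  # -> (8, 8)
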
diff --git a/venv/include/site/python3.7/pygame/camera.h b/venv/include/site/python3.7/pygame/camera.h
deleted file mode 100644
index 46d2beb..0000000
--- a/venv/include/site/python3.7/pygame/camera.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
-    pygame - Python Game Library
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-*/
-
-#include "pygame.h"
-#include "doc/camera_doc.h"
-
-#if defined(__unix__)
-    #include 
-    #include 
-    #include 
-    #include 
-    #include 
-
-    #include              /* low-level i/o */
-    #include 
-    #include 
-    #include 
-    #include 
-    #include 
-    #include 
-    #include 
-
-    /* on freebsd there is no asm/types */
-    #ifdef linux
-        #include           /* for videodev2.h */
-    #endif
-
-    #include 
-#elif defined(__APPLE__)
-    #include 
-    /* We support OSX 10.6 and below. */
-    #if __MAC_OS_X_VERSION_MAX_ALLOWED <= 1060
-        #define PYGAME_MAC_CAMERA_OLD 1
-    #endif
-#endif
-
-#if defined(PYGAME_MAC_CAMERA_OLD)
-    #include 
-    #include 
-    #include 
-#endif
-
-/* some constants used which are not defined on non-v4l machines. */
-#ifndef V4L2_PIX_FMT_RGB24
-    #define V4L2_PIX_FMT_RGB24 'RGB3'
-#endif
-#ifndef V4L2_PIX_FMT_RGB444
-    #define V4L2_PIX_FMT_RGB444 'R444'
-#endif
-#ifndef V4L2_PIX_FMT_YUYV
-    #define V4L2_PIX_FMT_YUYV 'YUYV'
-#endif
-
-#define CLEAR(x) memset (&(x), 0, sizeof (x))
-#define SAT(c) if (c & (~255)) { if (c < 0) c = 0; else c = 255; }
-#define SAT2(c) ((c) & (~255) ? ((c) < 0 ? 0 : 255) : (c))
-#define DEFAULT_WIDTH 640
-#define DEFAULT_HEIGHT 480
-#define RGB_OUT 1
-#define YUV_OUT 2
-#define HSV_OUT 4
-#define CAM_V4L 1 /* deprecated. the incomplete support in pygame was removed */
-#define CAM_V4L2 2
-
-struct buffer {
-    void * start;
-    size_t length;
-};
-
-#if defined(__unix__)
-typedef struct pgCameraObject {
-    PyObject_HEAD
-    char* device_name;
-    int camera_type;
-    unsigned long pixelformat;
-    unsigned int color_out;
-    struct buffer* buffers;
-    unsigned int n_buffers;
-    int width;
-    int height;
-    int size;
-    int hflip;
-    int vflip;
-    int brightness;
-    int fd;
-} pgCameraObject;
-#elif defined(PYGAME_MAC_CAMERA_OLD)
-typedef struct pgCameraObject {
-    PyObject_HEAD
-    char* device_name;              /* unieke name of the device */
-    OSType pixelformat;
-    unsigned int color_out;
-    SeqGrabComponent component;     /* A type used by the Sequence Grabber API */
-    SGChannel channel;              /* Channel of the Sequence Grabber */
-    GWorldPtr gworld;               /* Pointer to the struct that holds the data of the captured image */
-    Rect boundsRect;                /* bounds of the image frame */
-    long size;                      /* size of the image in our buffer to draw */
-    int hflip;
-    int vflip;
-    short depth;
-    struct buffer pixels;
-    //struct buffer tmp_pixels  /* place where the flipped image in temporarly stored if hflip or vflip is true.*/
-} pgCameraObject;
-
-#else
-/* generic definition.
-*/
-
-typedef struct pgCameraObject {
-    PyObject_HEAD
-    char* device_name;
-    int camera_type;
-    unsigned long pixelformat;
-    unsigned int color_out;
-    struct buffer* buffers;
-    unsigned int n_buffers;
-    int width;
-    int height;
-    int size;
-    int hflip;
-    int vflip;
-    int brightness;
-    int fd;
-} pgCameraObject;
-#endif
-
-/* internal functions for colorspace conversion */
-void colorspace (SDL_Surface *src, SDL_Surface *dst, int cspace);
-void rgb24_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
-void rgb444_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
-void rgb_to_yuv (const void* src, void* dst, int length,
-                 unsigned long source, SDL_PixelFormat* format);
-void rgb_to_hsv (const void* src, void* dst, int length,
-                 unsigned long source, SDL_PixelFormat* format);
-void yuyv_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
-void yuyv_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format);
-void uyvy_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
-void uyvy_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format);
-void sbggr8_to_rgb (const void* src, void* dst, int width, int height,
-                    SDL_PixelFormat* format);
-void yuv420_to_rgb (const void* src, void* dst, int width, int height,
-                    SDL_PixelFormat* format);
-void yuv420_to_yuv (const void* src, void* dst, int width, int height,
-                    SDL_PixelFormat* format);
-
-#if defined(__unix__)
-/* internal functions specific to v4l2 */
-char** v4l2_list_cameras (int* num_devices);
-int v4l2_get_control (int fd, int id, int *value);
-int v4l2_set_control (int fd, int id, int value);
-PyObject* v4l2_read_raw (pgCameraObject* self);
-int v4l2_xioctl (int fd, int request, void *arg);
-int v4l2_process_image (pgCameraObject* self, const void *image,
-                        unsigned int buffer_size, SDL_Surface* surf);
-int v4l2_query_buffer (pgCameraObject* self);
-int v4l2_read_frame (pgCameraObject* self, SDL_Surface* surf);
-int v4l2_stop_capturing (pgCameraObject* self);
-int v4l2_start_capturing (pgCameraObject* self);
-int v4l2_uninit_device (pgCameraObject* self);
-int v4l2_init_mmap (pgCameraObject* self);
-int v4l2_init_device (pgCameraObject* self);
-int v4l2_close_device (pgCameraObject* self);
-int v4l2_open_device (pgCameraObject* self);
-
-#elif defined(PYGAME_MAC_CAMERA_OLD)
-/* internal functions specific to mac */
-char** mac_list_cameras(int* num_devices);
-int mac_open_device (pgCameraObject* self);
-int mac_init_device(pgCameraObject* self);
-int mac_close_device (pgCameraObject* self);
-int mac_start_capturing(pgCameraObject* self);
-int mac_stop_capturing (pgCameraObject* self);
-
-int mac_get_control(pgCameraObject* self, int id, int* value);
-int mac_set_control(pgCameraObject* self, int id, int value);
-
-PyObject* mac_read_raw(pgCameraObject *self);
-int mac_read_frame(pgCameraObject* self, SDL_Surface* surf);
-int mac_camera_idle(pgCameraObject* self);
-int mac_copy_gworld_to_surface(pgCameraObject* self, SDL_Surface* surf);
-
-void flip_image(const void* image, void* flipped_image, int width, int height,
-                short depth, int hflip, int vflip);
-
-#endif
diff --git a/venv/include/site/python3.7/pygame/fastevents.h b/venv/include/site/python3.7/pygame/fastevents.h
deleted file mode 100644
index 04098c3..0000000
--- a/venv/include/site/python3.7/pygame/fastevents.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef _FASTEVENTS_H_
-#define _FASTEVENTS_H_
-/*
-    NET2 is a threaded, event based, network IO library for SDL.
-    Copyright (C) 2002 Bob Pendleton
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Lesser General Public License
-    as published by the Free Software Foundation; either version 2.1
-    of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-    Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public
-    License along with this library; if not, write to the Free
-    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-    02111-1307 USA
-
-    If you do not wish to comply with the terms of the LGPL please
-    contact the author as other terms are available for a fee.
-
-    Bob Pendleton
-    Bob@Pendleton.com
-*/
-
-#include "SDL.h"
-
-#ifdef __cplusplus
extern "C" {
-#endif
-
-    int FE_Init(void);                  // Initialize FE
-    void FE_Quit(void);                 // shutdown FE
-
-    void FE_PumpEvents(void);           // replacement for SDL_PumpEvents
-    int FE_PollEvent(SDL_Event *event); // replacement for SDL_PollEvent
-    int FE_WaitEvent(SDL_Event *event); // replacement for SDL_WaitEvent
-    int FE_PushEvent(SDL_Event *event); // replacement for SDL_PushEvent
-
-    char *FE_GetError(void);            // get the last error
-#ifdef __cplusplus
}
-#endif
-
-#endif
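Note: fastevents.h is the C side of the optional pygame.fastevent module, whose functions the comments above describe as drop-in replacements for the SDL event calls (FE_PollEvent/FE_WaitEvent). A hedged sketch of the Python-facing equivalents; this assumes a pygame 1.9.x build where the fastevent module was still shipped:

    # fastevent_demo.py - poll the thread-safe event queue
    import pygame
    import pygame.fastevent

    pygame.display.init()
    pygame.fastevent.init()
    event = pygame.fastevent.poll()  # like FE_PollEvent: returns immediately
    print(event)
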
diff --git a/venv/include/site/python3.7/pygame/font.h b/venv/include/site/python3.7/pygame/font.h
deleted file mode 100644
index b861a29..0000000
--- a/venv/include/site/python3.7/pygame/font.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-    pygame - Python Game Library
-    Copyright (C) 2000-2001  Pete Shinners
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-    Pete Shinners
-    pete@shinners.org
-*/
-
-#include 
-#if defined(HAVE_SNPRINTF) /* also defined in SDL_ttf (SDL.h) */
-#undef HAVE_SNPRINTF /* remove GCC macro redefine warning */
-#endif
-#include 
-
-
-/* test font initialization */
-#define FONT_INIT_CHECK() \
-    if(!(*(int*)PyFONT_C_API[2])) \
-        return RAISE(pgExc_SDLError, "font system not initialized")
-
-
-
-#define PYGAMEAPI_FONT_FIRSTSLOT 0
-#define PYGAMEAPI_FONT_NUMSLOTS 3
-typedef struct {
-    PyObject_HEAD
-    TTF_Font* font;
-    PyObject* weakreflist;
-} PyFontObject;
-#define PyFont_AsFont(x) (((PyFontObject*)x)->font)
-
-#ifndef PYGAMEAPI_FONT_INTERNAL
-#define PyFont_Check(x) ((x)->ob_type == (PyTypeObject*)PyFONT_C_API[0])
-#define PyFont_Type (*(PyTypeObject*)PyFONT_C_API[0])
-#define PyFont_New (*(PyObject*(*)(TTF_Font*))PyFONT_C_API[1])
-/*slot 2 taken by FONT_INIT_CHECK*/
-
-#define import_pygame_font() \
-    _IMPORT_PYGAME_MODULE(font, FONT, PyFONT_C_API)
-
-static void* PyFONT_C_API[PYGAMEAPI_FONT_NUMSLOTS] = {NULL};
-#endif
-
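Note: font.h wraps SDL_ttf's TTF_Font behind the Python pygame.font module. A small sketch of the surface-producing API that this header backs; the font choice (None selects pygame's bundled default), size, text, and color are all illustrative:

    # font_demo.py - render text through the module font.h implements
    import pygame

    pygame.init()
    font = pygame.font.SysFont(None, 24)
    surface = font.render("SI_Projekt", True, (255, 255, 255))
    print(surface.get_size())
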
diff --git a/venv/include/site/python3.7/pygame/freetype.h b/venv/include/site/python3.7/pygame/freetype.h
deleted file mode 100644
index fda7226..0000000
--- a/venv/include/site/python3.7/pygame/freetype.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
-    pygame - Python Game Library
-    Copyright (C) 2009 Vicent Marti
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-*/
-#ifndef _PYGAME_FREETYPE_H_
-#define _PYGAME_FREETYPE_H_
-
-#define PGFT_PYGAME1_COMPAT
-#define HAVE_PYGAME_SDL_VIDEO
-#define HAVE_PYGAME_SDL_RWOPS
-
-#include "pygame.h"
-#include "pgcompat.h"
-
-#if PY3
-#    define IS_PYTHON_3
-#endif
-
-#include 
-#include FT_FREETYPE_H
-#include FT_CACHE_H
-#include FT_XFREE86_H
-#include FT_TRIGONOMETRY_H
-
-/**********************************************************
- * Global module constants
- **********************************************************/
-
-/* Render styles */
-#define FT_STYLE_NORMAL     0x00
-#define FT_STYLE_STRONG     0x01
-#define FT_STYLE_OBLIQUE    0x02
-#define FT_STYLE_UNDERLINE  0x04
-#define FT_STYLE_WIDE       0x08
-#define FT_STYLE_DEFAULT    0xFF
-
-/* Bounding box modes */
-#define FT_BBOX_EXACT           FT_GLYPH_BBOX_SUBPIXELS
-#define FT_BBOX_EXACT_GRIDFIT   FT_GLYPH_BBOX_GRIDFIT
-#define FT_BBOX_PIXEL           FT_GLYPH_BBOX_TRUNCATE
-#define FT_BBOX_PIXEL_GRIDFIT   FT_GLYPH_BBOX_PIXELS
-
-/* Rendering flags */
-#define FT_RFLAG_NONE                  (0)
-#define FT_RFLAG_ANTIALIAS             (1 << 0)
-#define FT_RFLAG_AUTOHINT              (1 << 1)
-#define FT_RFLAG_VERTICAL              (1 << 2)
-#define FT_RFLAG_HINTED                (1 << 3)
-#define FT_RFLAG_KERNING               (1 << 4)
-#define FT_RFLAG_TRANSFORM             (1 << 5)
-#define FT_RFLAG_PAD                   (1 << 6)
-#define FT_RFLAG_ORIGIN                (1 << 7)
-#define FT_RFLAG_UCS4                  (1 << 8)
-#define FT_RFLAG_USE_BITMAP_STRIKES    (1 << 9)
-#define FT_RFLAG_DEFAULTS              (FT_RFLAG_HINTED | \
-                                        FT_RFLAG_USE_BITMAP_STRIKES | \
-                                        FT_RFLAG_ANTIALIAS)
-
-
-#define FT_RENDER_NEWBYTEARRAY      0x0
-#define FT_RENDER_NEWSURFACE        0x1
-#define FT_RENDER_EXISTINGSURFACE   0x2
-
-/**********************************************************
- * Global module types
- **********************************************************/
-
-typedef struct _scale_s {
-    FT_UInt x, y;
-} Scale_t;
-typedef FT_Angle Angle_t;
-
-struct fontinternals_;
-struct freetypeinstance_;
-
-typedef struct {
-    FT_Long font_index;
-    FT_Open_Args open_args;
-} pgFontId;
-
-typedef struct {
-    PyObject_HEAD
-    pgFontId id;
-    PyObject *path;
-    int is_scalable;
-
-    Scale_t face_size;
-    FT_Int16 style;
-    FT_Int16 render_flags;
-    double strength;
-    double underline_adjustment;
-    FT_UInt resolution;
-    Angle_t rotation;
-    FT_Matrix transform;
-    FT_Byte fgcolor[4];
-
-    struct freetypeinstance_ *freetype; /* Personal reference */
-    struct fontinternals_ *_internals;
-} pgFontObject;
-
-#define pgFont_IS_ALIVE(o) \
-    (((pgFontObject *)(o))->_internals != 0)
-
-/**********************************************************
- * Module declaration
- **********************************************************/
-#define PYGAMEAPI_FREETYPE_FIRSTSLOT 0
-#define PYGAMEAPI_FREETYPE_NUMSLOTS 2
-
-#ifndef PYGAME_FREETYPE_INTERNAL
-
-#define pgFont_Check(x) ((x)->ob_type == (PyTypeObject*)PgFREETYPE_C_API[0])
-#define pgFont_Type (*(PyTypeObject*)PgFREETYPE_C_API[1])
-#define pgFont_New (*(PyObject*(*)(const char*, long))PgFREETYPE_C_API[1])
-
-#define import_pygame_freetype() \
-    _IMPORT_PYGAME_MODULE(freetype, FREETYPE, PgFREETYPE_C_API)
-
-static void *PgFREETYPE_C_API[PYGAMEAPI_FREETYPE_NUMSLOTS] = {0};
-#endif /* PYGAME_FREETYPE_INTERNAL */
-
-#endif /* _PYGAME_FREETYPE_H_ */
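Note: freetype.h is the newer FreeType-based text module that parallels pygame.font. A hedged equivalent of the font sketch above using pygame.freetype, whose render() returns a (Surface, Rect) pair rather than a bare surface; font choice, size, and text are again illustrative:

    # freetype_demo.py - render text through pygame.freetype
    import pygame.freetype

    pygame.freetype.init()
    font = pygame.freetype.SysFont(None, 24)
    surface, rect = font.render("SI_Projekt", (255, 255, 255))
    print(rect.size)
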
diff --git a/venv/include/site/python3.7/pygame/mask.h b/venv/include/site/python3.7/pygame/mask.h deleted file mode 100644 index b151dd4..0000000 --- a/venv/include/site/python3.7/pygame/mask.h +++ /dev/null @@ -1,25 +0,0 @@
-#include <Python.h>
-#include "bitmask.h"
-
-#define PYGAMEAPI_MASK_FIRSTSLOT 0
-#define PYGAMEAPI_MASK_NUMSLOTS 1
-#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API"
-
-typedef struct {
- PyObject_HEAD
- bitmask_t *mask;
-} pgMaskObject;
-
-#define pgMask_AsBitmap(x) (((pgMaskObject*)x)->mask)
-
-#ifndef PYGAMEAPI_MASK_INTERNAL
-
-#define pgMask_Type (*(PyTypeObject*)PyMASK_C_API[0])
-#define pgMask_Check(x) ((x)->ob_type == &pgMask_Type)
-
-#define import_pygame_mask() \
- _IMPORT_PYGAME_MODULE(mask, MASK, PyMASK_C_API)
-
-static void* PyMASK_C_API[PYGAMEAPI_MASK_NUMSLOTS] = {NULL};
-#endif /* #ifndef PYGAMEAPI_MASK_INTERNAL */
-
diff --git a/venv/include/site/python3.7/pygame/mixer.h b/venv/include/site/python3.7/pygame/mixer.h deleted file mode 100644 index 36d57f3..0000000 --- a/venv/include/site/python3.7/pygame/mixer.h +++ /dev/null @@ -1,65 +0,0 @@
-/*
- pygame - Python Game Library
- Copyright (C) 2000-2001 Pete Shinners
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Pete Shinners
- pete@shinners.org
-*/
-
-#include <Python.h>
-#include <SDL.h>
-#include <SDL_mixer.h>
-
-
-/* test mixer initializations */
-#define MIXER_INIT_CHECK() \
- if(!SDL_WasInit(SDL_INIT_AUDIO)) \
- return RAISE(pgExc_SDLError, "mixer not initialized")
-
-
-#define PYGAMEAPI_MIXER_FIRSTSLOT 0
-#define PYGAMEAPI_MIXER_NUMSLOTS 7
-typedef struct {
- PyObject_HEAD
- Mix_Chunk *chunk;
- Uint8 *mem;
- PyObject *weakreflist;
-} pgSoundObject;
-typedef struct {
- PyObject_HEAD
- int chan;
-} pgChannelObject;
-#define pgSound_AsChunk(x) (((pgSoundObject*)x)->chunk)
-#define pgChannel_AsInt(x) (((pgChannelObject*)x)->chan)
-
-#ifndef PYGAMEAPI_MIXER_INTERNAL
-#define pgSound_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[0])
-#define pgSound_Type (*(PyTypeObject*)pgMIXER_C_API[0])
-#define pgSound_New (*(PyObject*(*)(Mix_Chunk*))pgMIXER_C_API[1])
-#define pgSound_Play (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[2])
-#define pgChannel_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[3])
-#define pgChannel_Type (*(PyTypeObject*)pgMIXER_C_API[3])
-#define pgChannel_New (*(PyObject*(*)(int))pgMIXER_C_API[4])
-#define pgMixer_AutoInit (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[5])
-#define pgMixer_AutoQuit (*(void(*)(void))pgMIXER_C_API[6])
-
-#define import_pygame_mixer() \
- _IMPORT_PYGAME_MODULE(mixer, MIXER, pgMIXER_C_API)
-
-static void* pgMIXER_C_API[PYGAMEAPI_MIXER_NUMSLOTS] = {NULL};
-#endif
-
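pgSoundObject and pgChannelObject above are the C-level counterparts of pygame.mixer.Sound and pygame.mixer.Channel, and MIXER_INIT_CHECK is why every mixer call fails until the mixer is initialized. A minimal sketch of the Python-side flow (the .wav path is a placeholder):

    import pygame

    pygame.mixer.init()                     # without this, mixer calls raise pygame.error
    sound = pygame.mixer.Sound("beep.wav")  # wraps a Mix_Chunk (pgSoundObject above)
    channel = sound.play()                  # returns a Channel (pgChannelObject above)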
diff --git a/venv/include/site/python3.7/pygame/palette.h b/venv/include/site/python3.7/pygame/palette.h deleted file mode 100644 index 1ae4cf6..0000000 --- a/venv/include/site/python3.7/pygame/palette.h +++ /dev/null @@ -1,123 +0,0 @@
-/*
- pygame - Python Game Library
- Copyright (C) 2000-2001 Pete Shinners
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Pete Shinners
- pete@shinners.org
-*/
-
-#ifndef PALETTE_H
-#define PALETTE_H
-
-#include <SDL.h>
-
-/* SDL 2 does not assign a default palette color scheme to a new 8 bit
- * surface. Instead, the palette is set all white. This defines the SDL 1.2
- * default palette.
- */
-static const SDL_Color default_palette_colors[] = {
- {0, 0, 0, 255}, {0, 0, 85, 255}, {0, 0, 170, 255},
- {0, 0, 255, 255}, {0, 36, 0, 255}, {0, 36, 85, 255},
- {0, 36, 170, 255}, {0, 36, 255, 255}, {0, 73, 0, 255},
- {0, 73, 85, 255}, {0, 73, 170, 255}, {0, 73, 255, 255},
- {0, 109, 0, 255}, {0, 109, 85, 255}, {0, 109, 170, 255},
- {0, 109, 255, 255}, {0, 146, 0, 255}, {0, 146, 85, 255},
- {0, 146, 170, 255}, {0, 146, 255, 255}, {0, 182, 0, 255},
- {0, 182, 85, 255}, {0, 182, 170, 255}, {0, 182, 255, 255},
- {0, 219, 0, 255}, {0, 219, 85, 255}, {0, 219, 170, 255},
- {0, 219, 255, 255}, {0, 255, 0, 255}, {0, 255, 85, 255},
- {0, 255, 170, 255}, {0, 255, 255, 255}, {85, 0, 0, 255},
- {85, 0, 85, 255}, {85, 0, 170, 255}, {85, 0, 255, 255},
- {85, 36, 0, 255}, {85, 36, 85, 255}, {85, 36, 170, 255},
- {85, 36, 255, 255}, {85, 73, 0, 255}, {85, 73, 85, 255},
- {85, 73, 170, 255}, {85, 73, 255, 255}, {85, 109, 0, 255},
- {85, 109, 85, 255}, {85, 109, 170, 255}, {85, 109, 255, 255},
- {85, 146, 0, 255}, {85, 146, 85, 255}, {85, 146, 170, 255},
- {85, 146, 255, 255}, {85, 182, 0, 255}, {85, 182, 85, 255},
- {85, 182, 170, 255}, {85, 182, 255, 255}, {85, 219, 0, 255},
- {85, 219, 85, 255}, {85, 219, 170, 255}, {85, 219, 255, 255},
- {85, 255, 0, 255}, {85, 255, 85, 255}, {85, 255, 170, 255},
- {85, 255, 255, 255}, {170, 0, 0, 255}, {170, 0, 85, 255},
- {170, 0, 170, 255}, {170, 0, 255, 255}, {170, 36, 0, 255},
- {170, 36, 85, 255}, {170, 36, 170, 255}, {170, 36, 255, 255},
- {170, 73, 0, 255}, {170, 73, 85, 255}, {170, 73, 170, 255},
- {170, 73, 255, 255}, {170, 109, 0, 255}, {170, 109, 85, 255},
- {170, 109, 170, 255}, {170, 109, 255, 255}, {170, 146, 0, 255},
- {170, 146, 85, 255}, {170, 146, 170, 255}, {170, 146, 255, 255},
- {170, 182, 0, 255}, {170, 182, 85, 255}, {170, 182, 170, 255},
- {170, 182, 255, 255}, {170, 219, 0, 255}, {170, 219, 85, 255},
- {170, 219, 170, 255}, {170, 219, 255, 255}, {170, 255, 0, 255},
- {170, 255, 85, 255}, {170, 255, 170, 255}, {170, 255, 255, 255},
- {255, 0, 0, 255}, {255, 0, 85, 255}, {255, 0, 170, 255},
- {255, 0, 255, 255}, {255, 36, 0, 255}, {255, 36, 85, 255},
- {255, 36, 170, 255}, {255, 36, 255, 255}, {255, 73, 0, 255},
- {255, 73, 85, 255}, {255, 73, 170, 255}, {255, 73, 255, 255},
- {255, 109, 0, 255}, {255, 109, 85, 255}, {255, 109, 170, 255},
- {255, 109, 255, 255}, {255, 146, 0, 255}, {255, 146, 85, 255},
- {255, 146, 170, 255}, {255, 146, 255, 255}, {255, 182, 0, 255},
- {255, 182, 85, 255}, {255, 182, 170, 255}, {255, 182, 255, 255},
- {255, 219, 0, 255}, {255, 219, 85, 255}, {255, 219, 170, 255},
- {255, 219, 255, 255}, {255, 255, 0, 255}, {255, 255, 85, 255},
- {255, 255, 170, 255}, {255, 255, 255, 255}, {0, 0, 0, 255},
- {0, 0, 85, 
255}, {0, 0, 170, 255}, {0, 0, 255, 255}, - {0, 36, 0, 255}, {0, 36, 85, 255}, {0, 36, 170, 255}, - {0, 36, 255, 255}, {0, 73, 0, 255}, {0, 73, 85, 255}, - {0, 73, 170, 255}, {0, 73, 255, 255}, {0, 109, 0, 255}, - {0, 109, 85, 255}, {0, 109, 170, 255}, {0, 109, 255, 255}, - {0, 146, 0, 255}, {0, 146, 85, 255}, {0, 146, 170, 255}, - {0, 146, 255, 255}, {0, 182, 0, 255}, {0, 182, 85, 255}, - {0, 182, 170, 255}, {0, 182, 255, 255}, {0, 219, 0, 255}, - {0, 219, 85, 255}, {0, 219, 170, 255}, {0, 219, 255, 255}, - {0, 255, 0, 255}, {0, 255, 85, 255}, {0, 255, 170, 255}, - {0, 255, 255, 255}, {85, 0, 0, 255}, {85, 0, 85, 255}, - {85, 0, 170, 255}, {85, 0, 255, 255}, {85, 36, 0, 255}, - {85, 36, 85, 255}, {85, 36, 170, 255}, {85, 36, 255, 255}, - {85, 73, 0, 255}, {85, 73, 85, 255}, {85, 73, 170, 255}, - {85, 73, 255, 255}, {85, 109, 0, 255}, {85, 109, 85, 255}, - {85, 109, 170, 255}, {85, 109, 255, 255}, {85, 146, 0, 255}, - {85, 146, 85, 255}, {85, 146, 170, 255}, {85, 146, 255, 255}, - {85, 182, 0, 255}, {85, 182, 85, 255}, {85, 182, 170, 255}, - {85, 182, 255, 255}, {85, 219, 0, 255}, {85, 219, 85, 255}, - {85, 219, 170, 255}, {85, 219, 255, 255}, {85, 255, 0, 255}, - {85, 255, 85, 255}, {85, 255, 170, 255}, {85, 255, 255, 255}, - {170, 0, 0, 255}, {170, 0, 85, 255}, {170, 0, 170, 255}, - {170, 0, 255, 255}, {170, 36, 0, 255}, {170, 36, 85, 255}, - {170, 36, 170, 255}, {170, 36, 255, 255}, {170, 73, 0, 255}, - {170, 73, 85, 255}, {170, 73, 170, 255}, {170, 73, 255, 255}, - {170, 109, 0, 255}, {170, 109, 85, 255}, {170, 109, 170, 255}, - {170, 109, 255, 255}, {170, 146, 0, 255}, {170, 146, 85, 255}, - {170, 146, 170, 255}, {170, 146, 255, 255}, {170, 182, 0, 255}, - {170, 182, 85, 255}, {170, 182, 170, 255}, {170, 182, 255, 255}, - {170, 219, 0, 255}, {170, 219, 85, 255}, {170, 219, 170, 255}, - {170, 219, 255, 255}, {170, 255, 0, 255}, {170, 255, 85, 255}, - {170, 255, 170, 255}, {170, 255, 255, 255}, {255, 0, 0, 255}, - {255, 0, 85, 255}, {255, 0, 170, 255}, {255, 0, 255, 255}, - {255, 36, 0, 255}, {255, 36, 85, 255}, {255, 36, 170, 255}, - {255, 36, 255, 255}, {255, 73, 0, 255}, {255, 73, 85, 255}, - {255, 73, 170, 255}, {255, 73, 255, 255}, {255, 109, 0, 255}, - {255, 109, 85, 255}, {255, 109, 170, 255}, {255, 109, 255, 255}, - {255, 146, 0, 255}, {255, 146, 85, 255}, {255, 146, 170, 255}, - {255, 146, 255, 255}, {255, 182, 0, 255}, {255, 182, 85, 255}, - {255, 182, 170, 255}, {255, 182, 255, 255}, {255, 219, 0, 255}, - {255, 219, 85, 255}, {255, 219, 170, 255}, {255, 219, 255, 255}, - {255, 255, 0, 255}, {255, 255, 85, 255}, {255, 255, 170, 255}, - {255, 255, 255, 255}}; - -static const int default_palette_size = - (int)(sizeof(default_palette_colors) / sizeof(SDL_Color)); - -#endif diff --git a/venv/include/site/python3.7/pygame/pgarrinter.h b/venv/include/site/python3.7/pygame/pgarrinter.h deleted file mode 100644 index 5ba096b..0000000 --- a/venv/include/site/python3.7/pygame/pgarrinter.h +++ /dev/null @@ -1,26 +0,0 @@ -/* array structure interface version 3 declarations */ - -#if !defined(PG_ARRAYINTER_HEADER) -#define PG_ARRAYINTER_HEADER - -static const int PAI_CONTIGUOUS = 0x01; -static const int PAI_FORTRAN = 0x02; -static const int PAI_ALIGNED = 0x100; -static const int PAI_NOTSWAPPED = 0x200; -static const int PAI_WRITEABLE = 0x400; -static const int PAI_ARR_HAS_DESCR = 0x800; - -typedef struct { - int two; /* contains the integer 2 -- simple sanity check */ - int nd; /* number of dimensions */ - char typekind; /* kind in array -- character code of typestr */ - int itemsize; /* 
size of each element */
- int flags; /* flags indicating how the data should be */
- /* interpreted */
- Py_intptr_t *shape; /* A length-nd array of shape information */
- Py_intptr_t *strides; /* A length-nd array of stride information */
- void *data; /* A pointer to the first element of the array */
- PyObject *descr; /* NULL or a data-description */
-} PyArrayInterface;
-
-#endif
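PyArrayInterface above is the C side of the version-3 array interface; at the Python level the same contract is the __array_interface__ dict, which NumPy consumes directly. A small sketch (the Exposer class is hypothetical, for illustration only):

    import numpy as np

    class Exposer:
        """Expose a raw buffer through the version-3 array interface."""
        def __init__(self, buf, shape, typestr):
            self.__array_interface__ = {
                "version": 3,        # matches the version-3 declarations above
                "shape": shape,      # -> the 'shape' member
                "typestr": typestr,  # -> 'typekind' plus 'itemsize'
                "data": buf,         # -> the 'data' pointer (via buffer protocol)
            }

    arr = np.asarray(Exposer(bytearray(12), (3, 4), "|u1"))
    print(arr.shape)  # (3, 4): a zero-copy view onto the bytearray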
diff --git a/venv/include/site/python3.7/pygame/pgbufferproxy.h b/venv/include/site/python3.7/pygame/pgbufferproxy.h deleted file mode 100644 index 92dc2f0..0000000 --- a/venv/include/site/python3.7/pygame/pgbufferproxy.h +++ /dev/null @@ -1,52 +0,0 @@
-/*
- pygame - Python Game Library
- Copyright (C) 2000-2001 Pete Shinners
- Copyright (C) 2007 Rene Dudfield, Richard Goedeken
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Pete Shinners
- pete@shinners.org
-*/
-
-/* Bufferproxy module C api.
- Depends on pygame.h being included first.
- */
-#if !defined(PG_BUFPROXY_HEADER)
-
-#define PYGAMEAPI_BUFPROXY_NUMSLOTS 4
-#define PYGAMEAPI_BUFPROXY_FIRSTSLOT 0
-
-#if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || defined(NO_PYGAME_C_API))
-static void *PgBUFPROXY_C_API[PYGAMEAPI_BUFPROXY_NUMSLOTS];
-
-typedef PyObject *(*_pgbufproxy_new_t)(PyObject *, getbufferproc);
-typedef PyObject *(*_pgbufproxy_get_obj_t)(PyObject *);
-typedef int (*_pgbufproxy_trip_t)(PyObject *);
-
-#define pgBufproxy_Type (*(PyTypeObject*)PgBUFPROXY_C_API[0])
-#define pgBufproxy_New (*(_pgbufproxy_new_t)PgBUFPROXY_C_API[1])
-#define pgBufproxy_GetParent \
- (*(_pgbufproxy_get_obj_t)PgBUFPROXY_C_API[2])
-#define pgBufproxy_Trip (*(_pgbufproxy_trip_t)PgBUFPROXY_C_API[3])
-#define pgBufproxy_Check(x) ((x)->ob_type == (pgBufproxy_Type))
-#define import_pygame_bufferproxy() \
- _IMPORT_PYGAME_MODULE(bufferproxy, BUFPROXY, PgBUFPROXY_C_API)
-
-#endif /* #if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || ...
*/
-
-#define PG_BUFPROXY_HEADER
-
-#endif /* #if !defined(PG_BUFPROXY_HEADER) */
diff --git a/venv/include/site/python3.7/pygame/pgcompat.h b/venv/include/site/python3.7/pygame/pgcompat.h deleted file mode 100644 index 9eb1b88..0000000 --- a/venv/include/site/python3.7/pygame/pgcompat.h +++ /dev/null @@ -1,195 +0,0 @@
-/* Python 2.x/3.x compatibility tools
- */
-
-#if !defined(PGCOMPAT_H)
-#define PGCOMPAT_H
-
-#if PY_MAJOR_VERSION >= 3
-
-#define PY3 1
-
-/* Define some aliases for the removed PyInt_* functions */
-#define PyInt_Check(op) PyLong_Check(op)
-#define PyInt_FromString PyLong_FromString
-#define PyInt_FromUnicode PyLong_FromUnicode
-#define PyInt_FromLong PyLong_FromLong
-#define PyInt_FromSize_t PyLong_FromSize_t
-#define PyInt_FromSsize_t PyLong_FromSsize_t
-#define PyInt_AsLong PyLong_AsLong
-#define PyInt_AsSsize_t PyLong_AsSsize_t
-#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
-#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
-#define PyInt_AS_LONG PyLong_AS_LONG
-#define PyNumber_Int PyNumber_Long
-
-/* Weakrefs flags changed in 3.x */
-#define Py_TPFLAGS_HAVE_WEAKREFS 0
-
-/* Module init function returns new module instance. */
-#define MODINIT_RETURN(x) return x
-#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC PyInit_##mod_name (void)
-#define DECREF_MOD(mod) Py_DECREF (mod)
-
-/* Type header differs. */
-#define TYPE_HEAD(x,y) PyVarObject_HEAD_INIT(x,y)
-
-/* Text interface. Use unicode strings. */
-#define Text_Type PyUnicode_Type
-#define Text_Check PyUnicode_Check
-
-#ifndef PYPY_VERSION
-#define Text_FromLocale(s) PyUnicode_DecodeLocale((s), "strict")
-#else /* PYPY_VERSION */
-/* workaround: missing function for pypy */
-#define Text_FromLocale PyUnicode_FromString
-#endif /* PYPY_VERSION */
-
-#define Text_FromUTF8 PyUnicode_FromString
-#define Text_FromUTF8AndSize PyUnicode_FromStringAndSize
-#define Text_FromFormat PyUnicode_FromFormat
-#define Text_GetSize PyUnicode_GetSize
-#define Text_GET_SIZE PyUnicode_GET_SIZE
-
-/* Binary interface. Use bytes. */
-#define Bytes_Type PyBytes_Type
-#define Bytes_Check PyBytes_Check
-#define Bytes_Size PyBytes_Size
-#define Bytes_AsString PyBytes_AsString
-#define Bytes_AsStringAndSize PyBytes_AsStringAndSize
-#define Bytes_FromStringAndSize PyBytes_FromStringAndSize
-#define Bytes_FromFormat PyBytes_FromFormat
-#define Bytes_AS_STRING PyBytes_AS_STRING
-#define Bytes_GET_SIZE PyBytes_GET_SIZE
-#define Bytes_AsDecodeObject PyBytes_AsDecodedObject
-
-#define Object_Unicode PyObject_Str
-
-#define IsTextObj(x) (PyUnicode_Check(x) || PyBytes_Check(x))
-
-/* Renamed builtins */
-#define BUILTINS_MODULE "builtins"
-#define BUILTINS_UNICODE "str"
-#define BUILTINS_UNICHR "chr"
-
-/* Defaults for unicode file path encoding */
-#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding
-#if defined(MS_WIN32)
-#define UNICODE_DEF_FS_ERROR "replace"
-#else
-#define UNICODE_DEF_FS_ERROR "surrogateescape"
-#endif
-
-#else /* #if PY_MAJOR_VERSION >= 3 */
-
-#define PY3 0
-
-/* Module init function returns nothing. */
-#define MODINIT_RETURN(x) return
-#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC init##mod_name (void)
-#define DECREF_MOD(mod)
-
-/* Type header differs. */
-#define TYPE_HEAD(x,y) \
- PyObject_HEAD_INIT(x) \
- 0,
-
-/* Text interface. Use ascii strings. 
*/ -#define Text_Type PyString_Type -#define Text_Check PyString_Check -#define Text_FromLocale PyString_FromString -#define Text_FromUTF8 PyString_FromString -#define Text_FromUTF8AndSize PyString_FromStringAndSize -#define Text_FromFormat PyString_FromFormat -#define Text_GetSize PyString_GetSize -#define Text_GET_SIZE PyString_GET_SIZE - -/* Binary interface. Use ascii strings. */ -#define Bytes_Type PyString_Type -#define Bytes_Check PyString_Check -#define Bytes_Size PyString_Size -#define Bytes_AsString PyString_AsString -#define Bytes_AsStringAndSize PyString_AsStringAndSize -#define Bytes_FromStringAndSize PyString_FromStringAndSize -#define Bytes_FromFormat PyString_FromFormat -#define Bytes_AS_STRING PyString_AS_STRING -#define Bytes_GET_SIZE PyString_GET_SIZE -#define Bytes_AsDecodedObject PyString_AsDecodedObject - -#define Object_Unicode PyObject_Unicode - -/* Renamed builtins */ -#define BUILTINS_MODULE "__builtin__" -#define BUILTINS_UNICODE "unicode" -#define BUILTINS_UNICHR "unichr" - -/* Defaults for unicode file path encoding */ -#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding -#define UNICODE_DEF_FS_ERROR "strict" - -#endif /* #if PY_MAJOR_VERSION >= 3 */ - -#define PY2 (!PY3) - -#define MODINIT_ERROR MODINIT_RETURN (NULL) - -/* Module state. These macros are used to define per-module macros. - * v - global state variable (Python 2.x) - * s - global state structure (Python 3.x) - */ -#define PY2_GETSTATE(v) (&(v)) -#define PY3_GETSTATE(s, m) ((struct s *) PyModule_GetState (m)) - -/* Pep 3123: Making PyObject_HEAD conform to standard C */ -#if !defined(Py_TYPE) -#define Py_TYPE(o) (((PyObject *)(o))->ob_type) -#define Py_REFCNT(o) (((PyObject *)(o))->ob_refcnt) -#define Py_SIZE(o) (((PyVarObject *)(o))->ob_size) -#endif - -/* Encode a unicode file path */ -#define Unicode_AsEncodedPath(u) \ - PyUnicode_AsEncodedString ((u), UNICODE_DEF_FS_CODEC, UNICODE_DEF_FS_ERROR) - -#define RELATIVE_MODULE(m) ("." m) - -#define HAVE_OLD_BUFPROTO PY2 - -#if !defined(PG_ENABLE_OLDBUF) /* allow for command line override */ -#if HAVE_OLD_BUFPROTO -#define PG_ENABLE_OLDBUF 1 -#else -#define PG_ENABLE_OLDBUF 0 -#endif -#endif - -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER -#define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif - -#ifndef Py_TPFLAGS_HAVE_CLASS -#define Py_TPFLAGS_HAVE_CLASS 0 -#endif - -#ifndef Py_TPFLAGS_CHECKTYPES -#define Py_TPFLAGS_CHECKTYPES 0 -#endif - -#if PY_VERSION_HEX >= 0x03020000 -#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \ - PySlice_GetIndicesEx(slice, length, start, stop, step, slicelength) -#else -#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \ - PySlice_GetIndicesEx((PySliceObject *)(slice), length, \ - start, stop, step, slicelength) -#endif - -/* Support new buffer protocol? */ -#if !defined(PG_ENABLE_NEWBUF) /* allow for command line override */ -#if !defined(PYPY_VERSION) -#define PG_ENABLE_NEWBUF 1 -#else -#define PG_ENABLE_NEWBUF 0 -#endif -#endif - -#endif /* #if !defined(PGCOMPAT_H) */ diff --git a/venv/include/site/python3.7/pygame/pgopengl.h b/venv/include/site/python3.7/pygame/pgopengl.h deleted file mode 100644 index 3c80dca..0000000 --- a/venv/include/site/python3.7/pygame/pgopengl.h +++ /dev/null @@ -1,16 +0,0 @@ -#if !defined(PGOPENGL_H) -#define PGOPENGL_H - -/** This header includes definitions of Opengl functions as pointer types for - ** use with the SDL function SDL_GL_GetProcAddress. 
- **/
-
-#if defined(_WIN32)
-#define GL_APIENTRY __stdcall
-#else
-#define GL_APIENTRY
-#endif
-
-typedef void (GL_APIENTRY *GL_glReadPixels_Func)(int, int, int, int, unsigned int, unsigned int, void*);
-
-#endif
diff --git a/venv/include/site/python3.7/pygame/pygame.h b/venv/include/site/python3.7/pygame/pygame.h deleted file mode 100644 index bcbf1d9..0000000 --- a/venv/include/site/python3.7/pygame/pygame.h +++ /dev/null @@ -1,34 +0,0 @@
-/*
- pygame - Python Game Library
- Copyright (C) 2000-2001 Pete Shinners
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Pete Shinners
- pete@shinners.org
-*/
-
-/* To allow the Pygame C api to be globally shared by all code within an
- * extension module built from multiple C files, only include the pygame.h
- * header within the top level C file, the one which calls the
- * 'import_pygame_*' macros. All other C source files of the module should
- * include _pygame.h instead.
- */
-#ifndef PYGAME_H
-#define PYGAME_H
-
-#include "_pygame.h"
-
-#endif
diff --git a/venv/include/site/python3.7/pygame/scrap.h b/venv/include/site/python3.7/pygame/scrap.h deleted file mode 100644 index b1b3856..0000000 --- a/venv/include/site/python3.7/pygame/scrap.h +++ /dev/null @@ -1,143 +0,0 @@
-/*
- pygame - Python Game Library
- Copyright (C) 2006, 2007 Rene Dudfield, Marcus von Appen
-
- Originally put in the public domain by Sam Lantinga.
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-/* This is unconditionally defined in Python.h */
-#if defined(_POSIX_C_SOURCE)
-#undef _POSIX_C_SOURCE
-#endif
-
-#include <Python.h>
-
-/* Handle clipboard text and data in arbitrary formats */
-
-/**
- * Predefined supported pygame scrap types.
- */
-#define PYGAME_SCRAP_TEXT "text/plain"
-#define PYGAME_SCRAP_BMP "image/bmp"
-#define PYGAME_SCRAP_PPM "image/ppm"
-#define PYGAME_SCRAP_PBM "image/pbm"
-
-/**
- * The supported scrap clipboard types.
- *
- * This is only relevant in a X11 environment, which supports mouse
- * selections as well. For Win32 and MacOS environments the default
- * clipboard is used, no matter what value is passed.
- */
-typedef enum
-{
- SCRAP_CLIPBOARD,
- SCRAP_SELECTION /* only supported in X11 environments. 
*/
-} ScrapClipType;
-
-/**
- * Macro for initialization checks.
- */
-#define PYGAME_SCRAP_INIT_CHECK() \
- if(!pygame_scrap_initialized()) \
- return (PyErr_SetString (pgExc_SDLError, \
- "scrap system not initialized."), NULL)
-
-/**
- * \brief Checks, whether the pygame scrap module was initialized.
- *
- * \return 1 if the module was initialized, 0 otherwise.
- */
-extern int
-pygame_scrap_initialized (void);
-
-/**
- * \brief Initializes the pygame scrap module internals. Call this before any
- * other method.
- *
- * \return 1 on successful initialization, 0 otherwise.
- */
-extern int
-pygame_scrap_init (void);
-
-/**
- * \brief Checks, whether the pygame window lost the clipboard focus or not.
- *
- * \return 1 if the window lost the focus, 0 otherwise.
- */
-extern int
-pygame_scrap_lost (void);
-
-/**
- * \brief Places content of a specific type into the clipboard.
- *
- * \note For X11 the following notes are important: The following types
- * are reserved for internal usage and thus will throw an error on
- * setting them: "TIMESTAMP", "TARGETS", "SDL_SELECTION".
- * Setting PYGAME_SCRAP_TEXT ("text/plain") will also automatically
- * set the X11 types "STRING" (XA_STRING), "TEXT" and "UTF8_STRING".
- *
- * For Win32 the following notes are important: Setting
- * PYGAME_SCRAP_TEXT ("text/plain") will also automatically set
- * the Win32 type "TEXT" (CF_TEXT).
- *
- * For QNX the following notes are important: Setting
- * PYGAME_SCRAP_TEXT ("text/plain") will also automatically set
- * the QNX type "TEXT" (Ph_CL_TEXT).
- *
- * \param type The type of the content.
- * \param srclen The length of the content.
- * \param src The NULL terminated content.
- * \return 1, if the content could be successfully pasted into the clipboard,
- * 0 otherwise.
- */
-extern int
-pygame_scrap_put (char *type, int srclen, char *src);
-
-/**
- * \brief Gets the current content from the clipboard.
- *
- * \note The received content does not need to be the content previously
- * placed in the clipboard using pygame_put_scrap(). See the
- * pygame_put_scrap() notes for more details.
- *
- * \param type The type of the content to receive.
- * \param count The size of the returned content.
- * \return The content or NULL in case of an error or if no content of the
- * specified type was available.
- */
-extern char*
-pygame_scrap_get (char *type, unsigned long *count);
-
-/**
- * \brief Gets the currently available content types from the clipboard.
- *
- * \return The different available content types or NULL in case of an
- * error or if no content type is available.
- */
-extern char**
-pygame_scrap_get_types (void);
-
-/**
- * \brief Checks whether content for the specified scrap type is currently
- * available in the clipboard.
- *
- * \param type The type to check for.
- * \return 1, if there is content and 0 otherwise.
- */
-extern int
-pygame_scrap_contains (char *type);
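At the Python level this clipboard API is pygame.scrap, with the PYGAME_SCRAP_* type strings exposed as pygame.SCRAP_TEXT and friends. A minimal sketch (pygame.scrap is experimental and needs an open display, hence the set_mode call):

    import pygame

    pygame.init()
    pygame.display.set_mode((100, 100))  # scrap requires an initialized display
    pygame.scrap.init()                  # cf. PYGAME_SCRAP_INIT_CHECK above
    pygame.scrap.put(pygame.SCRAP_TEXT, b"hello clipboard")
    print(pygame.scrap.get(pygame.SCRAP_TEXT))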
diff --git a/venv/include/site/python3.7/pygame/surface.h b/venv/include/site/python3.7/pygame/surface.h deleted file mode 100644 index cc5f071..0000000 --- a/venv/include/site/python3.7/pygame/surface.h +++ /dev/null @@ -1,383 +0,0 @@
-/*
- pygame - Python Game Library
- Copyright (C) 2000-2001 Pete Shinners
- Copyright (C) 2007 Marcus von Appen
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Pete Shinners
- pete@shinners.org
-*/
-
-#ifndef SURFACE_H
-#define SURFACE_H
-
-/* This is defined in SDL.h */
-#if defined(_POSIX_C_SOURCE)
-#undef _POSIX_C_SOURCE
-#endif
-
-#include <SDL.h>
-#include "pygame.h"
-
-/* Blend modes */
-#define PYGAME_BLEND_ADD 0x1
-#define PYGAME_BLEND_SUB 0x2
-#define PYGAME_BLEND_MULT 0x3
-#define PYGAME_BLEND_MIN 0x4
-#define PYGAME_BLEND_MAX 0x5
-
-#define PYGAME_BLEND_RGB_ADD 0x1
-#define PYGAME_BLEND_RGB_SUB 0x2
-#define PYGAME_BLEND_RGB_MULT 0x3
-#define PYGAME_BLEND_RGB_MIN 0x4
-#define PYGAME_BLEND_RGB_MAX 0x5
-
-#define PYGAME_BLEND_RGBA_ADD 0x6
-#define PYGAME_BLEND_RGBA_SUB 0x7
-#define PYGAME_BLEND_RGBA_MULT 0x8
-#define PYGAME_BLEND_RGBA_MIN 0x9
-#define PYGAME_BLEND_RGBA_MAX 0x10
-#define PYGAME_BLEND_PREMULTIPLIED 0x11
-
-
-
-
-
-#if SDL_BYTEORDER == SDL_LIL_ENDIAN
-#define GET_PIXEL_24(b) (b[0] + (b[1] << 8) + (b[2] << 16))
-#else
-#define GET_PIXEL_24(b) (b[2] + (b[1] << 8) + (b[0] << 16))
-#endif
-
-#define GET_PIXEL(pxl, bpp, source) \
- switch (bpp) \
- { \
- case 2: \
- pxl = *((Uint16 *) (source)); \
- break; \
- case 4: \
- pxl = *((Uint32 *) (source)); \
- break; \
- default: \
- { \
- Uint8 *b = (Uint8 *) source; \
- pxl = GET_PIXEL_24(b); \
- } \
- break; \
- }
-
-#if IS_SDLv1
-#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \
- _sR = ((px & fmt->Rmask) >> fmt->Rshift); \
- _sR = (_sR << fmt->Rloss) + (_sR >> (8 - (fmt->Rloss << 1))); \
- _sG = ((px & fmt->Gmask) >> fmt->Gshift); \
- _sG = (_sG << fmt->Gloss) + (_sG >> (8 - (fmt->Gloss << 1))); \
- _sB = ((px & fmt->Bmask) >> fmt->Bshift); \
- _sB = (_sB << fmt->Bloss) + (_sB >> (8 - (fmt->Bloss << 1))); \
- if (ppa) \
- { \
- _sA = ((px & fmt->Amask) >> fmt->Ashift); \
- _sA = (_sA << fmt->Aloss) + (_sA >> (8 - (fmt->Aloss << 1))); \
- } \
- else \
- { \
- _sA = 255; \
- }
-
-#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \
- sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \
- sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \
- sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \
- sa = 255;
-
-/* For 1 byte palette pixels */
-#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \
- *(px) = (Uint8) SDL_MapRGB(fmt, _dR, _dG, _dB)
-#else /* IS_SDLv2 */
-#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \
- SDL_GetRGBA(px, fmt, &(_sR), &(_sG), &(_sB), &(_sA)); \
- if (!ppa) { \
- _sA = 255; \
- }
-
-#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \
- sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \
- sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \
- sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \
- sa = 255;
-
-/* For 1 byte palette pixels */
-#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \
- *(px) = (Uint8) SDL_MapRGBA(fmt, _dR, _dG, _dB, _dA)
-#endif /* IS_SDLv2 */
-
-
-
-
-
-
-
-
-#if SDL_BYTEORDER == SDL_LIL_ENDIAN
-#define SET_OFFSETS_24(or, og, ob, fmt) \
- { \
- or = (fmt->Rshift == 0 ? 0 : \
- fmt->Rshift == 8 ? 1 : \
- 2 ); \
- og = (fmt->Gshift == 0 ? 0 : \
- fmt->Gshift == 8 ? 1 : \
- 2 ); \
- ob = (fmt->Bshift == 0 ? 0 : \
- fmt->Bshift == 8 ? 
1 : \ - 2 ); \ - } - -#define SET_OFFSETS_32(or, og, ob, fmt) \ - { \ - or = (fmt->Rshift == 0 ? 0 : \ - fmt->Rshift == 8 ? 1 : \ - fmt->Rshift == 16 ? 2 : \ - 3 ); \ - og = (fmt->Gshift == 0 ? 0 : \ - fmt->Gshift == 8 ? 1 : \ - fmt->Gshift == 16 ? 2 : \ - 3 ); \ - ob = (fmt->Bshift == 0 ? 0 : \ - fmt->Bshift == 8 ? 1 : \ - fmt->Bshift == 16 ? 2 : \ - 3 ); \ - } -#else -#define SET_OFFSETS_24(or, og, ob, fmt) \ - { \ - or = (fmt->Rshift == 0 ? 2 : \ - fmt->Rshift == 8 ? 1 : \ - 0 ); \ - og = (fmt->Gshift == 0 ? 2 : \ - fmt->Gshift == 8 ? 1 : \ - 0 ); \ - ob = (fmt->Bshift == 0 ? 2 : \ - fmt->Bshift == 8 ? 1 : \ - 0 ); \ - } - -#define SET_OFFSETS_32(or, og, ob, fmt) \ - { \ - or = (fmt->Rshift == 0 ? 3 : \ - fmt->Rshift == 8 ? 2 : \ - fmt->Rshift == 16 ? 1 : \ - 0 ); \ - og = (fmt->Gshift == 0 ? 3 : \ - fmt->Gshift == 8 ? 2 : \ - fmt->Gshift == 16 ? 1 : \ - 0 ); \ - ob = (fmt->Bshift == 0 ? 3 : \ - fmt->Bshift == 8 ? 2 : \ - fmt->Bshift == 16 ? 1 : \ - 0 ); \ - } -#endif - - -#define CREATE_PIXEL(buf, r, g, b, a, bp, ft) \ - switch (bp) \ - { \ - case 2: \ - *((Uint16 *) (buf)) = \ - ((r >> ft->Rloss) << ft->Rshift) | \ - ((g >> ft->Gloss) << ft->Gshift) | \ - ((b >> ft->Bloss) << ft->Bshift) | \ - ((a >> ft->Aloss) << ft->Ashift); \ - break; \ - case 4: \ - *((Uint32 *) (buf)) = \ - ((r >> ft->Rloss) << ft->Rshift) | \ - ((g >> ft->Gloss) << ft->Gshift) | \ - ((b >> ft->Bloss) << ft->Bshift) | \ - ((a >> ft->Aloss) << ft->Ashift); \ - break; \ - } - -/* Pretty good idea from Tom Duff :-). */ -#define LOOP_UNROLLED4(code, n, width) \ - n = (width + 3) / 4; \ - switch (width & 3) \ - { \ - case 0: do { code; \ - case 3: code; \ - case 2: code; \ - case 1: code; \ - } while (--n > 0); \ - } - -/* Used in the srcbpp == dstbpp == 1 blend functions */ -#define REPEAT_3(code) \ - code; \ - code; \ - code; - -#define REPEAT_4(code) \ - code; \ - code; \ - code; \ - code; - - -#define BLEND_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \ - tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \ - tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255); - -#define BLEND_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \ - tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \ - tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0); - -#define BLEND_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \ - dR = (dR && sR) ? (dR * sR) >> 8 : 0; \ - dG = (dG && sG) ? (dG * sG) >> 8 : 0; \ - dB = (dB && sB) ? (dB * sB) >> 8 : 0; - -#define BLEND_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \ - if(sR < dR) { dR = sR; } \ - if(sG < dG) { dG = sG; } \ - if(sB < dB) { dB = sB; } - -#define BLEND_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \ - if(sR > dR) { dR = sR; } \ - if(sG > dG) { dG = sG; } \ - if(sB > dB) { dB = sB; } - - - - - - -#define BLEND_RGBA_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \ - tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \ - tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255); \ - tmp = dA + sA; dA = (tmp <= 255 ? tmp : 255); - -#define BLEND_RGBA_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \ - tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \ - tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \ - tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0); \ - tmp = dA - sA; dA = (tmp >= 0 ? tmp : 0); - -#define BLEND_RGBA_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \ - dR = (dR && sR) ? (dR * sR) >> 8 : 0; \ - dG = (dG && sG) ? (dG * sG) >> 8 : 0; \ - dB = (dB && sB) ? (dB * sB) >> 8 : 0; \ - dA = (dA && sA) ? 
(dA * sA) >> 8 : 0;
-
-#define BLEND_RGBA_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \
- if(sR < dR) { dR = sR; } \
- if(sG < dG) { dG = sG; } \
- if(sB < dB) { dB = sB; } \
- if(sA < dA) { dA = sA; }
-
-#define BLEND_RGBA_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \
- if(sR > dR) { dR = sR; } \
- if(sG > dG) { dG = sG; } \
- if(sB > dB) { dB = sB; } \
- if(sA > dA) { dA = sA; }
-
-
-
-
-
-
-
-
-
-
-
-#if 1
-/* Choose an alpha blend equation. If the sign is preserved on a right shift
- * then use a specialized, faster, equation. Otherwise a more general form,
- * where all additions are done before the shift, is needed.
-*/
-#if (-1 >> 1) < 0
-#define ALPHA_BLEND_COMP(sC, dC, sA) ((((sC - dC) * sA + sC) >> 8) + dC)
-#else
-#define ALPHA_BLEND_COMP(sC, dC, sA) (((dC << 8) + (sC - dC) * sA + sC) >> 8)
-#endif
-
-#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \
- do { \
- if (dA) \
- { \
- dR = ALPHA_BLEND_COMP(sR, dR, sA); \
- dG = ALPHA_BLEND_COMP(sG, dG, sA); \
- dB = ALPHA_BLEND_COMP(sB, dB, sA); \
- dA = sA + dA - ((sA * dA) / 255); \
- } \
- else \
- { \
- dR = sR; \
- dG = sG; \
- dB = sB; \
- dA = sA; \
- } \
- } while(0)
-
-#define ALPHA_BLEND_PREMULTIPLIED_COMP(sC, dC, sA) (sC + dC - ((dC * sA) >> 8))
-
-#define ALPHA_BLEND_PREMULTIPLIED(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
- do { \
- tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sR, dR, sA); dR = (tmp > 255 ? 255 : tmp); \
- tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sG, dG, sA); dG = (tmp > 255 ? 255 : tmp); \
- tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sB, dB, sA); dB = (tmp > 255 ? 255 : tmp); \
- dA = sA + dA - ((sA * dA) / 255); \
- } while(0)
-#elif 0
-
-#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \
- do { \
- if(sA){ \
- if(dA && sA < 255){ \
- int dContrib = dA*(255 - sA)/255; \
- dA = sA+dA - ((sA*dA)/255); \
- dR = (dR*dContrib + sR*sA)/dA; \
- dG = (dG*dContrib + sG*sA)/dA; \
- dB = (dB*dContrib + sB*sA)/dA; \
- }else{ \
- dR = sR; \
- dG = sG; \
- dB = sB; \
- dA = sA; \
- } \
- } \
- } while(0)
-#endif
-
-int
-surface_fill_blend (SDL_Surface *surface, SDL_Rect *rect, Uint32 color,
- int blendargs);
-
-void
-surface_respect_clip_rect (SDL_Surface *surface, SDL_Rect *rect);
-
-int
-pygame_AlphaBlit (SDL_Surface * src, SDL_Rect * srcrect,
- SDL_Surface * dst, SDL_Rect * dstrect, int the_args);
-
-int
-pygame_Blit (SDL_Surface * src, SDL_Rect * srcrect,
- SDL_Surface * dst, SDL_Rect * dstrect, int the_args);
-
-#endif /* SURFACE_H */
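The two ALPHA_BLEND_COMP branches above are selected by whether '>>' preserves sign; when it does, both forms reduce to the same floor division, since ((dC << 8) + X) >> 8 == (X >> 8) + dC. A quick check of that equivalence, transliterating the two macro bodies (Python's '>>' is sign-preserving, matching the first branch):

    # Both forms of ALPHA_BLEND_COMP from surface.h, as plain functions.
    def fast(sC, dC, sA):
        return (((sC - dC) * sA + sC) >> 8) + dC

    def general(sC, dC, sA):
        return ((dC << 8) + (sC - dC) * sA + sC) >> 8

    # They agree for every 8-bit source, destination and alpha sample tried.
    assert all(fast(s, d, a) == general(s, d, a)
               for s in range(0, 256, 15)
               for d in range(0, 256, 15)
               for a in range(0, 256, 15))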
diff --git a/venv/lib/python3.7/site-packages/easy-install.pth b/venv/lib/python3.7/site-packages/easy-install.pth deleted file mode 100644 index b74fe2e..0000000 --- a/venv/lib/python3.7/site-packages/easy-install.pth +++ /dev/null @@ -1,2 +0,0 @@
-./setuptools-40.8.0-py3.7.egg
-./pip-19.0.3-py3.7.egg
diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@
-pip
diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/METADATA b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/METADATA deleted file mode 100644 index d87023b..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/METADATA +++ /dev/null @@ -1,56 +0,0 @@
-Metadata-Version: 2.1
-Name: numpy
-Version: 1.18.2
-Summary: NumPy is the fundamental package for array computing with Python.
-Home-page: https://www.numpy.org
-Author: Travis E. Oliphant et al.
-Maintainer: NumPy Developers
-Maintainer-email: numpy-discussion@python.org
-License: BSD
-Download-URL: https://pypi.python.org/pypi/numpy
-Project-URL: Bug Tracker, https://github.com/numpy/numpy/issues
-Project-URL: Documentation, https://docs.scipy.org/doc/numpy/
-Project-URL: Source Code, https://github.com/numpy/numpy
-Platform: Windows
-Platform: Linux
-Platform: Solaris
-Platform: Mac OS-X
-Platform: Unix
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Science/Research
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved
-Classifier: Programming Language :: C
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Topic :: Software Development
-Classifier: Topic :: Scientific/Engineering
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Unix
-Classifier: Operating System :: MacOS
-Requires-Python: >=3.5
-
-It provides:
-
-- a powerful N-dimensional array object
-- sophisticated (broadcasting) functions
-- tools for integrating C/C++ and Fortran code
-- useful linear algebra, Fourier transform, and random number capabilities
-- and much more
-
-Besides its obvious scientific uses, NumPy can also be used as an efficient
-multi-dimensional container of generic data. Arbitrary data-types can be
-defined. This allows NumPy to seamlessly and speedily integrate with a wide
-variety of databases.
-
-All NumPy wheels distributed on PyPI are BSD licensed.
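The "sophisticated (broadcasting) functions" this metadata mentions are the rules that let arrays of different shapes combine element-wise; a one-liner illustration:

    import numpy as np

    col = np.arange(3).reshape(3, 1)  # shape (3, 1)
    row = np.arange(4)                # shape (4,)
    print((col * 10 + row).shape)     # (3, 4): the shapes broadcast together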
- - - diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/RECORD b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/RECORD deleted file mode 100644 index 636a01b..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/RECORD +++ /dev/null @@ -1,852 +0,0 @@ -../../../bin/f2py,sha256=-BAA-2fWt2Kcos0jeHtabPXud3qyCZ-1dOc8lU3tJJI,258 -../../../bin/f2py3,sha256=-BAA-2fWt2Kcos0jeHtabPXud3qyCZ-1dOc8lU3tJJI,258 -../../../bin/f2py3.7,sha256=-BAA-2fWt2Kcos0jeHtabPXud3qyCZ-1dOc8lU3tJJI,258 -numpy-1.18.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -numpy-1.18.2.dist-info/METADATA,sha256=TuIVILC5R4ELDt_vDZ9g3IIi-6phvFDaLXXa_jKh1oM,2057 -numpy-1.18.2.dist-info/RECORD,, -numpy-1.18.2.dist-info/WHEEL,sha256=AhV6RMqZ2IDfreRJKo44QWYxYeP-0Jr0bezzBLQ1eog,109 -numpy-1.18.2.dist-info/entry_points.txt,sha256=MA6o_IjpQrpZlNNxq1yxwYV0u_I689RuoWedrJLsZnk,113 -numpy-1.18.2.dist-info/top_level.txt,sha256=4J9lbBMLnAiyxatxh8iRKV5Entd_6-oqbO7pzJjMsPw,6 -numpy/.libs/libgfortran-ed201abd.so.3.0.0,sha256=-wq9A9a6iPJfgojsh9Fi4vj6Br_EwUqr7W5Pc4giOYg,1023960 -numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so,sha256=yHuhchYklHB9dvBnMyw8DDkIvR3ApKIE_LPaeGklZw4,29724672 -numpy/LICENSE.txt,sha256=kL0gtRLFMt0qE0tusWLm-rVSSW0Uy3UA-f0l8ZEVikk,45692 -numpy/__config__.py,sha256=l-kYBVT3VpoLPbr8_dilDgG-Z1l-VOLtHHFd2vCF8fw,1646 -numpy/__init__.py,sha256=Ited5sCQ_GQpr_n6rXbUxiF6PsLBQHuBs6VZuTdX9iY,8858 -numpy/__pycache__/__config__.cpython-37.pyc,, -numpy/__pycache__/__init__.cpython-37.pyc,, -numpy/__pycache__/_distributor_init.cpython-37.pyc,, -numpy/__pycache__/_globals.cpython-37.pyc,, -numpy/__pycache__/_pytesttester.cpython-37.pyc,, -numpy/__pycache__/conftest.cpython-37.pyc,, -numpy/__pycache__/ctypeslib.cpython-37.pyc,, -numpy/__pycache__/dual.cpython-37.pyc,, -numpy/__pycache__/matlib.cpython-37.pyc,, -numpy/__pycache__/setup.cpython-37.pyc,, -numpy/__pycache__/version.cpython-37.pyc,, -numpy/_distributor_init.py,sha256=IgPkSK3H9bgjFeUfWuXhjKrgetQl5ztUW-rTyjGHK3c,331 -numpy/_globals.py,sha256=p8xxERZsxjGPUWV9pMY3jz75NZxDLppGeKaHbYGCDqM,2379 -numpy/_pytesttester.py,sha256=JQAw-aDSd7hl9dPpeIvD7eRbrMppI9sFeYQEgqpTqx8,6980 -numpy/compat/__init__.py,sha256=MHle4gJcrXh1w4SNv0mz5rbUTAjAzHnyO3rtbSW3AUo,498 -numpy/compat/__pycache__/__init__.cpython-37.pyc,, -numpy/compat/__pycache__/_inspect.cpython-37.pyc,, -numpy/compat/__pycache__/py3k.cpython-37.pyc,, -numpy/compat/__pycache__/setup.cpython-37.pyc,, -numpy/compat/_inspect.py,sha256=xEImUFhm4VAzT2LJj2Va_yDAHJsdy0RwSi1JwOOhykU,7513 -numpy/compat/py3k.py,sha256=EWeA4IONUTXhTcTJ7wEh2xoECE5knqPI1VzEfSTyY_8,7097 -numpy/compat/setup.py,sha256=REJcwNU7EbfwBFS1FHazGJcUhh50_5gYttr3BSczCiM,382 -numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/compat/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/compat/tests/__pycache__/test_compat.cpython-37.pyc,, -numpy/compat/tests/test_compat.py,sha256=KtCVafV8yN5g90tIIe7T9f5ruAs5Y0DNa64d040Rx5s,542 -numpy/conftest.py,sha256=HHIMNsYUUp2eensC63LtRYy_NZC1su1tbtN26rnrg5E,2749 -numpy/core/__init__.py,sha256=MM3QX8fvUwztExd4zaHTdgvXxE8yr4ZMkr4SlcGD7QI,4925 -numpy/core/__pycache__/__init__.cpython-37.pyc,, -numpy/core/__pycache__/_add_newdocs.cpython-37.pyc,, -numpy/core/__pycache__/_asarray.cpython-37.pyc,, -numpy/core/__pycache__/_dtype.cpython-37.pyc,, -numpy/core/__pycache__/_dtype_ctypes.cpython-37.pyc,, -numpy/core/__pycache__/_exceptions.cpython-37.pyc,, -numpy/core/__pycache__/_internal.cpython-37.pyc,, 
-numpy/core/__pycache__/_methods.cpython-37.pyc,, -numpy/core/__pycache__/_string_helpers.cpython-37.pyc,, -numpy/core/__pycache__/_type_aliases.cpython-37.pyc,, -numpy/core/__pycache__/_ufunc_config.cpython-37.pyc,, -numpy/core/__pycache__/arrayprint.cpython-37.pyc,, -numpy/core/__pycache__/cversions.cpython-37.pyc,, -numpy/core/__pycache__/defchararray.cpython-37.pyc,, -numpy/core/__pycache__/einsumfunc.cpython-37.pyc,, -numpy/core/__pycache__/fromnumeric.cpython-37.pyc,, -numpy/core/__pycache__/function_base.cpython-37.pyc,, -numpy/core/__pycache__/generate_numpy_api.cpython-37.pyc,, -numpy/core/__pycache__/getlimits.cpython-37.pyc,, -numpy/core/__pycache__/machar.cpython-37.pyc,, -numpy/core/__pycache__/memmap.cpython-37.pyc,, -numpy/core/__pycache__/multiarray.cpython-37.pyc,, -numpy/core/__pycache__/numeric.cpython-37.pyc,, -numpy/core/__pycache__/numerictypes.cpython-37.pyc,, -numpy/core/__pycache__/overrides.cpython-37.pyc,, -numpy/core/__pycache__/records.cpython-37.pyc,, -numpy/core/__pycache__/setup.cpython-37.pyc,, -numpy/core/__pycache__/setup_common.cpython-37.pyc,, -numpy/core/__pycache__/shape_base.cpython-37.pyc,, -numpy/core/__pycache__/umath.cpython-37.pyc,, -numpy/core/__pycache__/umath_tests.cpython-37.pyc,, -numpy/core/_add_newdocs.py,sha256=LqccpEMz9ETDG4jXOTrBnol3wUO0hTw0I1JDSOUsUE8,202937 -numpy/core/_asarray.py,sha256=NH0SPZr_pBMKOJgyy6dsfmKOQPy3r31hlzFG5bP1yYA,9940 -numpy/core/_dtype.py,sha256=lhiLEajO4UQ0wGSY52T4KtLdylFfCaAQs-YV6Ru-hNM,10053 -numpy/core/_dtype_ctypes.py,sha256=EiTjqVsDSibpbS8pkvzres86E9er1aFaflsss9N3Uao,3448 -numpy/core/_exceptions.py,sha256=MbGfp_yuOifOpZRppfk-DA9dL07AVv7blO0i63OX8lU,6259 -numpy/core/_internal.py,sha256=pwHot3zvS_5qcO_INVPk7gpM1YkNK1A5K8M1NyF1ghc,26469 -numpy/core/_methods.py,sha256=g8AnOnA3CdC4qe7s7N_pG3OcaW-YKhXmRz8FmLNnpG0,8399 -numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so,sha256=9Ewrq9nU6CKSUR5MXAqcCz_HcxI9Y4v_UsJsW7zNSsY,580203 -numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so,sha256=2wzZ2EtGMJjDaycOULGHZqZFUr_KZwApuza_yjReE1o,21507704 -numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so,sha256=kawkN-3Gn6UQNAFv5B_M3JmCr4yeL8RSI8-a7Xz6gz8,31319 -numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so,sha256=UrPyPujhte6FfTbtswWq8Bei_xGz8A3CqDf6PCxg0Ls,270173 -numpy/core/_string_helpers.py,sha256=NGGGhaFdU5eGiUAj3GTIBoOgWs4r9aTNlsE2r9NgX6Q,2855 -numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so,sha256=a6SlGjJLfa6wyV5Bs14o_ZnVN_txdltect3Ffk7x5HE,34727 -numpy/core/_type_aliases.py,sha256=FA2Pz5OKqcLl1QKLJNu-ETHIzQ1ii3LH5pSdHhZkfZA,9181 -numpy/core/_ufunc_config.py,sha256=yQ9RSST7_TagO8EYDZG5g23gz7loX76a0ajCU5HfYRI,14219 -numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so,sha256=l8pu1J2kNgM6hlXTbfbQEze7-fonaZMzxS0jj8RpW3Q,85900 -numpy/core/arrayprint.py,sha256=WuIViYKXL-qr000rKTQhss9swe3nsKlG2Jc0mfuiS10,59774 -numpy/core/cversions.py,sha256=ukYNpkei0Coi7DOcbroXuDoXc6kl5odxmcy_39pszA0,413 -numpy/core/defchararray.py,sha256=HJU2o-dQbiwglIwIv8MRSEDB6p4p2PE9Aq67IQ47aEQ,70980 -numpy/core/einsumfunc.py,sha256=94J-3sQQWoCzYGwUlsEIHD6B3Qjv481XUD2jd0KClGY,51271 -numpy/core/fromnumeric.py,sha256=_d9szuykDMfWhYjBl5tIcD81G7KNz9l4PMyvfxyzO64,117694 -numpy/core/function_base.py,sha256=jgKa0iHIzpUUy8T9XXlIEbI8XO0xeh1olG409kdM2qo,18344 -numpy/core/generate_numpy_api.py,sha256=0JBYTvekUeJyhp7QMKtWJSK-L6lVNhev16y0F2qX2pU,7470 -numpy/core/getlimits.py,sha256=X26A-6nrzC1FH1wtCggX-faIw0WMYYkPH1_983h4hCE,18914 
-numpy/core/include/numpy/__multiarray_api.h,sha256=SQEcRelzaunap6-uUl3E21qUanrFOBcC1PiQITpVU0Y,61920 -numpy/core/include/numpy/__ufunc_api.h,sha256=fWkLh84HH3fN99gOJoZ10bZEpaO3VGT9aNpTu-2zblI,12179 -numpy/core/include/numpy/_neighborhood_iterator_imp.h,sha256=hNiUJ3gmJRxdjByk5R5jmLeBKpNfaP_29KLHFuTrSIA,1861 -numpy/core/include/numpy/_numpyconfig.h,sha256=bDiTLQ972ZWQBEpx6OM8riS64nSAelKa2kIimnXm_Ss,1010 -numpy/core/include/numpy/arrayobject.h,sha256=SXj-2avTHV8mNWvv7sOYHLKkRKcafDG7_HNpQNot1GE,164 -numpy/core/include/numpy/arrayscalars.h,sha256=vC7QCznlT8vkyvxbIh4QNwi1LR7UkP7GJ1j_0ZiJa1E,3509 -numpy/core/include/numpy/halffloat.h,sha256=ohvyl3Kz3mB1hW3MRzxwPDH-0L9WWM_eKhvYLjtT_2w,1878 -numpy/core/include/numpy/multiarray_api.txt,sha256=qG593ym4jzzsPHIkFfKSTxK1XrrICKTLb9qGIto1fxc,56884 -numpy/core/include/numpy/ndarrayobject.h,sha256=E737J_1YQI-igbXcbA3kdbwsMqTv1aXcy6bp5aE0P_0,11496 -numpy/core/include/numpy/ndarraytypes.h,sha256=Lelck68SVrCPhxTAGURh_AyOth5txewU6xp2f556lLg,65105 -numpy/core/include/numpy/noprefix.h,sha256=YE-lWegAdZKI5lf44AW5jiWbnmO6hircWzj_WMFrLT4,6786 -numpy/core/include/numpy/npy_1_7_deprecated_api.h,sha256=LLeZKLuJADU3RDfT04pu5FCxCBU5cEzY5Q9phR_HL78,4715 -numpy/core/include/numpy/npy_3kcompat.h,sha256=exFgMT6slmo2Zg3bFsY3mKLUrrkg3KU_66gUmu5IYKk,14666 -numpy/core/include/numpy/npy_common.h,sha256=R-LMbpQDZJ4XXKDeXvI58WFKgkEiljDDgDMl6Yk_KTI,37943 -numpy/core/include/numpy/npy_cpu.h,sha256=3frXChwN0Cxca-sAeTTOJCiZ6_2q1EuggUwqEotdXLg,3879 -numpy/core/include/numpy/npy_endian.h,sha256=HHanBydLvLC2anJJySvy6wZ_lYaC_xI6GNwT8cJ78rE,2596 -numpy/core/include/numpy/npy_interrupt.h,sha256=Eyddk806h30jxgymbr44b7eIZKrHXtNzXpPtUPp2Ng8,3439 -numpy/core/include/numpy/npy_math.h,sha256=VFv-sN9Dnm3wmnZoHoGJO5lFyJECbQfipzJgJj1p5vA,23139 -numpy/core/include/numpy/npy_no_deprecated_api.h,sha256=X-wRYdpuwIuerTnBblKjR7Dqsv8rqxn01RFLVWUHvi8,567 -numpy/core/include/numpy/npy_os.h,sha256=cEvEvpD92EeFjsjRelw1dXJaHYL-0yPJDuz3VeSJs4E,817 -numpy/core/include/numpy/numpyconfig.h,sha256=mHTx0sXeXNcaq0wWcP-8hGFUWvoG_2AHFKub59KJGm4,1327 -numpy/core/include/numpy/old_defines.h,sha256=7eiZoi7JrdVT9LXKCoeta5AoIncGa98GcVlWqDrLjwk,6306 -numpy/core/include/numpy/oldnumeric.h,sha256=Yo-LiSzVfDK2YyhlH41ff4gS0m-lv8XjI4JcAzpdy94,708 -numpy/core/include/numpy/random/bitgen.h,sha256=Gfrwd0M0odkpRJXw7QXJgVxb5XCw3iDXacWE_h-F_uM,389 -numpy/core/include/numpy/random/distributions.h,sha256=nbbdQ6X-lsdyzo7bmss4i3kg354GnkYQGGfYld_x6HM,9633 -numpy/core/include/numpy/ufunc_api.txt,sha256=RTz9blLHbWMCWMaiPeJyqt9d93nHJXJT7RiTf-bbMO4,6937 -numpy/core/include/numpy/ufuncobject.h,sha256=GpAJZKRnE08xRy5IOJD8r8i6Xz1nltg-iEMl3Frqsyk,12746 -numpy/core/include/numpy/utils.h,sha256=KqJzngAvarYV3oZQu5fY0ARPVihUP7FsZjdljysaSUk,729 -numpy/core/lib/libnpymath.a,sha256=aWHXyaoHHxnrPzhrK9HtatrDwlmjZKQHfT7278_T7tk,355952 -numpy/core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147 -numpy/core/lib/npy-pkg-config/npymath.ini,sha256=kamUNrYKAmXqQa8BcNv7D5sLqHh6bnChM0_5rZCsTfY,360 -numpy/core/machar.py,sha256=P8Ae9aOzoTUMWWiAXgE0Uf5Vk837DTODV5ndQLvm5zU,10860 -numpy/core/memmap.py,sha256=RVD10EyH-4jgzrTy3Xc_mXsJrvt-QMGGLmY7Aoqmy7I,11590 -numpy/core/multiarray.py,sha256=7yvhC6SVcF-MGwX5PwsSmV7jMfObe4gldkNI6lqsyvY,53002 -numpy/core/numeric.py,sha256=xV7Lo8i9bcILM4GGrryguiQAWzCuJJdM99CKkLndcQE,71955 -numpy/core/numerictypes.py,sha256=fCQuWSy6vshZHh4YP4oz9n3ysSHl-HSaGMjEzmVVQdY,17918 -numpy/core/overrides.py,sha256=_OoaYi35e6xJ9QCOeMuJlZmuU0efF47pJAXmTgWeHrU,7481 
-numpy/core/records.py,sha256=xOCgmcTtTLjBaOYtjae9t-DtvpqFjFJwg_c5ZgHZ0xs,30928 -numpy/core/setup.py,sha256=eVqe4s7YjhH8bSgsGSjXKBF2BZVj5vOeiexbh_M3ibE,42069 -numpy/core/setup_common.py,sha256=z3oR0UKy8sbt0rHq7TEjzwkitQNsfKw7T69LD18qTbY,19365 -numpy/core/shape_base.py,sha256=VXd2RUcUoxp4mcLQWxNszD-ygubCS8xp9ZOHYhnxddY,28964 -numpy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/core/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/core/tests/__pycache__/_locales.cpython-37.pyc,, -numpy/core/tests/__pycache__/test__exceptions.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_abc.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_api.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_arrayprint.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_datetime.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_defchararray.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_deprecations.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_dtype.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_einsum.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_errstate.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_extint128.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_function_base.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_getlimits.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_half.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_indexerrors.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_indexing.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_issue14735.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_item_selection.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_longdouble.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_machar.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_mem_overlap.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_memmap.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_multiarray.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_nditer.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_numeric.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_numerictypes.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_overrides.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_print.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_records.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalar_ctors.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalar_methods.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarbuffer.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarinherit.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarmath.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_scalarprint.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_shape_base.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_ufunc.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_umath.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_umath_accuracy.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_umath_complex.cpython-37.pyc,, -numpy/core/tests/__pycache__/test_unicode.cpython-37.pyc,, -numpy/core/tests/_locales.py,sha256=GQro3bha8c5msgQyvNzmDUrNwqS2cGkKKuN4gg4c6tI,2266 -numpy/core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 -numpy/core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 -numpy/core/tests/data/umath-validation-set-README,sha256=-1JRNN1zx8S1x9l4D0786USSRMNt3Dk0nsOMg6O7CiM,959 
-numpy/core/tests/data/umath-validation-set-cos,sha256=qIka8hARvhXZOu9XR3CnGiPnOdrkAaxEgFgEEqus06s,24703 -numpy/core/tests/data/umath-validation-set-exp,sha256=GZn7cZRKAjskJ4l6tcvDF53I3e9zegQH--GPzYib9_g,4703 -numpy/core/tests/data/umath-validation-set-log,sha256=gDbicMaonc26BmtHPoyvunUvXrSFLV9BY8L1QVoH5Dw,4088 -numpy/core/tests/data/umath-validation-set-sin,sha256=fMEynY6dZz18jtuRdpfOJT9KnpRSWd9ilcz0oXMwgCQ,24690 -numpy/core/tests/test__exceptions.py,sha256=8XVPAkXmYh9dHiN5XhQk4D_r_l71cYpejg_ueTscrRI,1495 -numpy/core/tests/test_abc.py,sha256=cpIqt3VFBZLHbuNpO4NuyCGgd--k1zij5aasu7FV77I,2402 -numpy/core/tests/test_api.py,sha256=RIlRUqB_lRM0xcrEAdLRdDRWWk-0O7bUcEJfPCHyNl4,19224 -numpy/core/tests/test_arrayprint.py,sha256=zoNxYH3h7VnMFtU1vt67ujPuRCAQkQ1VmXKhTo0Juqw,34400 -numpy/core/tests/test_datetime.py,sha256=LT_KGIp6AyqAryB289cKW4_xTQ44Egb6JriGNHiB_g8,108148 -numpy/core/tests/test_defchararray.py,sha256=L5EoOBTZVrRU1Vju5IhY8BSUlBOGPzEViKJwyQSlpXo,25481 -numpy/core/tests/test_deprecations.py,sha256=vcbHCQUx7_Um0pPofOLY-3u4AaF1ABIVmZsJBCXnjWw,22466 -numpy/core/tests/test_dtype.py,sha256=gkDXeJFWFcYHu5Sw5b6Wbyl_xbkkssOYdx5EdjLhEHA,49663 -numpy/core/tests/test_einsum.py,sha256=gMWQQ9yfSdEUlY0db4e-I2seD7n99xToiN-g6tB3TBE,44736 -numpy/core/tests/test_errstate.py,sha256=84S9ragkp2xqJ5s8uNEt1-4SGs99t3pkPVMHYc4QL-s,1505 -numpy/core/tests/test_extint128.py,sha256=-0zEInkai1qRhXI0bdHCguU_meD3s6Td4vUIBwirYQI,5709 -numpy/core/tests/test_function_base.py,sha256=r45sHfslz-e8qgn10PT8elVEBjeXEGk7xsaW-s4tjvY,13268 -numpy/core/tests/test_getlimits.py,sha256=2fBK7Slo67kP6bThcN9bOKmeX9gGPQVUE17jGVydoXk,4427 -numpy/core/tests/test_half.py,sha256=83O_R-Frt8mx2-7WEbmoVXLWJ5Dc5SH9n0vyPJ9Wp_I,22301 -numpy/core/tests/test_indexerrors.py,sha256=0Ku3Sy5jcaE3D2KsyDrFTvgQzMv2dyWja3hc4t5-n_k,4857 -numpy/core/tests/test_indexing.py,sha256=0-I5M5NCgDgHM58Myxp1vpOaulm7_s3n4K82_BeDihk,51366 -numpy/core/tests/test_issue14735.py,sha256=JADt-FhIdq6MaVAfVI_ACI9EpfpqylFdDrZ3A95NW1w,728 -numpy/core/tests/test_item_selection.py,sha256=0Ocg_RzeQjNqwIaPhb_Zk0ZlmqSjIBY0lHeef_H9l9U,3579 -numpy/core/tests/test_longdouble.py,sha256=C-Uaz8ho6YfvNFf5hy1HbbIfZ4mMsw0zdH1bZ60shV0,12321 -numpy/core/tests/test_machar.py,sha256=FrKeGhC7j-z9tApS_uI1E0DUkzieKIdUHMQPfCSM0t8,1141 -numpy/core/tests/test_mem_overlap.py,sha256=AyBz4pm7HhTDdlW2pq9FR1AO0E5QAYdKpBoWbOdSrco,29505 -numpy/core/tests/test_memmap.py,sha256=sFJ6uaf6ior1Hzjg7Y-VYzYPHnuZOYmNczOBa-_GgSY,7607 -numpy/core/tests/test_multiarray.py,sha256=SDfgwGmfH4lAKkCEafEsfX1ERP7tVs4jELXOInzwihI,315998 -numpy/core/tests/test_nditer.py,sha256=VYOj7XD87yjArRSxPThhMeF-Kz5tC3hmav9glLbPkKM,112098 -numpy/core/tests/test_numeric.py,sha256=0SLdicNj0ODq6bh8FpO89FZAHPTs3XpJuI3jrNxMRNs,117625 -numpy/core/tests/test_numerictypes.py,sha256=8C-_WrUHnnRcXyDjAHLevt6FZ8LO51ZVPY-ohP0FVQA,19635 -numpy/core/tests/test_overrides.py,sha256=rkP2O-8MYssKR4y6gKkNxz2LyaYvnCuHn6eOEYtJzsc,14619 -numpy/core/tests/test_print.py,sha256=Q53dqbjQQIlCzRp_1ZY0A-ptP7FlbBZVPeMeMLX0cVg,6876 -numpy/core/tests/test_records.py,sha256=CJu2VaBWsNQrYpCSS0HAV2aKv8Ow0Zfc5taegRslVW0,19651 -numpy/core/tests/test_regression.py,sha256=S8IS6iH19hsT41Ms33Bj1btMAkd2iVz2sXXHS98qcq8,88558 -numpy/core/tests/test_scalar_ctors.py,sha256=kjyYllJHyhMQGT49Xbjjc2tuFHXcQIM-PAZExMWczq8,2294 -numpy/core/tests/test_scalar_methods.py,sha256=n3eNfQ-NS6ODGfJFrww-RSKVm9QzRKeDRp0ae4TzQJ8,4220 -numpy/core/tests/test_scalarbuffer.py,sha256=M-xSWyn2ts_O4d69kWAuEEzupY6AZ6YpLI31Gxlvjn4,3556 
-numpy/core/tests/test_scalarinherit.py,sha256=vIZqnyg99o3BsEQQHsiYxzedXIf4wSr9qVwqur_C-VU,1807 -numpy/core/tests/test_scalarmath.py,sha256=U-h1wclwyDaFpoASPrRq6qW2YJ1nAUW__XF6fNUzbjs,28807 -numpy/core/tests/test_scalarprint.py,sha256=SPTkscqlnApyqaOUZ5cgC2rDgGED6hPBtdRkWXxXlbE,15470 -numpy/core/tests/test_shape_base.py,sha256=B4869KCdnSxSTcTmqFhOPU2eRjmzOuG0fwVa3jrGyg8,24993 -numpy/core/tests/test_ufunc.py,sha256=LHGt9_It2-GP79B5dnEE4WhZQjTOxz99gmiVCndcHmA,81054 -numpy/core/tests/test_umath.py,sha256=Yb3SHIavyTSAJoQrNbpW9obBnSkbmosbvOa0b86DYpY,117248 -numpy/core/tests/test_umath_accuracy.py,sha256=GCvLPNmGeVCJcDpYst4Q21_0IkJGygdjMD8mBVlH_H8,2647 -numpy/core/tests/test_umath_complex.py,sha256=zvjC9COuHSZ_6BL3lz2iP7UppkNWL8ThP04fj0eulUQ,19413 -numpy/core/tests/test_unicode.py,sha256=PvWt5NLjgwulCgXakHEKMJ2pSpTLbUWgz9dZExEcSJ8,13656 -numpy/core/umath.py,sha256=KAWy8e3HN7CMF6bPfQ_MCL36bDuU7UeS39tlxaFAeto,1905 -numpy/core/umath_tests.py,sha256=Sr6VQTbH-sOMlXy-tg1-Unht7MKaaV4wtAYR6mQYNbU,455 -numpy/ctypeslib.py,sha256=_y3WO60jLJaHAaDbVj2PNF4jZ4X8EOqih14fvJffOVI,17443 -numpy/distutils/__config__.py,sha256=l-kYBVT3VpoLPbr8_dilDgG-Z1l-VOLtHHFd2vCF8fw,1646 -numpy/distutils/__init__.py,sha256=gsPLMHtEHdGbVbA9_LBfVAjnwo9n0j29aqEkCmehE7Y,1625 -numpy/distutils/__pycache__/__config__.cpython-37.pyc,, -numpy/distutils/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/__pycache__/_shell_utils.cpython-37.pyc,, -numpy/distutils/__pycache__/ccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/compat.cpython-37.pyc,, -numpy/distutils/__pycache__/conv_template.cpython-37.pyc,, -numpy/distutils/__pycache__/core.cpython-37.pyc,, -numpy/distutils/__pycache__/cpuinfo.cpython-37.pyc,, -numpy/distutils/__pycache__/exec_command.cpython-37.pyc,, -numpy/distutils/__pycache__/extension.cpython-37.pyc,, -numpy/distutils/__pycache__/from_template.cpython-37.pyc,, -numpy/distutils/__pycache__/intelccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/lib2def.cpython-37.pyc,, -numpy/distutils/__pycache__/line_endings.cpython-37.pyc,, -numpy/distutils/__pycache__/log.cpython-37.pyc,, -numpy/distutils/__pycache__/mingw32ccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/misc_util.cpython-37.pyc,, -numpy/distutils/__pycache__/msvc9compiler.cpython-37.pyc,, -numpy/distutils/__pycache__/msvccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/npy_pkg_config.cpython-37.pyc,, -numpy/distutils/__pycache__/numpy_distribution.cpython-37.pyc,, -numpy/distutils/__pycache__/pathccompiler.cpython-37.pyc,, -numpy/distutils/__pycache__/setup.cpython-37.pyc,, -numpy/distutils/__pycache__/system_info.cpython-37.pyc,, -numpy/distutils/__pycache__/unixccompiler.cpython-37.pyc,, -numpy/distutils/_shell_utils.py,sha256=kMLOIoimB7PdFRgoVxCIyCFsIl1pP3d0hkm_s3E9XdA,2613 -numpy/distutils/ccompiler.py,sha256=qlwbbVN_0Qsw4gpx8tCyMAy_9a146XHHkJCFRNKKvP8,27660 -numpy/distutils/command/__init__.py,sha256=l5r9aYwIEq1D-JJc8WFUxABk6Ip28FpRK_ok7wSLRZE,1098 -numpy/distutils/command/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/command/__pycache__/autodist.cpython-37.pyc,, -numpy/distutils/command/__pycache__/bdist_rpm.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_clib.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_ext.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_py.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_scripts.cpython-37.pyc,, -numpy/distutils/command/__pycache__/build_src.cpython-37.pyc,, 
-numpy/distutils/command/__pycache__/config.cpython-37.pyc,, -numpy/distutils/command/__pycache__/config_compiler.cpython-37.pyc,, -numpy/distutils/command/__pycache__/develop.cpython-37.pyc,, -numpy/distutils/command/__pycache__/egg_info.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install_clib.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install_data.cpython-37.pyc,, -numpy/distutils/command/__pycache__/install_headers.cpython-37.pyc,, -numpy/distutils/command/__pycache__/sdist.cpython-37.pyc,, -numpy/distutils/command/autodist.py,sha256=m5BGbaBPrBjbp3U_lGD35BS_yUxjarB9S9wAwTxgGvw,3041 -numpy/distutils/command/bdist_rpm.py,sha256=rhhIyFzkd5NGi6lZaft44EBPZB3zZFRDc75klJYnbw8,775 -numpy/distutils/command/build.py,sha256=0sB5J4vmeEL6CBpvCo8EVVRx9CnM3HYR1fddv7uQIh0,1448 -numpy/distutils/command/build_clib.py,sha256=YaWxa26hf_D7qI2rv-utAPQWFf99UEBfe9uJxT_YT2c,13800 -numpy/distutils/command/build_ext.py,sha256=fiTsl8O8dBODimXtG-RAVHMA764ea_aNo3gvQ_6Nv-4,26434 -numpy/distutils/command/build_py.py,sha256=7TBGLz0va0PW6sEX-aUjsXdzvhuSbJGgIrMim1JTwu4,1210 -numpy/distutils/command/build_scripts.py,sha256=ze19jHBhC3JggKLbL9wgs9I3mG7ls-V2NbykvleNwgQ,1731 -numpy/distutils/command/build_src.py,sha256=4lOovmHAoo_vDC7RkuxZccEyQUjmelxW-J8KL2wEadk,31246 -numpy/distutils/command/config.py,sha256=ZziDEAnaHskht8MYCHA0BSEcHny-byOiDPx_P8YfhZ0,20473 -numpy/distutils/command/config_compiler.py,sha256=SKJTEk_Y_Da-dVYOHAdf4c3yXxjlE1dsr-hJxY0m0PU,4435 -numpy/distutils/command/develop.py,sha256=nYM5yjhKtGKh_3wZwrvEQBLYHKldz64aU-0iSycSkXA,641 -numpy/distutils/command/egg_info.py,sha256=pdiCFQiQuIpf_xmVk9Njl7iowY9CxGn9KRbU-A9eBfg,987 -numpy/distutils/command/install.py,sha256=-y7bHvwoQdDCMGdLONawqnOWKtwQzjp5v-vSpZ7PdYU,3144 -numpy/distutils/command/install_clib.py,sha256=rGCajxbqAZjsYWg3l5B7ZRgcHJzFtYAiUHZH-DO64eU,1465 -numpy/distutils/command/install_data.py,sha256=7iWTw93ty2sBPwHwg_EEhgQhZSZe6SsKdfTS9RbUR9A,914 -numpy/distutils/command/install_headers.py,sha256=NbZwt-Joo80z_1TfxA-mIWXm2L9Mmh4ZLht7HAuveoo,985 -numpy/distutils/command/sdist.py,sha256=tHmlb0RzD8x04dswPXEua9H_b6GuHWY1V3hYkwJDKvA,799 -numpy/distutils/compat.py,sha256=xzkW8JgJgGTmye34QCYTIkLfsXBvmPu4tvgCwXNdiU0,218 -numpy/distutils/conv_template.py,sha256=0BFDE5IToW3sMVMzSRjmgENs2PAKyt7Wnvm2gyFrKnU,9750 -numpy/distutils/core.py,sha256=9GNNyWDTCqfnD7Jp2tzp9vOBVyeJmF8lsgv_xdlt59g,8230 -numpy/distutils/cpuinfo.py,sha256=onN3xteqf2G5IgKwRCYDG0VucoQY8sCTMUJ0nhc5QT0,23013 -numpy/distutils/exec_command.py,sha256=PKHgZ-hESpsBM8vnUhPknPRioAc6hLvsJzcOQoey-zo,10918 -numpy/distutils/extension.py,sha256=hXpEH2aP6ItaqNms1RW6TA1tSi0z37abrFpnyKXcjcA,3495 -numpy/distutils/fcompiler/__init__.py,sha256=-9uYUvrMwdxy0jetB-T-QHSwmWcobNRL5u0Bbj0Sm4w,40157 -numpy/distutils/fcompiler/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/absoft.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/compaq.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/environment.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/g95.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/gnu.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/hpux.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/ibm.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/intel.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/lahey.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/mips.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/nag.cpython-37.pyc,, 
-numpy/distutils/fcompiler/__pycache__/none.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/pathf95.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/pg.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/sun.cpython-37.pyc,, -numpy/distutils/fcompiler/__pycache__/vast.cpython-37.pyc,, -numpy/distutils/fcompiler/absoft.py,sha256=AKbj5uGr8dpGDLzRIJbdUnXXAtF_5k4JqnqwTWvy-tQ,5565 -numpy/distutils/fcompiler/compaq.py,sha256=SlIcqV82SrmOSVMZCYdSyhtglSl3doAoxDCcjq1hbkE,4109 -numpy/distutils/fcompiler/environment.py,sha256=1AziWo5qkxOFClEnChTFnUMIShtNCwHQa2xidjorjKk,3078 -numpy/distutils/fcompiler/g95.py,sha256=K68RRAvOvyKoh-jsD9J4ZDsHltrGnJ_AllxULhy6iOE,1396 -numpy/distutils/fcompiler/gnu.py,sha256=oHipJDyfisSK9_Kdkv1Av8hDHY3UbLALgWfBO7cXkPA,20804 -numpy/distutils/fcompiler/hpux.py,sha256=xpNfy7vCKWPnJ5M3JPnjMAewKBAfKN5hFX3hvEL2zaM,1419 -numpy/distutils/fcompiler/ibm.py,sha256=3q-AZ3TC3VjRxNyvkeIGN81SDWtHDH9iddfd8hqk4x4,3607 -numpy/distutils/fcompiler/intel.py,sha256=WlsBtvZnLpFke7oTpMCDYFlccNSUWWkB2p422iwQURU,6861 -numpy/distutils/fcompiler/lahey.py,sha256=pJ0-xgtYwyYXgt8JlN8PFeYYEWB3vOmFkNx6UUFXzuM,1393 -numpy/distutils/fcompiler/mips.py,sha256=IxLojWR1oi0VW93PxPpHQXRwZcYffD1dunllQW2w19A,1780 -numpy/distutils/fcompiler/nag.py,sha256=eiTvBopdCgVh5-HDTryVbRrYvf4r_Sqse1mruTt5Blo,2608 -numpy/distutils/fcompiler/none.py,sha256=N6adoFAf8inIQfCDEBzK5cGI3hLIWWpHmQXux8iJDfA,824 -numpy/distutils/fcompiler/pathf95.py,sha256=Xf1JMB30PDSoNpA1Y-vKPRBeNO0XfSi0dvVQvvdjfUQ,1127 -numpy/distutils/fcompiler/pg.py,sha256=G0uNPfedmbkYWfChg1UbxBKqo25RenzSVJN1BUtRDw0,4232 -numpy/distutils/fcompiler/sun.py,sha256=21DQ6Rprr9rEp4pp7Np8kCwOc0Xfqdxa1iX0O-yPJPM,1643 -numpy/distutils/fcompiler/vast.py,sha256=LJ21-WIJsiquLtjdDaNsJqblwN5wuM2FZsYl1R40vN8,1733 -numpy/distutils/from_template.py,sha256=k5PrP9If_X8J5Fsh9vR2h0Tcj2JsZC9EsC2h8fGfaXs,8027 -numpy/distutils/intelccompiler.py,sha256=1qzr6PMxi0UkR0NUY3rt3gqww9GwJ-Gbe91yxQKlieU,4291 -numpy/distutils/lib2def.py,sha256=YyVORDcNVb-Wzn_ibQXIqeQlAdXQQsLY3XfwtvhnLnE,3710 -numpy/distutils/line_endings.py,sha256=jrYG8SnOyMN0lvQim4Kf6ChoHdtaWO0egeTUUHtPoQA,2085 -numpy/distutils/log.py,sha256=6wgjYylV3BPEYc0NV8V3MIeKHxmlj0cP5UsDjTe6YS4,2796 -numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=cbsN3Lk9Hkwzr9c-yOP2xEBg1_ml1X7nwAMDWxGjzc8,77 -numpy/distutils/mingw32ccompiler.py,sha256=k-2SpajodL5Ey8ZbmiKQpXPhABe7UD0PJilEWbh8gH4,25411 -numpy/distutils/misc_util.py,sha256=DK1mEpnYeSsF70lgCuF7H3a5z3cgVWACAiJqz-dIzrM,84707 -numpy/distutils/msvc9compiler.py,sha256=TuPYjPFp3nYQSIG1goNxuOly7o3VMx-H35POMpycB3k,2258 -numpy/distutils/msvccompiler.py,sha256=7EUlHbgdKBBJG3AzgE94AQeUFnj0HcD6M7_YPN7vdCs,1994 -numpy/distutils/npy_pkg_config.py,sha256=RQZnr78rmA-dMIxOnibBMBMsGqsZUBK3Hnx-J8UQl8I,13152 -numpy/distutils/numpy_distribution.py,sha256=lbnEW1OxWxC_1n2sKd0Q3fC5QnNdFuAkNAlvXF99zIQ,700 -numpy/distutils/pathccompiler.py,sha256=FjNouOTL8u4gLMbJW7GdT0RlsD2nXV1_SEBNZj9QdpQ,779 -numpy/distutils/setup.py,sha256=q3DcCZNkK_jHsC0imocewd4uCKQWWXjkzd4nkBmkMFI,611 -numpy/distutils/system_info.py,sha256=IcYgQX1CzFSspCUMq8yttCa2gPqsk09JhR_QWnpdDys,104759 -numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/distutils/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_exec_command.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_fcompiler.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-37.pyc,, 
-numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_from_template.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_misc_util.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_shell_utils.cpython-37.pyc,, -numpy/distutils/tests/__pycache__/test_system_info.cpython-37.pyc,, -numpy/distutils/tests/test_exec_command.py,sha256=U__8FXVF4WwYdf6ucgNzgYHGgUOIKhFWG9qoCr2GxGo,7483 -numpy/distutils/tests/test_fcompiler.py,sha256=5-wYZnqXW3RRegDmnQ_dKGIjHWXURz93wxLvGnoT-AQ,1377 -numpy/distutils/tests/test_fcompiler_gnu.py,sha256=O57uCEHeQIS0XF8GloEas3OlaOfmIHDWEtgYS_q3x48,2218 -numpy/distutils/tests/test_fcompiler_intel.py,sha256=fOjd_jv0Od6bZyzFf4YpZMcnFva0OZK7yJV_4Hebb6A,1140 -numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=5-Num0A3cN7_NS3BlAgYt174S-OGOWRLL9rXtv-h_fA,1176 -numpy/distutils/tests/test_from_template.py,sha256=SDYoe0XUpAayyEQDq7ZhrvEEz7U9upJDLYzhcdoVifc,1103 -numpy/distutils/tests/test_mingw32ccompiler.py,sha256=rMC8-IyBOiuZVfAoklV_KnD9qVeB_hFVvb5dStxfk08,1609 -numpy/distutils/tests/test_misc_util.py,sha256=8LIm12X83HmvgmpvJJ9inaU7FlGt287VwDM-rMKCOv4,3316 -numpy/distutils/tests/test_npy_pkg_config.py,sha256=wa0QMQ9JAye87t2gDbFaBHp0HGpNFgwxJrJ30ZrHvNk,2639 -numpy/distutils/tests/test_shell_utils.py,sha256=we9P8AvjCQky1NRDP3sXAJnNUek7rDmMR4Ar9cg9iSk,2030 -numpy/distutils/tests/test_system_info.py,sha256=gb99F0iX4pbKhjxCcdiby0bvFMzPwuUGlSj_VXnfpWk,8548 -numpy/distutils/unixccompiler.py,sha256=M7Hn3ANMo8iP-sZtSAebI3RCLp0ViRYxawAbck0hlQM,5177 -numpy/doc/__init__.py,sha256=BDpxTM0iw2F4thjBkYqjIXX57F5KfIaH8xMd67N6Jh0,574 -numpy/doc/__pycache__/__init__.cpython-37.pyc,, -numpy/doc/__pycache__/basics.cpython-37.pyc,, -numpy/doc/__pycache__/broadcasting.cpython-37.pyc,, -numpy/doc/__pycache__/byteswapping.cpython-37.pyc,, -numpy/doc/__pycache__/constants.cpython-37.pyc,, -numpy/doc/__pycache__/creation.cpython-37.pyc,, -numpy/doc/__pycache__/dispatch.cpython-37.pyc,, -numpy/doc/__pycache__/glossary.cpython-37.pyc,, -numpy/doc/__pycache__/indexing.cpython-37.pyc,, -numpy/doc/__pycache__/internals.cpython-37.pyc,, -numpy/doc/__pycache__/misc.cpython-37.pyc,, -numpy/doc/__pycache__/structured_arrays.cpython-37.pyc,, -numpy/doc/__pycache__/subclassing.cpython-37.pyc,, -numpy/doc/__pycache__/ufuncs.cpython-37.pyc,, -numpy/doc/basics.py,sha256=bWasRQIE2QkLs-1MEhr_l1TQC_ZDZ4vnUUdxYkgz8wc,11252 -numpy/doc/broadcasting.py,sha256=eh6Gs3wGnc4Qpuw59qAa1wH-oIl6YtIjPEutyLsfIPQ,5595 -numpy/doc/byteswapping.py,sha256=OaEr35v3R__QWWETIlYKfqIyf_qtUm_qxityFIQ0Zrc,5375 -numpy/doc/constants.py,sha256=_n8_OUw7ZKKod6Ho7jtC_J-tSg1pZOBfMO2avPIz_88,9291 -numpy/doc/creation.py,sha256=6FUALDWgqPWObcW-ZHDQMAnfo42I60rRR9pDpwb4-YE,5496 -numpy/doc/dispatch.py,sha256=wLLHuxD4g552N3ot5M6uucEatFUaw3WmYVUa7Sdv-sI,10012 -numpy/doc/glossary.py,sha256=sj5-0X9pjaQEmaTCHAzsqIcVJL_T201E1Ex8v90QiAc,14777 -numpy/doc/indexing.py,sha256=gF3w0dZp7tCx0vKkOSELIBdNGfL1gPZqfiW3T_vj_4Q,16119 -numpy/doc/internals.py,sha256=xYp6lv4yyV0ZIo_qCvLCAWxDa0rhu7FNrTmpXY1isO4,9669 -numpy/doc/misc.py,sha256=JWJqyiYL2qoSMVAb0QC8w_Pm5l7ZLxx2Z9D5ilgU4Uo,6191 -numpy/doc/structured_arrays.py,sha256=28B7iMDrJvM1vjEHou73gXjRcldI5MAz7r4CaEouxmk,26509 -numpy/doc/subclassing.py,sha256=Ha0H-lWMEDWGBWEeP3ZAy_SYfXaImvoUhoDr6f-hYW8,28624 -numpy/doc/ufuncs.py,sha256=xYcK2hwnAUwVgOAmVouIOKXpZuG0LHRd5CYXzNBbv84,5425 
-numpy/dual.py,sha256=q17Lo5-3Y4_wNOkg7c7eqno9EdTTtvnz4XpF75HK2fw,1877 -numpy/f2py/__init__.py,sha256=jpo2CzWHgtnMcy0VWSlXR0ucIB_ZVE0ATInpDOReWFE,3138 -numpy/f2py/__main__.py,sha256=mnksAcMyLdK0So_DseQn0zalhnA7LflS7hHvo7QCVjU,134 -numpy/f2py/__pycache__/__init__.cpython-37.pyc,, -numpy/f2py/__pycache__/__main__.cpython-37.pyc,, -numpy/f2py/__pycache__/__version__.cpython-37.pyc,, -numpy/f2py/__pycache__/auxfuncs.cpython-37.pyc,, -numpy/f2py/__pycache__/capi_maps.cpython-37.pyc,, -numpy/f2py/__pycache__/cb_rules.cpython-37.pyc,, -numpy/f2py/__pycache__/cfuncs.cpython-37.pyc,, -numpy/f2py/__pycache__/common_rules.cpython-37.pyc,, -numpy/f2py/__pycache__/crackfortran.cpython-37.pyc,, -numpy/f2py/__pycache__/diagnose.cpython-37.pyc,, -numpy/f2py/__pycache__/f2py2e.cpython-37.pyc,, -numpy/f2py/__pycache__/f2py_testing.cpython-37.pyc,, -numpy/f2py/__pycache__/f90mod_rules.cpython-37.pyc,, -numpy/f2py/__pycache__/func2subr.cpython-37.pyc,, -numpy/f2py/__pycache__/rules.cpython-37.pyc,, -numpy/f2py/__pycache__/setup.cpython-37.pyc,, -numpy/f2py/__pycache__/use_rules.cpython-37.pyc,, -numpy/f2py/__version__.py,sha256=rEHB9hlWmpryhNa0EmMnlAlDCGI4GXILC9CZUEV3Wew,254 -numpy/f2py/auxfuncs.py,sha256=mDvaBo3Y8tYpXLZfq8DCv6UZ3-2JqWc_iNBZRxGesb0,21826 -numpy/f2py/capi_maps.py,sha256=buQRyA-zNXc5Azt6GLxqHTDw74gQb68BDStb7kYLs4A,31676 -numpy/f2py/cb_rules.py,sha256=un1xn8goj4jFL8FzxRwWSAzpr0CVcvwObVUKdIGJyaA,22946 -numpy/f2py/cfuncs.py,sha256=QqWwxZwW9Xk23673dI-RC6mfKVME34DCccHx4EAigTQ,45459 -numpy/f2py/common_rules.py,sha256=N2XFecZU_9iHjuL4Ehs0p92vJUcGBTSvAG4zi4zTwNE,5032 -numpy/f2py/crackfortran.py,sha256=onGQnPhpE8DyP4L4XinwHbdPwhXavetgPbKS3SG-REQ,128945 -numpy/f2py/diagnose.py,sha256=VNuNTGnQaXn9Fn2jlueYt47634CvLQSaAWJWy_Nxwnw,5295 -numpy/f2py/f2py2e.py,sha256=F9gKsZ1fI8h4lsNaBs_iqC92znNlZQMU6VjVC-AyZkA,24415 -numpy/f2py/f2py_testing.py,sha256=8rkBjUsNhBavpoBgi_bqDS8H8tBdd5BR8hrE6ENsIAo,1523 -numpy/f2py/f90mod_rules.py,sha256=YFK4MPkGHBxshAInbcapnumX3qlu0h6ya6GQpS8zWLk,9850 -numpy/f2py/func2subr.py,sha256=Oy12rqUa1vcXvzR6g8yx8jSYDwfKt5Jqiebf1QaWX1o,9224 -numpy/f2py/rules.py,sha256=sBUGQuWBmhEgCfcqCZuUmc-p433gVAbWim2wXl6z950,59120 -numpy/f2py/setup.py,sha256=bE-1KTXhPIAoAt4HXHW92chzNQc691AMpki3DQCQYAI,2434 -numpy/f2py/src/fortranobject.c,sha256=aoRy0d0vzgC6wJOAOYEadH5jExZKtTSMUeOO5HXirpA,36256 -numpy/f2py/src/fortranobject.h,sha256=ltMxueNeETQtEYSA_E7bpRtF8Jj1xuOBS-YNhjBMfOw,5227 -numpy/f2py/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/f2py/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_block_docstring.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_callback.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_common.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_compile_function.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_kind.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_mixed.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_parameter.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_quoted_character.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_character.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_complex.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_integer.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_return_logical.cpython-37.pyc,, 
-numpy/f2py/tests/__pycache__/test_return_real.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_size.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/test_string.cpython-37.pyc,, -numpy/f2py/tests/__pycache__/util.cpython-37.pyc,, -numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=8x5-BYpwiT0fYXwMpwyvu8IaESE1ABIWJNXOkk81QMk,7768 -numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29 -numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460 -numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499 -numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269 -numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130 -numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224 -numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347 -numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85 -numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179 -numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139 -numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939 -numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469 -numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612 -numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609 -numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610 -numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277 -numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815 -numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618 -numpy/f2py/tests/test_array_from_pyobj.py,sha256=gLSX9JuF_8NNboUQRzRF3IYC7pWJ06Mw8m6sy2wQvCQ,22083 -numpy/f2py/tests/test_assumed_shape.py,sha256=zS_LgeakxCOrb4t5m74pX86eBbBo9GhgF4Pnh2lXDig,1650 -numpy/f2py/tests/test_block_docstring.py,sha256=ld1G4pBEi8F4GrkYDpNBJKJdlfDANNI6tiKfBQS9I6w,647 -numpy/f2py/tests/test_callback.py,sha256=iRV0nslbJKovMmXPZed-w9QhNJYZfEo07p_8qneDDbU,3986 -numpy/f2py/tests/test_common.py,sha256=tLmi1JrfwFdTcBlUInxTn04f6Hf8eSB00sWRoKJvHrM,868 -numpy/f2py/tests/test_compile_function.py,sha256=WvOcUNqmRhf4KjplgcP-5s5a03020qhgfcjrhoGeaUk,4500 -numpy/f2py/tests/test_kind.py,sha256=G6u6EWjVHenmPju3RQCa9bSeCJGDul3VyXFgp2_Yc7w,1078 -numpy/f2py/tests/test_mixed.py,sha256=jojC-g_G21G-ACCqlYFuOxZokx8iHikBcmxQWEdWSSc,902 -numpy/f2py/tests/test_parameter.py,sha256=_wX-gM-XGxA_mfDBM8np9NLjYiCF6LJbglwKf09JbdM,3976 -numpy/f2py/tests/test_quoted_character.py,sha256=Q0oDtl3STQqzSap5VYPpfzJJ72NtQchm6Vg-bwuoBl4,1029 -numpy/f2py/tests/test_regression.py,sha256=lPQUKx5RrVtGhyIvIcWS5GgA_CgQypabuuna-Q1z3hs,764 -numpy/f2py/tests/test_return_character.py,sha256=4a_JeEtY1AkT-Q-01iaZyqWLDGmZGW17d88JNFZoXTc,3864 -numpy/f2py/tests/test_return_complex.py,sha256=FO4oflCncNIft36R3Fe9uiyDtryiB-_d2PLMH3x64I4,4779 -numpy/f2py/tests/test_return_integer.py,sha256=cyyAbyHUepwYeyXlgIa2FD4B7A2dHnpp2jwx8ZDQiZQ,4749 
-numpy/f2py/tests/test_return_logical.py,sha256=u3dazkOU1oz9kZKYXBd2GWaEr02MYfjGdLrb7kT8MiY,4974 -numpy/f2py/tests/test_return_real.py,sha256=QVRKzeO44ZuIlV8EycmtXaHT_i0rnX2bi3rOh7py4GM,5619 -numpy/f2py/tests/test_semicolon_split.py,sha256=v7YFx-oTbXUZZ4qjdblCYeVVtkD1YYa4CbuEf2LTOLs,1580 -numpy/f2py/tests/test_size.py,sha256=GV7S4tl8FhK60T_EpX86yVQo_bMVTdyOTB8fGVIQ24o,1352 -numpy/f2py/tests/test_string.py,sha256=LTQC9AFVsUAuJVFuH3Wltl-NfFIilVl0KvBNnEgdnmo,676 -numpy/f2py/tests/util.py,sha256=Wa3lwxZYuwByUkuWYq8phvikYypQehRzKOXd_0vYPPg,9764 -numpy/f2py/use_rules.py,sha256=L6nTSJnxougQ2PVAzR7s-1spidcfDp9tzLIFAJe3gUI,3652 -numpy/fft/__init__.py,sha256=zhieVvDXjjfIEHlZo_ta3OH6qFANuy_Wl1Arh1crX28,7587 -numpy/fft/__pycache__/__init__.cpython-37.pyc,, -numpy/fft/__pycache__/_pocketfft.cpython-37.pyc,, -numpy/fft/__pycache__/helper.cpython-37.pyc,, -numpy/fft/__pycache__/setup.cpython-37.pyc,, -numpy/fft/_pocketfft.py,sha256=TRYWW7fZB_ubxOwmRYE-Ok14N-ryllJh1W3gMzd1Ha0,47832 -numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so,sha256=zUEBGzvj-_s8JWAW_3c2lQGWBoIcffG50tQ9L0ax6lI,386852 -numpy/fft/helper.py,sha256=vrKPnvFngxaag3nQA-OWzB9qsQctBk6vXaKsuQVMU0k,6271 -numpy/fft/setup.py,sha256=XT8tvC_P5KUDyBgP5S6KWc63-Fmu_L86c2u-KDLWqxo,542 -numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/fft/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/fft/tests/__pycache__/test_helper.cpython-37.pyc,, -numpy/fft/tests/__pycache__/test_pocketfft.cpython-37.pyc,, -numpy/fft/tests/test_helper.py,sha256=Stwrak0FqjR3Wn41keelozyF_M45PL3jdhF3PjZVyIA,6326 -numpy/fft/tests/test_pocketfft.py,sha256=3rWWfY23nJyv7X_CUc8JWAGxTtug1_97scsjbFaujEg,9789 -numpy/lib/__init__.py,sha256=OcdEAprMAoTSp8psgeWH9jmZnh1QbkT29uY7Z4qcFzQ,1899 -numpy/lib/__pycache__/__init__.cpython-37.pyc,, -numpy/lib/__pycache__/_datasource.cpython-37.pyc,, -numpy/lib/__pycache__/_iotools.cpython-37.pyc,, -numpy/lib/__pycache__/_version.cpython-37.pyc,, -numpy/lib/__pycache__/arraypad.cpython-37.pyc,, -numpy/lib/__pycache__/arraysetops.cpython-37.pyc,, -numpy/lib/__pycache__/arrayterator.cpython-37.pyc,, -numpy/lib/__pycache__/financial.cpython-37.pyc,, -numpy/lib/__pycache__/format.cpython-37.pyc,, -numpy/lib/__pycache__/function_base.cpython-37.pyc,, -numpy/lib/__pycache__/histograms.cpython-37.pyc,, -numpy/lib/__pycache__/index_tricks.cpython-37.pyc,, -numpy/lib/__pycache__/mixins.cpython-37.pyc,, -numpy/lib/__pycache__/nanfunctions.cpython-37.pyc,, -numpy/lib/__pycache__/npyio.cpython-37.pyc,, -numpy/lib/__pycache__/polynomial.cpython-37.pyc,, -numpy/lib/__pycache__/recfunctions.cpython-37.pyc,, -numpy/lib/__pycache__/scimath.cpython-37.pyc,, -numpy/lib/__pycache__/setup.cpython-37.pyc,, -numpy/lib/__pycache__/shape_base.cpython-37.pyc,, -numpy/lib/__pycache__/stride_tricks.cpython-37.pyc,, -numpy/lib/__pycache__/twodim_base.cpython-37.pyc,, -numpy/lib/__pycache__/type_check.cpython-37.pyc,, -numpy/lib/__pycache__/ufunclike.cpython-37.pyc,, -numpy/lib/__pycache__/user_array.cpython-37.pyc,, -numpy/lib/__pycache__/utils.cpython-37.pyc,, -numpy/lib/_datasource.py,sha256=jYNwX7pKyn-N9KzpSmrfKWbT5dXci7-VtDk4pL-vCDs,25521 -numpy/lib/_iotools.py,sha256=Nkv-GMaSyzHfkZvLSJLLQ-8uyMRsdyy6seM-Mn0gqCs,32738 -numpy/lib/_version.py,sha256=BIGo2hWBan0Qxt5C3JoPi4TXLPUv0T-FU9366Qu_5XY,4972 -numpy/lib/arraypad.py,sha256=VNvHoD3NvnxbQ1rzujmVDWRGMt4bX-4-87g0wDaVvxA,31386 -numpy/lib/arraysetops.py,sha256=7iWnvYY9aUmr0J4aVqFf3hHH1G9gC-kUClD5KZbGmo8,24231 
-numpy/lib/arrayterator.py,sha256=FTXwwzs5xzPxpUbZmE3J0ChjgesJD9TiqBA_bCI05SI,7207 -numpy/lib/financial.py,sha256=YfHWv9em4_ZQg4m-AWSKJPcT43lilBQWzcX52c_q0j8,31590 -numpy/lib/format.py,sha256=QzW9kEcjjmDw8mPmEQk8_2NlcCxfb_lljy8ro_KxGf4,31632 -numpy/lib/function_base.py,sha256=5FwWTpP_ShwjjdgXQQOzeq5I04WvYUyow3YgcS5qXRY,156177 -numpy/lib/histograms.py,sha256=zSYkRkTfX_3PsDIdzarTimVChFxKooPxV0LYOkldY6g,39967 -numpy/lib/index_tricks.py,sha256=dW4TEm_KcPtBYB9EQWCFKogVai3kXkPOgeVVIeBRlJo,29706 -numpy/lib/mixins.py,sha256=6huDUGjzCFoeKrCS2pGnMPoQxpgWyoriIJ3xVwoqugQ,7233 -numpy/lib/nanfunctions.py,sha256=QPtwAIWQDv1IEilpyaKlpVSlqikn0djbMeXAhFJsc0E,58955 -numpy/lib/npyio.py,sha256=6Cwwet8pQusDj1msyv5qjI6lxLzgD5E2Iuvtlu6Zj0s,88031 -numpy/lib/polynomial.py,sha256=urWjdZ8dAvkFDKR-vkSImJIskhTXe9XlVCly0aCX7vM,40755 -numpy/lib/recfunctions.py,sha256=2hsE8JD4RI-HHL7dPG7ku6c9zFBeSJ2-7Z17Q3NiodI,56875 -numpy/lib/scimath.py,sha256=hulwijLlO0q230XOrD5SRjlTY-9O7c1u68CeNjTgNl8,14789 -numpy/lib/setup.py,sha256=os9eV9wSzwTQlfxeoQ33gYQ4wOj1_6EvqcROc8PyGbE,379 -numpy/lib/shape_base.py,sha256=2G5a_-b-8iRG9liNMc4yabCPKHniN9QHQC0HgATA4QE,38204 -numpy/lib/stride_tricks.py,sha256=rwTBZ3o0AS2KxwOLGLDmk_5w6EVUi-X1P9sDXpM7yqM,9291 -numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/lib/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test__datasource.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test__iotools.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test__version.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_arraypad.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_arraysetops.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_arrayterator.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_financial.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_format.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_function_base.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_histograms.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_index_tricks.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_io.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_mixins.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_nanfunctions.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_packbits.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_polynomial.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_recfunctions.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_shape_base.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_stride_tricks.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_twodim_base.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_type_check.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_ufunclike.cpython-37.pyc,, -numpy/lib/tests/__pycache__/test_utils.cpython-37.pyc,, -numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 -numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 -numpy/lib/tests/data/py3-objarr.npy,sha256=pTTVh8ezp-lwAK3fkgvdKU8Arp5NMKznVD-M6Ex_uA0,341 -numpy/lib/tests/data/py3-objarr.npz,sha256=qQR0gS57e9ta16d_vCQjaaKM74gPdlwCPkp55P-qrdw,449 -numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 -numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 -numpy/lib/tests/test__datasource.py,sha256=5LwfmvIysaLHlCYkmsj46S7YRF2zRG4BmKSjjJr6fdE,11463 
-numpy/lib/tests/test__iotools.py,sha256=P0FnwqfgYV4Nj9oEnwGm-vXYTS0A_5FRZNxFzvsL2qg,13885 -numpy/lib/tests/test__version.py,sha256=eCeeSqb8G3WNtCgkM3XGz9Zszyye-KFDlNQ7EY2J_UY,2055 -numpy/lib/tests/test_arraypad.py,sha256=5MNlIBrm3iLnJz0YPMvfmtTdG4utCBiNu_k0klKDgBA,54140 -numpy/lib/tests/test_arraysetops.py,sha256=M-pzWVCkCuFi0a8OpUOoXYz7OxMLud5dLPLRmo7cMyk,22367 -numpy/lib/tests/test_arrayterator.py,sha256=run7iWWbvoHGGsDv_uB6G8QENFzOCSgUIxAMVp7ZMu4,1357 -numpy/lib/tests/test_financial.py,sha256=NZ3Q_wXZr6YBBkK2uElV0Q7ko9GQdN6TEvScQTuXWpc,18390 -numpy/lib/tests/test_format.py,sha256=xd-EyPq4B2sL6wNNK1MnaSD6SefZuV6AtDHELri5pe8,38984 -numpy/lib/tests/test_function_base.py,sha256=0Jnax_jByCwTG2tLP35i2-2gwSuhUx0tYAVicUOBxg0,123208 -numpy/lib/tests/test_histograms.py,sha256=zljzM6vpMhE7pskptqxeC_sYMGlUW5k2GUJ2AZyY5oo,33761 -numpy/lib/tests/test_index_tricks.py,sha256=sVyE_b2FyXEe_FFUVcw2zCjb_d5F8DBvWvm8g3qpLOs,18454 -numpy/lib/tests/test_io.py,sha256=gn5UPy6466E8lVsWFhEGVIHPocVtAc_5OR_1H4VzzJ0,100409 -numpy/lib/tests/test_mixins.py,sha256=YNIKF716Jz7V8FJ8Zzww_F6laTD8j3A6SBxCXqt6rAQ,7233 -numpy/lib/tests/test_nanfunctions.py,sha256=qJAl3wlw4hrRmBwsIn-9iAfsVyXngGJ-P6tvpFKXaF4,38207 -numpy/lib/tests/test_packbits.py,sha256=D0lwihTICKvUm9LTIIs7R16kVK-yZddeCAGnJk6TkEM,17612 -numpy/lib/tests/test_polynomial.py,sha256=NhCF2nGmc43KraPfR6LCBD8M-i-xZKwIsLYPFXNi0WE,10087 -numpy/lib/tests/test_recfunctions.py,sha256=K65UOmcZNUtLGgvI_8gzktZn2Q_B6mC6oA6c7ZG2Ztc,41335 -numpy/lib/tests/test_regression.py,sha256=JeWbMHmGCoVeFtMvd30SVZCpXD9sxnRaI1Dy2wpr5iA,8483 -numpy/lib/tests/test_shape_base.py,sha256=3iwnWAGnerQp4B5Bx-_vTx00E7ZVzjMw6_eqj6H7wdY,24513 -numpy/lib/tests/test_stride_tricks.py,sha256=KCC5XRbKzOXvWo3Pboj9oJ9b0Fw3dCh7bY0HLAOP0_8,17110 -numpy/lib/tests/test_twodim_base.py,sha256=gcrJ43TvAKVqTdWGDx9Dcs79oZtiT6lswS3FVcpt3QQ,18504 -numpy/lib/tests/test_type_check.py,sha256=c9RaZtw85vqRVzsOV1lAgdmFm9V5VgRRfpn-X8Fcv3E,15398 -numpy/lib/tests/test_ufunclike.py,sha256=DdOvBcFD33OFUMsxhnGso7q18M1NAlG-2Zn1gWlu3XM,3352 -numpy/lib/tests/test_utils.py,sha256=4v1ZRTeBbdje3MpnRCVNtRJLEUgpT2qJblUMVB1C89A,3456 -numpy/lib/twodim_base.py,sha256=UIeJOwE6p-EjgUS0L9kJa1aZAQIZqUkmZtqArE7h5WY,27642 -numpy/lib/type_check.py,sha256=fYWhY6IsmBebOIk2XlJZ7ZfhyVO98Q8LtqYlFKIrNDI,19776 -numpy/lib/ufunclike.py,sha256=CB_OBC_pbhtNbuheM-21DIxMArdXIhiyaaSOMN42ZvA,7294 -numpy/lib/user_array.py,sha256=7nJPlDfP-04Lcq8iH_cqBbSEsx5cHCcj-2Py-oh-5t0,7817 -numpy/lib/utils.py,sha256=0yugAVeRUsElIahjKs53RkAxNEAGVCtf7ohKHS41tKA,34082 -numpy/linalg/__init__.py,sha256=qD8UCWbi9l_ik7PQIqw9ChnXo1_3CSZre18px1wIA-s,1825 -numpy/linalg/__pycache__/__init__.cpython-37.pyc,, -numpy/linalg/__pycache__/linalg.cpython-37.pyc,, -numpy/linalg/__pycache__/setup.cpython-37.pyc,, -numpy/linalg/_umath_linalg.cpython-37m-x86_64-linux-gnu.so,sha256=JyTtpoRAptApG5VgzIEl76P3oRSLvMUD8du2v7Vpb30,880560 -numpy/linalg/lapack_lite.cpython-37m-x86_64-linux-gnu.so,sha256=7N_I6kaqWZ6I23cWzrVMZX9gz1PZb_qENRdXbSR74dA,112928 -numpy/linalg/linalg.py,sha256=QbOcm4NDesoEAl7LpPXo23orid-lY2_fITxD3MCj1RI,86274 -numpy/linalg/setup.py,sha256=vTut50wTnLpnWl6i-P1BY2EjikVHrnhwOgpNAF-Lgig,2003 -numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/linalg/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_build.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_deprecations.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_linalg.cpython-37.pyc,, -numpy/linalg/tests/__pycache__/test_regression.cpython-37.pyc,, 
-numpy/linalg/tests/test_build.py,sha256=xKcJ8JmGk-zTqxxMhDX5GFsw-ptn8uwOUOcxaTUuPHc,1704 -numpy/linalg/tests/test_deprecations.py,sha256=eGYDVF3rmGQyDEMGOc-p_zc84Cx1I3jQPyaJe7xOvEc,706 -numpy/linalg/tests/test_linalg.py,sha256=jhwNPXFJN9PLeRmoZwGZ9SBGEkXDvm60pXJJYCLJNFc,72621 -numpy/linalg/tests/test_regression.py,sha256=zz7lprqDg7yU-z1d6AOdCDH3Tjqgw82QGiaPM7peixY,5671 -numpy/ma/__init__.py,sha256=fcmMCElT3MmCkjIGVhXyEAbjuWe_j1NVUiE65eAMvy0,1470 -numpy/ma/__pycache__/__init__.cpython-37.pyc,, -numpy/ma/__pycache__/bench.cpython-37.pyc,, -numpy/ma/__pycache__/core.cpython-37.pyc,, -numpy/ma/__pycache__/extras.cpython-37.pyc,, -numpy/ma/__pycache__/mrecords.cpython-37.pyc,, -numpy/ma/__pycache__/setup.cpython-37.pyc,, -numpy/ma/__pycache__/testutils.cpython-37.pyc,, -numpy/ma/__pycache__/timer_comparison.cpython-37.pyc,, -numpy/ma/bench.py,sha256=q3y_e1wpHVEdg0iIxrBshWVt2LOFfYi6q-eIJ3RSVrU,4942 -numpy/ma/core.py,sha256=ljE2IcaC0KvnBp6M_F1pxPJfCCuLkdIk2RVXUxgZvHk,260311 -numpy/ma/extras.py,sha256=-egPiF1vXSRRb3m5sbLG-tU0c8sVV2ODdxj3p1Ws8Bk,58651 -numpy/ma/mrecords.py,sha256=0kbmSJKEbyHQEjqWiFZy64PaUfstRERbewwnWdyW8e8,26822 -numpy/ma/setup.py,sha256=zkieH8BeiGVXl3Wlt_WeP9kciZlyAZY20DDu4SGk4b4,429 -numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/ma/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_core.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_deprecations.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_extras.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_mrecords.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_old_ma.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/ma/tests/__pycache__/test_subclassing.cpython-37.pyc,, -numpy/ma/tests/test_core.py,sha256=5tiE3vmxdFBV4SXK9cPftUwfPlj8hEhNZ4ydq6EatqM,196581 -numpy/ma/tests/test_deprecations.py,sha256=StN-maPV6dwIPn1LmJ_Fd9l_Ysrbzvl8BZy6zYeUru8,2340 -numpy/ma/tests/test_extras.py,sha256=tw6htO0iACppdtcQ5Hc6fLVNlXWcxO72nCp7QKjUjn0,66087 -numpy/ma/tests/test_mrecords.py,sha256=G46t_9Kzo7wNv1N_Lb3zG4s6LMuXVir1NtMKDaKVdn8,19960 -numpy/ma/tests/test_old_ma.py,sha256=5Wned1evtBm2k1yFjcAnrKTvDjIL2Vatma1cH7ks1Tg,32373 -numpy/ma/tests/test_regression.py,sha256=Kq1OAjXuAyTv0J7UcWmQNd-nk8aFcU-5Vu84HPPK2Fs,3156 -numpy/ma/tests/test_subclassing.py,sha256=l4srPFjFT0jR51e9hbumLCawR9sqQ4cdH4QwY1t6Xek,12966 -numpy/ma/testutils.py,sha256=meyy8_0sx4g2sebsVO1PrFSc6ogLzEU7vjOuu2VjY1U,10365 -numpy/ma/timer_comparison.py,sha256=BCWzBW_z6M3k3Mfe-7ThiPEBF4a12J4ZXGIxFxXkY9c,15548 -numpy/matlib.py,sha256=CgnA_dNYnxFMqfwycoimMgGzjICJC1u6XRpwPEyPvXI,9757 -numpy/matrixlib/__init__.py,sha256=W-2bi7zuMWQY5U1ikwfaBPubrcYkbxzPzzIeYz3RYPA,284 -numpy/matrixlib/__pycache__/__init__.cpython-37.pyc,, -numpy/matrixlib/__pycache__/defmatrix.cpython-37.pyc,, -numpy/matrixlib/__pycache__/setup.cpython-37.pyc,, -numpy/matrixlib/defmatrix.py,sha256=r_rYp4ODTS9Rdw8EBIa0wS7NJ99ygDCzzGUPnI2ziMY,30713 -numpy/matrixlib/setup.py,sha256=7DS-rWnyWlLTuOj31UuhkyW8QhLQ7KD5wirtWT_DUhc,437 -numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/matrixlib/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_interaction.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-37.pyc,, 
-numpy/matrixlib/tests/__pycache__/test_numeric.cpython-37.pyc,, -numpy/matrixlib/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/matrixlib/tests/test_defmatrix.py,sha256=FRkFPpDpgUEzEAgShORCVhPOuqclxBftHyEW5z2oV4o,15315 -numpy/matrixlib/tests/test_interaction.py,sha256=y0ldcMIKCeT_tRo_uON6Cvxuff-M4MxmqnzA0kDFHYU,12179 -numpy/matrixlib/tests/test_masked_matrix.py,sha256=jbmuf5BQjsae6kXZtH8XJ8TI5JJYDIZ0PZhGKBbxnmY,8925 -numpy/matrixlib/tests/test_matrix_linalg.py,sha256=XYsAcC02YgvlfqAQOLY2hOuggeRlRhkztNsLYWGb4QQ,2125 -numpy/matrixlib/tests/test_multiarray.py,sha256=jM-cFU_ktanoyJ0ScRYv5xwohhE3pKpVhBBtd31b-IQ,628 -numpy/matrixlib/tests/test_numeric.py,sha256=YPq5f11MUAV6WcLQbl8xKWcm17lMj9SJ09mamqGCpxA,515 -numpy/matrixlib/tests/test_regression.py,sha256=ou1TP5bFNpjRaL2-zQxzS11ChwvAkCVp3k71SBtOO9M,1001 -numpy/polynomial/__init__.py,sha256=boBgsbz2Rr49pBTyGNT3TnLRTPSauyjBNeCVGek7oUM,1134 -numpy/polynomial/__pycache__/__init__.cpython-37.pyc,, -numpy/polynomial/__pycache__/_polybase.cpython-37.pyc,, -numpy/polynomial/__pycache__/chebyshev.cpython-37.pyc,, -numpy/polynomial/__pycache__/hermite.cpython-37.pyc,, -numpy/polynomial/__pycache__/hermite_e.cpython-37.pyc,, -numpy/polynomial/__pycache__/laguerre.cpython-37.pyc,, -numpy/polynomial/__pycache__/legendre.cpython-37.pyc,, -numpy/polynomial/__pycache__/polynomial.cpython-37.pyc,, -numpy/polynomial/__pycache__/polyutils.cpython-37.pyc,, -numpy/polynomial/__pycache__/setup.cpython-37.pyc,, -numpy/polynomial/_polybase.py,sha256=HOIXM-w5L_TVFdWR72K_RtidpR8zHqNARoeVwf6gor8,33093 -numpy/polynomial/chebyshev.py,sha256=5pr-j0wWlKnNki-vaM2gV7Sni9FXtaomVMhYH01pw_I,63287 -numpy/polynomial/hermite.py,sha256=jTv8jCvVA5_bQ6AqLo5yF8n1-8mWpT_M1vET2BlKSdY,52671 -numpy/polynomial/hermite_e.py,sha256=03sKE5Osr1DIVUL3eMKmzKU0GGKUk7lEJM5K2_LRXG0,52853 -numpy/polynomial/laguerre.py,sha256=CSbhTmnKKIYGMLoahlQbFpPIvAvXQ8aQ6lQzy9ySmic,51106 -numpy/polynomial/legendre.py,sha256=4TjHkvFH8gPA2P_ncR0GyBYjp4YF5nYWVjmkkWa6DyE,52507 -numpy/polynomial/polynomial.py,sha256=_A6i4ZQKeOVy_g4Wui6f8ubbWbd0tPDpNS5VCbvqtEs,48706 -numpy/polynomial/polyutils.py,sha256=gvkAyz9vYqVAqu-X9NIVmXnZ3Lap0wGkWUHdHue3ktI,23243 -numpy/polynomial/setup.py,sha256=PKIUV6Jh7_0jBboPp3IHPmp6LWVs4tbIkdu_FtmI_5U,385 -numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/polynomial/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_classes.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_hermite.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_laguerre.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_legendre.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_polynomial.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_polyutils.cpython-37.pyc,, -numpy/polynomial/tests/__pycache__/test_printing.cpython-37.pyc,, -numpy/polynomial/tests/test_chebyshev.py,sha256=Vda4vCJtdIAPs0tsbXexnw4kaaou30FjZ0gQxNxOcz8,20716 -numpy/polynomial/tests/test_classes.py,sha256=18hEEMQHB3o1roK4nlPrawv9pFif2gur6lkEBoxZAFg,20370 -numpy/polynomial/tests/test_hermite.py,sha256=3zU7T69fuFvn5gDOG34SCnyDm_pVvTVlcpUMlhoU2V0,18755 -numpy/polynomial/tests/test_hermite_e.py,sha256=06gCjnh0s-1h7jWpmJyjQdfzAK_4kywto7hHuQ7NmJQ,19089 -numpy/polynomial/tests/test_laguerre.py,sha256=O5zxZQ5GIOZrx4b0ttCUoDxmb3ifhwDRcq--hYyt3zU,17689 
-numpy/polynomial/tests/test_legendre.py,sha256=2y8xF4PdU-uS7OjuIzMC6DAeVc9mlW83xj_N4NSGhSY,18453 -numpy/polynomial/tests/test_polynomial.py,sha256=MD4xxU3yWSbMK9B5wpYLQOeWZj0mH7g9p9ifMVhPQF4,20080 -numpy/polynomial/tests/test_polyutils.py,sha256=GzRz3leypd2UrWE-EwuIWL0lbbj6ks6Mjli3tozDN9U,3081 -numpy/polynomial/tests/test_printing.py,sha256=_7O-05q3JEjdxmuzBdWxligQVdC6qGygKmbhfiYW9KQ,2067 -numpy/random/__init__.pxd,sha256=-E4OlHPfdF_aLa7hXIZzBBBkTIK86tR9qXnKMeUnhcg,432 -numpy/random/__init__.py,sha256=yX9S3EpGEUAnSiwoBrccxFZngr5pLmbEx6dgLPH1r5s,7527 -numpy/random/__pycache__/__init__.cpython-37.pyc,, -numpy/random/__pycache__/_pickle.cpython-37.pyc,, -numpy/random/__pycache__/setup.cpython-37.pyc,, -numpy/random/_bit_generator.cpython-37m-x86_64-linux-gnu.so,sha256=bo3-lJOD40NhsqNIdaWdkOmw_x1WlTdfsyYCA1QDqqg,839767 -numpy/random/_bit_generator.pxd,sha256=nZRRH1h_FhR-YTE_Y0kJ5n_JyuFxFHA4II_K0sqNH3k,1005 -numpy/random/_bounded_integers.cpython-37m-x86_64-linux-gnu.so,sha256=U3RpwORvqwAOjiKCPKKiFXPfjIr_Rp4OAg9BAdV6fQU,2071041 -numpy/random/_bounded_integers.pxd,sha256=hcoucPH5hkFEM2nm12zYO-5O_Rt8RujEXT5YWuAzl1Q,1669 -numpy/random/_common.cpython-37m-x86_64-linux-gnu.so,sha256=yVwyV6I9ArJ16xL7RU78bGT5W6ix1QxQDpi6eF8c-Sg,1336140 -numpy/random/_common.pxd,sha256=jJSsc_MpqkizibG03OLe7gRN3DMfwGMjDkbG-utvDKM,4690 -numpy/random/_examples/cffi/__pycache__/extending.cpython-37.pyc,, -numpy/random/_examples/cffi/__pycache__/parse.cpython-37.pyc,, -numpy/random/_examples/cffi/extending.py,sha256=xSla3zWqxi6Hj48EvnYfD3WHfE189VvC4XsKu4_T_Iw,880 -numpy/random/_examples/cffi/parse.py,sha256=v0eB67u_SgfqSflvuB31YqHUZWh6XscNcLKaCn7fCaw,1515 -numpy/random/_examples/cython/__pycache__/setup.cpython-37.pyc,, -numpy/random/_examples/cython/extending.pyx,sha256=8nSM_iELliQYfp0Hj9VzD2XZAfaRdo7iJTNP5gLRn-k,2292 -numpy/random/_examples/cython/extending_distributions.pyx,sha256=o6Pd8XP7jvMaZeLaJZTN9Vp0_5rm4M_xF16GmJE-6yw,2332 -numpy/random/_examples/cython/setup.py,sha256=68K-GEXqTLGxXyMOttMH6nwMN6zcvLjY-lWrVml2jPk,1042 -numpy/random/_examples/numba/__pycache__/extending.cpython-37.pyc,, -numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-37.pyc,, -numpy/random/_examples/numba/extending.py,sha256=L-ELWpGbqBC2WSiWHFatfTnRxu2a66x7vKIoU2zDx_U,1977 -numpy/random/_examples/numba/extending_distributions.py,sha256=Jnr9aWkHyIWygNbdae32GVURK-5T9BTGhuExRpvve98,2034 -numpy/random/_generator.cpython-37m-x86_64-linux-gnu.so,sha256=Pchb0c-AAKAi_x5bCmnDfP_Y8tYF8zyqVKC0kN1MyN0,3186082 -numpy/random/_mt19937.cpython-37m-x86_64-linux-gnu.so,sha256=_LqkYcQTdEIjyRLCpps_OBFmUqTCVAbWa4nMGol-yBw,441605 -numpy/random/_pcg64.cpython-37m-x86_64-linux-gnu.so,sha256=bzbnVd3lkge4S81m14zEkUCRYkuiquFw2OQ4OOd3Il4,313867 -numpy/random/_philox.cpython-37m-x86_64-linux-gnu.so,sha256=Mbz6bfYfW8F_4maVprTXhKva0_f6P9yrQEFuXKmiODw,378664 -numpy/random/_pickle.py,sha256=QJRCkyDVi7xJEx-XMcYlMoLwi2dPoz8jD_6NFo1nU-4,2247 -numpy/random/_sfc64.cpython-37m-x86_64-linux-gnu.so,sha256=6LnbG0QZQDufnGpL-IfiBKlVLMmwI379lsdY_XHJMlI,226830 -numpy/random/mtrand.cpython-37m-x86_64-linux-gnu.so,sha256=2W2kth8pl-ZvaTeL4AnUZ7ukUIvGTYm_NbgP6BX1PtA,2359706 -numpy/random/setup.py,sha256=OvadBHJDLR-VmfF0Ls598MMpP9kMfzkdtrei-sEpK4Q,5715 -numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/random/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_direct.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_extending.cpython-37.pyc,, 
-numpy/random/tests/__pycache__/test_generator_mt19937.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_random.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_randomstate.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_randomstate_regression.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_regression.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_seed_sequence.cpython-37.pyc,, -numpy/random/tests/__pycache__/test_smoke.cpython-37.pyc,, -numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/random/tests/data/__pycache__/__init__.cpython-37.pyc,, -numpy/random/tests/data/mt19937-testset-1.csv,sha256=Xkef402AVB-eZgYQkVtoxERHkxffCA9Jyt_oMbtJGwY,15844 -numpy/random/tests/data/mt19937-testset-2.csv,sha256=nsBEQNnff-aFjHYK4thjvUK4xSXDSfv5aTbcE59pOkE,15825 -numpy/random/tests/data/pcg64-testset-1.csv,sha256=xB00DpknGUTTCxDr9L6aNo9Hs-sfzEMbUSS4t11TTfE,23839 -numpy/random/tests/data/pcg64-testset-2.csv,sha256=NTdzTKvG2U7_WyU_IoQUtMzU3kEvDH39CgnR6VzhTkw,23845 -numpy/random/tests/data/philox-testset-1.csv,sha256=SedRaIy5zFadmk71nKrGxCFZ6BwKz8g1A9-OZp3IkkY,23852 -numpy/random/tests/data/philox-testset-2.csv,sha256=dWECt-sbfvaSiK8-Ygp5AqyjoN5i26VEOrXqg01rk3g,23838 -numpy/random/tests/data/sfc64-testset-1.csv,sha256=iHs6iX6KR8bxGwKk-3tedAdMPz6ZW8slDSUECkAqC8Q,23840 -numpy/random/tests/data/sfc64-testset-2.csv,sha256=FIDIDFCaPZfWUSxsJMAe58hPNmMrU27kCd9FhCEYt_k,23833 -numpy/random/tests/test_direct.py,sha256=RHMSKQifz7vqhjn0z5rpJl_AlDLVSli-ldC6jKcwJP0,14435 -numpy/random/tests/test_extending.py,sha256=22-9bT9yMONuqb4r_5G-jV7QS_V1nN_rddEAs3X2aq4,1822 -numpy/random/tests/test_generator_mt19937.py,sha256=nmoG3KGeHyP_MO6Egr99DdEJFKCab8O98cEVKngj0ZE,94406 -numpy/random/tests/test_generator_mt19937_regressions.py,sha256=ldeCEO3N6dCAGA1g8YnqEwRTQAiv6tBuY9xuAELJNCQ,5834 -numpy/random/tests/test_random.py,sha256=6h_kDOT55P1Vq2tf8JUM4wJTqkEdftg9XlmUgYroAAc,66842 -numpy/random/tests/test_randomstate.py,sha256=P8ZLRb3EswHcZ3jTZ0tn6z33LiBiwlufTR9b6TPLUz4,78067 -numpy/random/tests/test_randomstate_regression.py,sha256=6nW_U3uLq3JbiIaNX0PstGgqHk8fhDiblDkmOvF2Huc,7707 -numpy/random/tests/test_regression.py,sha256=_M-We4kY74tXPonJjWN7rMXF5SoxHMapl1zM08-6p0w,5683 -numpy/random/tests/test_seed_sequence.py,sha256=-fvOA-gzi_hOugmzJfXxL0GNmfAvuAbiwDCuLggqrNY,2379 -numpy/random/tests/test_smoke.py,sha256=VOCrUBqDsJFu9yQ02DArd-NV5p3eTphY-NX3WwnyewU,27891 -numpy/setup.py,sha256=lsyhnRXfo0ybq63nVUX8HnYhQ1mI0bSic-mk-lK3wnc,920 -numpy/testing/__init__.py,sha256=MHRK5eimwrC9RE723HlOcOQGxu5HAmQ-qwlcVX1sZ1k,632 -numpy/testing/__pycache__/__init__.cpython-37.pyc,, -numpy/testing/__pycache__/print_coercion_tables.cpython-37.pyc,, -numpy/testing/__pycache__/setup.cpython-37.pyc,, -numpy/testing/__pycache__/utils.cpython-37.pyc,, -numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/testing/_private/__pycache__/__init__.cpython-37.pyc,, -numpy/testing/_private/__pycache__/decorators.cpython-37.pyc,, -numpy/testing/_private/__pycache__/noseclasses.cpython-37.pyc,, -numpy/testing/_private/__pycache__/nosetester.cpython-37.pyc,, -numpy/testing/_private/__pycache__/parameterized.cpython-37.pyc,, -numpy/testing/_private/__pycache__/utils.cpython-37.pyc,, -numpy/testing/_private/decorators.py,sha256=JSIBsQH4t1rdMcr1-Cf2jBJ6CXzIGEFyZoWxUJuXI7M,9015 -numpy/testing/_private/noseclasses.py,sha256=nYtV16KcoqAcHswfYO-u6bRIrDBvCvpqjCNfl7zk-SA,14601 
-numpy/testing/_private/nosetester.py,sha256=S1nEtDBvNT87Zrt8XmuSVIBWpanJwjtD1YiRlcf7eoA,20515 -numpy/testing/_private/parameterized.py,sha256=PQnCG1Ul0aE9MBTDL9lJ-DOMgsahDfpMn5Xhqld1KWk,18285 -numpy/testing/_private/utils.py,sha256=_na6o-vYzN8eDMww86X49m8ciCa3G_lZlDH7IEQLdyQ,84689 -numpy/testing/print_coercion_tables.py,sha256=qIIxBkc4f2aCKiUY6EsShxQzRrBkFEb4TB7KaQuTl58,2809 -numpy/testing/setup.py,sha256=9PnlgcejccUBzaGPi9Po-ElhmuQMAmWCBRdvCDwiKYw,676 -numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/testing/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/testing/tests/__pycache__/test_decorators.cpython-37.pyc,, -numpy/testing/tests/__pycache__/test_doctesting.cpython-37.pyc,, -numpy/testing/tests/__pycache__/test_utils.cpython-37.pyc,, -numpy/testing/tests/test_decorators.py,sha256=mkMCPSPJdrKxQl93u0QlIEdp5JS0tCzgLHXuoYDDvzs,6001 -numpy/testing/tests/test_doctesting.py,sha256=sKBXwuRZwMFSiem3R9egBzzSUB81kkpw9y-Y07iqU2M,1413 -numpy/testing/tests/test_utils.py,sha256=sB8vinI9-74VO9il6mf3a7k4OXh0HFp3dSVQk6br5JM,54774 -numpy/testing/utils.py,sha256=5-ntGTS7ux_T1sowuhRT5bwerhsCmgUfkMB-JJqPOOM,1298 -numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/tests/__pycache__/__init__.cpython-37.pyc,, -numpy/tests/__pycache__/test_ctypeslib.cpython-37.pyc,, -numpy/tests/__pycache__/test_matlib.cpython-37.pyc,, -numpy/tests/__pycache__/test_numpy_version.cpython-37.pyc,, -numpy/tests/__pycache__/test_public_api.cpython-37.pyc,, -numpy/tests/__pycache__/test_reloading.cpython-37.pyc,, -numpy/tests/__pycache__/test_scripts.cpython-37.pyc,, -numpy/tests/__pycache__/test_warnings.cpython-37.pyc,, -numpy/tests/test_ctypeslib.py,sha256=Fy_dBd80RrBufyeXISkBu6kS3X700qOD5ob0pDjRssg,12276 -numpy/tests/test_matlib.py,sha256=WKILeEOe3NdKP_XAy-uCs4VEi7r_ghQ7NUhIgH1LzoM,2158 -numpy/tests/test_numpy_version.py,sha256=VtTTZAPnsJ8xtKLy1qYqIwrpcjTtqJ9xP9qP5-p8DbU,647 -numpy/tests/test_public_api.py,sha256=Cfv9zpw_M9XElubxmNs_d1lwgo3ErVdGI1ttHpjHHEM,15532 -numpy/tests/test_reloading.py,sha256=k_J-pWB1mO4XoSAqOZ-qgpsn5It6yXgcRvNs1wxbcoY,1298 -numpy/tests/test_scripts.py,sha256=SxlQPb8EttfP4V5iGJyXMBtDWTS3EcYVBN-JWDTtSy4,1637 -numpy/tests/test_warnings.py,sha256=38bAtHc0P2uZ8c2Y9TQse3k6KBtPnvix8Q7OlF3WgZw,2594 -numpy/version.py,sha256=yEnGmiF7H8pwqnezXt9q8Sc7b1bD2kI-p7hhywdWKMA,294 diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/WHEEL b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/WHEEL deleted file mode 100644 index 697e432..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: false -Tag: cp37-cp37m-manylinux1_x86_64 - diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/entry_points.txt b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/entry_points.txt deleted file mode 100644 index b6bb53a..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/entry_points.txt +++ /dev/null @@ -1,5 +0,0 @@ -[console_scripts] -f2py = numpy.f2py.f2py2e:main -f2py3 = numpy.f2py.f2py2e:main -f2py3.7 = numpy.f2py.f2py2e:main - diff --git a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/top_level.txt deleted file mode 100644 index 24ce15a..0000000 --- a/venv/lib/python3.7/site-packages/numpy-1.18.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git 
a/venv/lib/python3.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 b/venv/lib/python3.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0
deleted file mode 100755
index eb7ac25..0000000
Binary files a/venv/lib/python3.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 and /dev/null differ
diff --git a/venv/lib/python3.7/site-packages/numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so b/venv/lib/python3.7/site-packages/numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so
deleted file mode 100755
index 757de41..0000000
Binary files a/venv/lib/python3.7/site-packages/numpy/.libs/libopenblasp-r0-34a18dc3.3.7.so and /dev/null differ
diff --git a/venv/lib/python3.7/site-packages/numpy/LICENSE.txt b/venv/lib/python3.7/site-packages/numpy/LICENSE.txt
deleted file mode 100644
index 3f1733f..0000000
--- a/venv/lib/python3.7/site-packages/numpy/LICENSE.txt
+++ /dev/null
@@ -1,910 +0,0 @@
-Copyright (c) 2005-2019, NumPy Developers.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    * Neither the name of the NumPy Developers nor the names of any
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-----
-
-This binary distribution of NumPy also bundles the following software:
-
-
-Name: OpenBLAS
-Files: .libs/libopenb*.so
-Description: bundled as a dynamically linked library
-Availability: https://github.com/xianyi/OpenBLAS/
-License: 3-clause BSD
-  Copyright (c) 2011-2014, The OpenBLAS Project
-  All rights reserved.
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-  1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
-
-  2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in
-     the documentation and/or other materials provided with the
-     distribution.
-  3. Neither the name of the OpenBLAS project nor the names of
-     its contributors may be used to endorse or promote products
-     derived from this software without specific prior written
-     permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: LAPACK
-Files: .libs/libopenb*.so
-Description: bundled in OpenBLAS
-Availability: https://github.com/xianyi/OpenBLAS/
-License 3-clause BSD
-  Copyright (c) 1992-2013 The University of Tennessee and The University
-                          of Tennessee Research Foundation. All rights
-                          reserved.
-  Copyright (c) 2000-2013 The University of California Berkeley. All
-                          rights reserved.
-  Copyright (c) 2006-2013 The University of Colorado Denver. All rights
-                          reserved.
-
-  $COPYRIGHT$
-
-  Additional copyrights may follow
-
-  $HEADER$
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-  - Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-  - Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer listed
-    in this license in the documentation and/or other materials
-    provided with the distribution.
-
-  - Neither the name of the copyright holders nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-  The copyright holders provide no reassurances that the source code
-  provided does not infringe any patent, copyright, or any other
-  intellectual property rights of third parties. The copyright holders
-  disclaim any liability to any recipient for claims brought against
-  recipient by any third party for infringement of that parties
-  intellectual property rights.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: GCC runtime library
-Files: .libs/libgfortran*.so
-Description: dynamically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/viewcvs/gcc/
-License: GPLv3 + runtime exception
-  Copyright (C) 2002-2017 Free Software Foundation, Inc.
-
-  Libgfortran is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3, or (at your option)
-  any later version.
-
-  Libgfortran is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-  GNU General Public License for more details.
-
-  Under Section 7 of GPL version 3, you are granted additional
-  permissions described in the GCC Runtime Library Exception, version
-  3.1, as published by the Free Software Foundation.
-
-  You should have received a copy of the GNU General Public License and
-  a copy of the GCC Runtime Library Exception along with this program;
-  see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
-  <http://www.gnu.org/licenses/>.
-
-----
-
-Full text of license texts referred to above follows (that they are
-listed below does not necessarily imply the conditions apply to the
-present binary release):
-
-----
-
-GCC RUNTIME LIBRARY EXCEPTION
-
-Version 3.1, 31 March 2009
-
-Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-This GCC Runtime Library Exception ("Exception") is an additional
-permission under section 7 of the GNU General Public License, version
-3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
-bears a notice placed by the copyright holder of the file stating that
-the file is governed by GPLv3 along with this Exception.
-
-When you use GCC to compile a program, GCC may combine portions of
-certain GCC header files and runtime libraries with the compiled
-program. The purpose of this Exception is to allow compilation of
-non-GPL (including proprietary) programs to use, in this way, the
-header files and runtime libraries covered by this Exception.
-
-0. Definitions.
-
-A file is an "Independent Module" if it either requires the Runtime
-Library for execution after a Compilation Process, or makes use of an
-interface provided by the Runtime Library, but is not otherwise based
-on the Runtime Library.
-
-"GCC" means a version of the GNU Compiler Collection, with or without
-modifications, governed by version 3 (or a specified later version) of
-the GNU General Public License (GPL) with the option of using any
-subsequent versions published by the FSF.
-
-"GPL-compatible Software" is software whose conditions of propagation,
-modification and use would permit combination with GCC in accord with
-the license of GCC.
-
-"Target Code" refers to output from any compiler for a real or virtual
-target processor architecture, in executable form or suitable for
-input to an assembler, loader, linker and/or execution
-phase. Notwithstanding that, Target Code does not include data in any
-format that is used as a compiler intermediate representation, or used
-for producing a compiler intermediate representation.
-
-The "Compilation Process" transforms code entirely represented in
-non-intermediate languages designed for human-written code, and/or in
-Java Virtual Machine byte code, into Target Code. Thus, for example,
-use of source code generators and preprocessors need not be considered
-part of the Compilation Process, since the Compilation Process can be
-understood as starting with the output of the generators or
-preprocessors.
-
-A Compilation Process is "Eligible" if it is done using GCC, alone or
-with other GPL-compatible software, or if it is done without using any
-work based on GCC. For example, using non-GPL-compatible Software to
-optimize any GCC intermediate representations would not qualify as an
-Eligible Compilation Process.
-
-1. Grant of Additional Permission.
-
-You have permission to propagate a work of Target Code formed by
-combining the Runtime Library with Independent Modules, even if such
-propagation would otherwise violate the terms of GPLv3, provided that
-all Target Code was generated by Eligible Compilation Processes. You
-may then convey such a combination under terms of your choice,
-consistent with the licensing of the Independent Modules.
-
-2. No Weakening of GCC Copyleft.
-
-The availability of this Exception does not imply any general
-presumption that third-party software is unaffected by the copyleft
-requirements of the license of GCC.
-
-----
-
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year> <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program> Copyright (C) <year> <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/venv/lib/python3.7/site-packages/numpy/__config__.py b/venv/lib/python3.7/site-packages/numpy/__config__.py
deleted file mode 100644
index db2e454..0000000
--- a/venv/lib/python3.7/site-packages/numpy/__config__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file is generated by numpy's setup.py
-# It contains system_info results at the time of building this package.
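The generated module that follows freezes those system_info results (BLAS/LAPACK selection, library paths) into plain module-level dicts. A minimal sketch of how such a module is queried at runtime — using only the `show` and `get_info` helpers defined just below, nothing else assumed::

    import numpy as np

    np.show_config()                            # prints each *_info dict, or "NOT AVAILABLE"
    info = np.__config__.get_info('blas_opt')   # get_info falls back to the 'blas_opt_info' dict
    print(info.get('libraries'), info.get('library_dirs'))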
-__all__ = ["get_info","show"] - - -import os -import sys - -extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - -if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - if sys.version_info >= (3, 8): - os.add_dll_directory(extra_dll_dir) - else: - os.environ.setdefault('PATH', '') - os.environ['PATH'] += os.pathsep + extra_dll_dir - -blas_mkl_info={} -blis_info={} -openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_mkl_info={} -openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} - -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) diff --git a/venv/lib/python3.7/site-packages/numpy/__init__.py b/venv/lib/python3.7/site-packages/numpy/__init__.py deleted file mode 100644 index 349914b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/__init__.py +++ /dev/null @@ -1,260 +0,0 @@ -""" -NumPy -===== - -Provides - 1. An array object of arbitrary homogeneous items - 2. Fast mathematical operations over arrays - 3. Linear Algebra, Fourier Transforms, Random Number Generation - -How to use the documentation ----------------------------- -Documentation is available in two forms: docstrings provided -with the code, and a loose standing reference guide, available from -`the NumPy homepage `_. - -We recommend exploring the docstrings using -`IPython `_, an advanced Python shell with -TAB-completion and introspection capabilities. See below for further -instructions. - -The docstring examples assume that `numpy` has been imported as `np`:: - - >>> import numpy as np - -Code snippets are indicated by three greater-than signs:: - - >>> x = 42 - >>> x = x + 1 - -Use the built-in ``help`` function to view a function's docstring:: - - >>> help(np.sort) - ... # doctest: +SKIP - -For some objects, ``np.info(obj)`` may provide additional help. This is -particularly true if you see the line "Help on ufunc object:" at the top -of the help() page. Ufuncs are implemented in C, not Python, for speed. -The native Python help() does not know how to view their help, but our -np.info() function does. - -To search for documents containing a keyword, do:: - - >>> np.lookfor('keyword') - ... # doctest: +SKIP - -General-purpose documents like a glossary and help on the basic concepts -of numpy are available under the ``doc`` sub-module:: - - >>> from numpy import doc - >>> help(doc) - ... # doctest: +SKIP - -Available subpackages ---------------------- -doc - Topical documentation on broadcasting, indexing, etc. -lib - Basic functions used by several sub-packages. 
-random - Core Random Tools -linalg - Core Linear Algebra Tools -fft - Core FFT routines -polynomial - Polynomial tools -testing - NumPy testing tools -f2py - Fortran to Python Interface Generator. -distutils - Enhancements to distutils with support for - Fortran compilers support and more. - -Utilities ---------- -test - Run numpy unittests -show_config - Show numpy build configuration -dual - Overwrite certain functions with high-performance Scipy tools -matlib - Make everything matrices. -__version__ - NumPy version string - -Viewing documentation using IPython ------------------------------------ -Start IPython with the NumPy profile (``ipython -p numpy``), which will -import `numpy` under the alias `np`. Then, use the ``cpaste`` command to -paste examples into the shell. To see which functions are available in -`numpy`, type ``np.`` (where ```` refers to the TAB key), or use -``np.*cos*?`` (where ```` refers to the ENTER key) to narrow -down the list. To view the docstring for a function, use -``np.cos?`` (to view the docstring) and ``np.cos??`` (to view -the source code). - -Copies vs. in-place operation ------------------------------ -Most of the functions in `numpy` return a copy of the array argument -(e.g., `np.sort`). In-place versions of these functions are often -available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. -Exceptions to this rule are documented. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import warnings - -from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning -from ._globals import _NoValue - -# We first need to detect if we're being called as part of the numpy setup -# procedure itself in a reliable manner. -try: - __NUMPY_SETUP__ -except NameError: - __NUMPY_SETUP__ = False - -if __NUMPY_SETUP__: - sys.stderr.write('Running from numpy source directory.\n') -else: - try: - from numpy.__config__ import show as show_config - except ImportError: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) - - from .version import git_revision as __git_revision__ - from .version import version as __version__ - - __all__ = ['ModuleDeprecationWarning', - 'VisibleDeprecationWarning'] - - # Allow distributors to run custom init code - from . import _distributor_init - - from . import core - from .core import * - from . import compat - from . import lib - # FIXME: why have numpy.lib if everything is imported here?? - from .lib import * - - from . import linalg - from . import fft - from . import polynomial - from . import random - from . import ctypeslib - from . import ma - from . 
import matrixlib as _mat - from .matrixlib import * - from .compat import long - - # Make these accessible from numpy name-space - # but not imported in from numpy import * - # TODO[gh-6103]: Deprecate these - if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str - else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - from .core import round, abs, max, min - # now that numpy modules are imported, can initialize limits - core.getlimits._register_known_types() - - __all__.extend(['__version__', 'show_config']) - __all__.extend(core.__all__) - __all__.extend(_mat.__all__) - __all__.extend(lib.__all__) - __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma']) - - # These are added by `from .core import *` and `core.__all__`, but we - # overwrite them above with builtins we do _not_ want to export. - __all__.remove('long') - __all__.remove('unicode') - - # Remove things that are in the numpy.lib but not in the numpy namespace - # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace) - # that prevents adding more things to the main namespace by accident. - # The list below will grow until the `from .lib import *` fixme above is - # taken care of - __all__.remove('Arrayterator') - del Arrayterator - - # Filter out Cython harmless warnings - warnings.filterwarnings("ignore", message="numpy.dtype size changed") - warnings.filterwarnings("ignore", message="numpy.ufunc size changed") - warnings.filterwarnings("ignore", message="numpy.ndarray size changed") - - # oldnumeric and numarray were removed in 1.9. In case some packages import - # but do not use them, we define them here for backward compatibility. - oldnumeric = 'removed' - numarray = 'removed' - - if sys.version_info[:2] >= (3, 7): - # Importing Tester requires importing all of UnitTest which is not a - # cheap import Since it is mainly used in test suits, we lazy import it - # here to save on the order of 10 ms of import time for most users - # - # The previous way Tester was imported also had a side effect of adding - # the full `numpy.testing` namespace - # - # module level getattr is only supported in 3.7 onwards - # https://www.python.org/dev/peps/pep-0562/ - def __getattr__(attr): - if attr == 'testing': - import numpy.testing as testing - return testing - elif attr == 'Tester': - from .testing import Tester - return Tester - else: - raise AttributeError("module {!r} has no attribute " - "{!r}".format(__name__, attr)) - - def __dir__(): - return list(globals().keys()) + ['Tester', 'testing'] - - else: - # We don't actually use this ourselves anymore, but I'm not 100% sure that - # no-one else in the world is using it (though I hope not) - from .testing import Tester - - # Pytest testing - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester - - - def _sanity_check(): - """ - Quick sanity checks for common bugs caused by environment. - There are some cases e.g. with wrong BLAS ABI that cause wrong - results under specific runtime conditions that are not necessarily - achieved during test suite runs, and it is useful to catch those early. - - See https://github.com/numpy/numpy/issues/8577 and other - similar bug reports. - - """ - try: - x = ones(2, dtype=float32) - if not abs(x.dot(x) - 2.0) < 1e-5: - raise AssertionError() - except AssertionError: - msg = ("The current Numpy installation ({!r}) fails to " - "pass simple sanity checks. 
This can be caused for example " - "by incorrect BLAS library being linked in, or by mixing " - "package managers (pip, conda, apt, ...). Search closed " - "numpy issues for similar problems.") - raise RuntimeError(msg.format(__file__)) - - _sanity_check() - del _sanity_check diff --git a/venv/lib/python3.7/site-packages/numpy/_distributor_init.py b/venv/lib/python3.7/site-packages/numpy/_distributor_init.py deleted file mode 100644 index d893ba3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/_distributor_init.py +++ /dev/null @@ -1,10 +0,0 @@ -""" Distributor init file - -Distributors: you can add custom code here to support particular distributions -of numpy. - -For example, this is a good place to put any checks for hardware requirements. - -The numpy standard source distribution will not put code in this file, so you -can safely replace this file with your own version. -""" diff --git a/venv/lib/python3.7/site-packages/numpy/_globals.py b/venv/lib/python3.7/site-packages/numpy/_globals.py deleted file mode 100644 index f5c0761..0000000 --- a/venv/lib/python3.7/site-packages/numpy/_globals.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Module defining global singleton classes. - -This module raises a RuntimeError if an attempt to reload it is made. In that -way the identities of the classes defined here are fixed and will remain so -even if numpy itself is reloaded. In particular, a function like the following -will still work correctly after numpy is reloaded:: - - def foo(arg=np._NoValue): - if arg is np._NoValue: - ... - -That was not the case when the singleton classes were defined in the numpy -``__init__.py`` file. See gh-7844 for a discussion of the reload problem that -motivated this module. - -""" -from __future__ import division, absolute_import, print_function - -__ALL__ = [ - 'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue' - ] - - -# Disallow reloading this module so as to preserve the identities of the -# classes defined here. -if '_is_loaded' in globals(): - raise RuntimeError('Reloading numpy._globals is not allowed') -_is_loaded = True - - -class ModuleDeprecationWarning(DeprecationWarning): - """Module deprecation warning. - - The nose tester turns ordinary Deprecation warnings into test failures. - That makes it hard to deprecate whole modules, because they get - imported by default. So this is a special Deprecation warning that the - nose tester will let pass without making tests fail. - - """ - - -ModuleDeprecationWarning.__module__ = 'numpy' - - -class VisibleDeprecationWarning(UserWarning): - """Visible deprecation warning. - - By default, python will not show deprecation warnings, so this class - can be used when a very visible warning is helpful, for example because - the usage is most likely a user bug. - - """ - - -VisibleDeprecationWarning.__module__ = 'numpy' - - -class _NoValueType(object): - """Special keyword value. - - The instance of this class may be used as the default value assigned to a - deprecated keyword in order to check if it has been given a user defined - value. 
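The identity check that this docstring describes looks like the following sketch in practice; `smooth` is a hypothetical function used only for illustration, while `np._NoValue` is the real singleton created at the bottom of this file::

    import numpy as np

    def smooth(a, axis=np._NoValue):    # hypothetical API with a deprecated keyword
        if axis is np._NoValue:         # caller omitted the keyword entirely
            axis = None                 # fall back without any deprecation warning
        return np.sum(a, axis=axis)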
-
- """
- __instance = None
- def __new__(cls):
- # ensure that only one instance exists
- if not cls.__instance:
- cls.__instance = super(_NoValueType, cls).__new__(cls)
- return cls.__instance
-
- # needed for python 2 to preserve identity through a pickle
- def __reduce__(self):
- return (self.__class__, ())
-
- def __repr__(self):
- return "<no value>"
-
-
-_NoValue = _NoValueType()
diff --git a/venv/lib/python3.7/site-packages/numpy/_pytesttester.py b/venv/lib/python3.7/site-packages/numpy/_pytesttester.py
deleted file mode 100644
index b25224c..0000000
--- a/venv/lib/python3.7/site-packages/numpy/_pytesttester.py
+++ /dev/null
@@ -1,214 +0,0 @@
-"""
-Pytest test running.
-
-This module implements the ``test()`` function for NumPy modules. The usual
-boilerplate for doing that is to put the following in the module
-``__init__.py`` file::
-
- from numpy._pytesttester import PytestTester
- test = PytestTester(__name__).test
- del PytestTester
-
-
-Warnings filtering and other runtime settings should be dealt with in the
-``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
-whether or not that file is found as follows:
-
-* ``pytest.ini`` is present (develop mode)
- All warnings except those explicitly filtered out are raised as error.
-* ``pytest.ini`` is absent (release mode)
- DeprecationWarnings and PendingDeprecationWarnings are ignored, other
- warnings are passed through.
-
-In practice, tests run from the numpy repo are run in develop mode. That
-includes the standard ``python runtests.py`` invocation.
-
-This module is imported by every numpy subpackage, so lies at the top level to
-simplify circular import issues. For the same reason, it contains no numpy
-imports at module scope, instead importing numpy within function calls.
-"""
-from __future__ import division, absolute_import, print_function
-
-import sys
-import os
-
-__all__ = ['PytestTester']
-
-
-
-def _show_numpy_info():
- import numpy as np
-
- print("NumPy version %s" % np.__version__)
- relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
- print("NumPy relaxed strides checking option:", relaxed_strides)
-
-
-class PytestTester(object):
- """
- Pytest test runner.
-
- A test function is typically added to a package's __init__.py like so::
-
- from numpy._pytesttester import PytestTester
- test = PytestTester(__name__).test
- del PytestTester
-
- Calling this test function finds and runs all tests associated with the
- module and all its sub-modules.
-
- Attributes
- ----------
- module_name : str
- Full path to the package to test.
-
- Parameters
- ----------
- module_name : module name
- The name of the module to test.
-
- Notes
- -----
- Unlike the previous ``nose``-based implementation, this class is not
- publicly exposed as it performs some ``numpy``-specific warning
- suppression.
-
- """
- def __init__(self, module_name):
- self.module_name = module_name
-
- def __call__(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, durations=-1, tests=None):
- """
- Run tests for module using pytest.
-
- Parameters
- ----------
- label : {'fast', 'full'}, optional
- Identifies the tests to run. When set to 'fast', tests decorated
- with `pytest.mark.slow` are skipped, when 'full', the slow marker
- is ignored.
- verbose : int, optional
- Verbosity value for test outputs, in the range 1-3. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to pytest.
- doctests : bool, optional
- ..
note:: Not supported - coverage : bool, optional - If True, report coverage of NumPy code. Default is False. - Requires installation of (pip) pytest-cov. - durations : int, optional - If < 0, do nothing, If 0, report time of all tests, if > 0, - report the time of the slowest `timer` tests. Default is -1. - tests : test or list of tests - Tests to be executed with pytest '--pyargs' - - Returns - ------- - result : bool - Return True on success, false otherwise. - - Notes - ----- - Each NumPy module exposes `test` in its namespace to run all tests for - it. For example, to run all tests for numpy.lib: - - >>> np.lib.test() #doctest: +SKIP - - Examples - -------- - >>> result = np.lib.test() #doctest: +SKIP - ... - 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds - >>> result - True - - """ - import pytest - import warnings - - #FIXME This is no longer needed? Assume it was for use in tests. - # cap verbosity at 3, which is equivalent to the pytest '-vv' option - #from . import utils - #verbose = min(int(verbose), 3) - #utils.verbose = verbose - # - - module = sys.modules[self.module_name] - module_path = os.path.abspath(module.__path__[0]) - - # setup the pytest arguments - pytest_args = ["-l"] - - # offset verbosity. The "-q" cancels a "-v". - pytest_args += ["-q"] - - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - with warnings.catch_warnings(): - warnings.simplefilter("always") - from numpy.distutils import cpuinfo - - # Filter out annoying import messages. Want these in both develop and - # release mode. - pytest_args += [ - "-W ignore:Not importing directory", - "-W ignore:numpy.dtype size changed", - "-W ignore:numpy.ufunc size changed", - "-W ignore::UserWarning:cpuinfo", - ] - - # When testing matrices, ignore their PendingDeprecationWarnings - pytest_args += [ - "-W ignore:the matrix subclass is not", - ] - - # Ignore python2.7 -3 warnings - pytest_args += [ - r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning", - r"-W ignore:in 3\.x, __setslice__:DeprecationWarning", - r"-W ignore:in 3\.x, __getslice__:DeprecationWarning", - r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning", - r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning", - r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning", - r"-W ignore:the commands module has been removed in Python 3\.0:DeprecationWarning", - r"-W ignore:The 'new' module has been removed in Python 3\.0:DeprecationWarning", - ] - - - if doctests: - raise ValueError("Doctests not supported") - - if extra_argv: - pytest_args += list(extra_argv) - - if verbose > 1: - pytest_args += ["-" + "v"*(verbose - 1)] - - if coverage: - pytest_args += ["--cov=" + module_path] - - if label == "fast": - pytest_args += ["-m", "not slow"] - elif label != "full": - pytest_args += ["-m", label] - - if durations >= 0: - pytest_args += ["--durations=%s" % durations] - - if tests is None: - tests = [self.module_name] - - pytest_args += ["--pyargs"] + list(tests) - - - # run tests. 
- _show_numpy_info() - - try: - code = pytest.main(pytest_args) - except SystemExit as exc: - code = exc.code - - return code == 0 diff --git a/venv/lib/python3.7/site-packages/numpy/compat/__init__.py b/venv/lib/python3.7/site-packages/numpy/compat/__init__.py deleted file mode 100644 index 5b371f5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Compatibility module. - -This module contains duplicated code from Python itself or 3rd party -extensions, which may be included for the following reasons: - - * compatibility - * we may only need a small subset of the copied library/module - -""" -from __future__ import division, absolute_import, print_function - -from . import _inspect -from . import py3k -from ._inspect import getargspec, formatargspec -from .py3k import * - -__all__ = [] -__all__.extend(_inspect.__all__) -__all__.extend(py3k.__all__) diff --git a/venv/lib/python3.7/site-packages/numpy/compat/_inspect.py b/venv/lib/python3.7/site-packages/numpy/compat/_inspect.py deleted file mode 100644 index 439d0d2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/_inspect.py +++ /dev/null @@ -1,193 +0,0 @@ -"""Subset of inspect module from upstream python - -We use this instead of upstream because upstream inspect is slow to import, and -significantly contributes to numpy import times. Importing this copy has almost -no overhead. - -""" -from __future__ import division, absolute_import, print_function - -import types - -__all__ = ['getargspec', 'formatargspec'] - -# ----------------------------------------------------------- type-checking -def ismethod(object): - """Return true if the object is an instance method. - - Instance method objects provide these attributes: - __doc__ documentation string - __name__ name with which this method was defined - im_class class object in which this method belongs - im_func function object containing implementation of method - im_self instance to which this method is bound, or None - - """ - return isinstance(object, types.MethodType) - -def isfunction(object): - """Return true if the object is a user-defined function. - - Function objects provide these attributes: - __doc__ documentation string - __name__ name with which this function was defined - func_code code object containing compiled function bytecode - func_defaults tuple of any default values for arguments - func_doc (same as __doc__) - func_globals global namespace in which this function was defined - func_name (same as __name__) - - """ - return isinstance(object, types.FunctionType) - -def iscode(object): - """Return true if the object is a code object. - - Code objects provide these attributes: - co_argcount number of arguments (not including * or ** args) - co_code string of raw compiled bytecode - co_consts tuple of constants used in the bytecode - co_filename name of file in which this code object was created - co_firstlineno number of first line in Python source code - co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg - co_lnotab encoded mapping of line numbers to bytecode indices - co_name name with which this code object was defined - co_names tuple of names of local variables - co_nlocals number of local variables - co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables - - """ - return isinstance(object, types.CodeType) - -# ------------------------------------------------ argument list extraction -# These constants are from Python's compile.h. 
-CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 - -def getargs(co): - """Get information about the arguments accepted by a code object. - - Three things are returned: (args, varargs, varkw), where 'args' is - a list of argument names (possibly containing nested lists), and - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - - """ - - if not iscode(co): - raise TypeError('arg is not a code object') - - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - - # The following acrobatics are for anonymous (tuple) arguments. - # Which we do not need to support, so remove to avoid importing - # the dis module. - for i in range(nargs): - if args[i][:1] in ['', '.']: - raise TypeError("tuple function arguments are not supported") - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - return args, varargs, varkw - -def getargspec(func): - """Get the names and default values of a function's arguments. - - A tuple of four things is returned: (args, varargs, varkw, defaults). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'defaults' is an n-tuple of the default values of the last n arguments. - - """ - - if ismethod(func): - func = func.__func__ - if not isfunction(func): - raise TypeError('arg is not a Python function') - args, varargs, varkw = getargs(func.__code__) - return args, varargs, varkw, func.__defaults__ - -def getargvalues(frame): - """Get information about arguments passed into a particular frame. - - A tuple of four things is returned: (args, varargs, varkw, locals). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'locals' is the locals dictionary of the given frame. - - """ - args, varargs, varkw = getargs(frame.f_code) - return args, varargs, varkw, frame.f_locals - -def joinseq(seq): - if len(seq) == 1: - return '(' + seq[0] + ',)' - else: - return '(' + ', '.join(seq) + ')' - -def strseq(object, convert, join=joinseq): - """Recursively walk a sequence, stringifying each element. - - """ - if type(object) in [list, tuple]: - return join([strseq(_o, convert, join) for _o in object]) - else: - return convert(object) - -def formatargspec(args, varargs=None, varkw=None, defaults=None, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargspec. - - The first four arguments are (args, varargs, varkw, defaults). The - other four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments. 
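# Usage sketch for the vendored getargspec above; getargspec_like is a
# hypothetical, simplified stand-in (no *args/**kwargs handling) so the
# snippet runs on its own.
import types

def getargspec_like(func):
    if isinstance(func, types.MethodType):   # unwrap bound methods first
        func = func.__func__
    co = func.__code__
    return list(co.co_varnames[:co.co_argcount]), func.__defaults__

def f(x, y=2, z=3):
    return x + y + z

print(getargspec_like(f))   # (['x', 'y', 'z'], (2, 3))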
- - """ - specs = [] - if defaults: - firstdefault = len(args) - len(defaults) - for i in range(len(args)): - spec = strseq(args[i], formatarg, join) - if defaults and i >= firstdefault: - spec = spec + formatvalue(defaults[i - firstdefault]) - specs.append(spec) - if varargs is not None: - specs.append(formatvarargs(varargs)) - if varkw is not None: - specs.append(formatvarkw(varkw)) - return '(' + ', '.join(specs) + ')' - -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargvalues. - - The first four arguments are (args, varargs, varkw, locals). The - next four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments. - - """ - def convert(name, locals=locals, - formatarg=formatarg, formatvalue=formatvalue): - return formatarg(name) + formatvalue(locals[name]) - specs = [strseq(arg, convert, join) for arg in args] - - if varargs: - specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) - if varkw: - specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) - return '(' + ', '.join(specs) + ')' diff --git a/venv/lib/python3.7/site-packages/numpy/compat/py3k.py b/venv/lib/python3.7/site-packages/numpy/compat/py3k.py deleted file mode 100644 index 90e17d6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/py3k.py +++ /dev/null @@ -1,253 +0,0 @@ -""" -Python 3.X compatibility tools. - -While this file was originally intented for Python 2 -> 3 transition, -it is now used to create a compatibility layer between different -minor versions of Python 3. - -While the active version of numpy may not support a given version of python, we -allow downstream libraries to continue to use these shims for forward -compatibility with numpy while they transition their code to newer versions of -Python. 
-""" -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', - 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', - 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] - -import sys -import os -try: - from pathlib import Path, PurePath -except ImportError: - Path = PurePath = None - -if sys.version_info[0] >= 3: - import io - - try: - import pickle5 as pickle - except ImportError: - import pickle - - long = int - integer_types = (int,) - basestring = str - unicode = str - bytes = bytes - - def asunicode(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - - def asbytes(s): - if isinstance(s, bytes): - return s - return str(s).encode('latin1') - - def asstr(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - - def isfileobj(f): - return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - - def sixu(s): - return s - - strchar = 'U' - -else: - import cpickle as pickle - - bytes = str - long = long - basestring = basestring - unicode = unicode - integer_types = (int, long) - asbytes = str - asstr = str - strchar = 'S' - - def isfileobj(f): - return isinstance(f, file) - - def asunicode(s): - if isinstance(s, unicode): - return s - return str(s).decode('ascii') - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode) - - def sixu(s): - return unicode(s, 'unicode_escape') - -def getexception(): - return sys.exc_info()[1] - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return asunicode(x) - -def is_pathlib_path(obj): - """ - Check whether obj is a pathlib.Path object. - - Prefer using `isinstance(obj, os_PathLike)` instead of this function. - """ - return Path is not None and isinstance(obj, Path) - -# from Python 3.7 -class contextlib_nullcontext(object): - """Context manager that does no additional processing. - - Used as a stand-in for a normal context manager, when a particular - block of code is only sometimes used with a normal context manager: - - cm = optional_cm if condition else nullcontext() - with cm: - # Perform operation, using optional_cm if condition is True - """ - - def __init__(self, enter_result=None): - self.enter_result = enter_result - - def __enter__(self): - return self.enter_result - - def __exit__(self, *excinfo): - pass - - -if sys.version_info[0] >= 3 and sys.version_info[1] >= 4: - def npy_load_module(name, fn, info=None): - """ - Load a module. - - .. versionadded:: 1.11.2 - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Only here for backward compatibility with Python 2.*. - - Returns - ------- - mod : module - - """ - import importlib.machinery - return importlib.machinery.SourceFileLoader(name, fn).load_module() -else: - def npy_load_module(name, fn, info=None): - """ - Load a module. - - .. versionadded:: 1.11.2 - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. 
- info : tuple, optional - Information as returned by `imp.find_module` - (suffix, mode, type). - - Returns - ------- - mod : module - - """ - import imp - if info is None: - path = os.path.dirname(fn) - fo, fn, info = imp.find_module(name, [path]) - else: - fo = open(fn, info[1]) - try: - mod = imp.load_module(name, fo, fn, info) - finally: - fo.close() - return mod - -# backport abc.ABC -import abc -if sys.version_info[:2] >= (3, 4): - abc_ABC = abc.ABC -else: - abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) - - -# Backport os.fs_path, os.PathLike, and PurePath.__fspath__ -if sys.version_info[:2] >= (3, 6): - os_fspath = os.fspath - os_PathLike = os.PathLike -else: - def _PurePath__fspath__(self): - return str(self) - - class os_PathLike(abc_ABC): - """Abstract base class for implementing the file system path protocol.""" - - @abc.abstractmethod - def __fspath__(self): - """Return the file system path representation of the object.""" - raise NotImplementedError - - @classmethod - def __subclasshook__(cls, subclass): - if PurePath is not None and issubclass(subclass, PurePath): - return True - return hasattr(subclass, '__fspath__') - - - def os_fspath(path): - """Return the path representation of a path-like object. - If str or bytes is passed in, it is returned unchanged. Otherwise the - os.PathLike interface is used to get the path representation. If the - path representation is not str or bytes, TypeError is raised. If the - provided path is not str, bytes, or os.PathLike, TypeError is raised. - """ - if isinstance(path, (str, bytes)): - return path - - # Work from the object's type to match method resolution of other magic - # methods. - path_type = type(path) - try: - path_repr = path_type.__fspath__(path) - except AttributeError: - if hasattr(path_type, '__fspath__'): - raise - elif PurePath is not None and issubclass(path_type, PurePath): - return _PurePath__fspath__(path) - else: - raise TypeError("expected str, bytes or os.PathLike object, " - "not " + path_type.__name__) - if isinstance(path_repr, (str, bytes)): - return path_repr - else: - raise TypeError("expected {}.__fspath__() to return str or bytes, " - "not {}".format(path_type.__name__, - type(path_repr).__name__)) diff --git a/venv/lib/python3.7/site-packages/numpy/compat/setup.py b/venv/lib/python3.7/site-packages/numpy/compat/setup.py deleted file mode 100644 index 8828574..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('compat', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/compat/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/compat/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/compat/tests/test_compat.py b/venv/lib/python3.7/site-packages/numpy/compat/tests/test_compat.py deleted file mode 100644 index 1543aaf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/compat/tests/test_compat.py +++ /dev/null @@ -1,21 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from os.path import join - -from numpy.compat import isfileobj -from numpy.testing import assert_ -from numpy.testing 
import tempdir - - -def test_isfileobj(): - with tempdir(prefix="numpy_test_compat_") as folder: - filename = join(folder, 'a.bin') - - with open(filename, 'wb') as f: - assert_(isfileobj(f)) - - with open(filename, 'ab') as f: - assert_(isfileobj(f)) - - with open(filename, 'rb') as f: - assert_(isfileobj(f)) diff --git a/venv/lib/python3.7/site-packages/numpy/conftest.py b/venv/lib/python3.7/site-packages/numpy/conftest.py deleted file mode 100644 index 1baf4ad..0000000 --- a/venv/lib/python3.7/site-packages/numpy/conftest.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Pytest configuration and fixtures for the Numpy test suite. -""" -from __future__ import division, absolute_import, print_function - -import os - -import pytest -import numpy - -from numpy.core._multiarray_tests import get_fpu_mode - - -_old_fpu_mode = None -_collect_results = {} - - -def pytest_configure(config): - config.addinivalue_line("markers", - "valgrind_error: Tests that are known to error under valgrind.") - config.addinivalue_line("markers", - "leaks_references: Tests that are known to leak references.") - config.addinivalue_line("markers", - "slow: Tests that are very slow.") - - -def pytest_addoption(parser): - parser.addoption("--available-memory", action="store", default=None, - help=("Set amount of memory available for running the " - "test suite. This can result to tests requiring " - "especially large amounts of memory to be skipped. " - "Equivalent to setting environment variable " - "NPY_AVAILABLE_MEM. Default: determined" - "automatically.")) - - -def pytest_sessionstart(session): - available_mem = session.config.getoption('available_memory') - if available_mem is not None: - os.environ['NPY_AVAILABLE_MEM'] = available_mem - - -#FIXME when yield tests are gone. -@pytest.hookimpl() -def pytest_itemcollected(item): - """ - Check FPU precision mode was not changed during test collection. - - The clumsy way we do it here is mainly necessary because numpy - still uses yield tests, which can execute code at test collection - time. - """ - global _old_fpu_mode - - mode = get_fpu_mode() - - if _old_fpu_mode is None: - _old_fpu_mode = mode - elif mode != _old_fpu_mode: - _collect_results[item] = (_old_fpu_mode, mode) - _old_fpu_mode = mode - - -@pytest.fixture(scope="function", autouse=True) -def check_fpu_mode(request): - """ - Check FPU precision mode was not changed during the test. - """ - old_mode = get_fpu_mode() - yield - new_mode = get_fpu_mode() - - if old_mode != new_mode: - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " during the test".format(old_mode, new_mode)) - - collect_result = _collect_results.get(request.node) - if collect_result is not None: - old_mode, new_mode = collect_result - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " when collecting the test".format(old_mode, - new_mode)) - - -@pytest.fixture(autouse=True) -def add_np(doctest_namespace): - doctest_namespace['np'] = numpy diff --git a/venv/lib/python3.7/site-packages/numpy/core/__init__.py b/venv/lib/python3.7/site-packages/numpy/core/__init__.py deleted file mode 100644 index c3b3f03..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/__init__.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. - -Please note that this module is private. All functions and objects -are available in the main ``numpy`` namespace - use that instead. 
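# A condensed, standalone sketch of the conftest pattern above: register a
# marker in pytest_configure and use an autouse fixture to assert an
# invariant around every test (a toy cwd check here instead of the FPU mode).
import os
import pytest

def pytest_configure(config):
    config.addinivalue_line("markers", "slow: Tests that are very slow.")

@pytest.fixture(autouse=True)
def check_cwd_unchanged():
    before = os.getcwd()
    yield
    assert os.getcwd() == before, "test changed the working directory"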
- -""" - -from __future__ import division, absolute_import, print_function - -from numpy.version import version as __version__ - -import os - -# disables OpenBLAS affinity setting of the main thread that limits -# python threads or processes to one core -env_added = [] -for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: - if envkey not in os.environ: - os.environ[envkey] = '1' - env_added.append(envkey) - -try: - from . import multiarray -except ImportError as exc: - import sys - msg = """ - -IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! - -Importing the numpy c-extensions failed. -- Try uninstalling and reinstalling numpy. -- If you have already done that, then: - 1. Check that you expected to use Python%d.%d from "%s", - and that you have no directories in your PATH or PYTHONPATH that can - interfere with the Python and numpy version "%s" you're trying to use. - 2. If (1) looks fine, you can open a new issue at - https://github.com/numpy/numpy/issues. Please include details on: - - how you installed Python - - how you installed numpy - - your operating system - - whether or not you have multiple versions of Python installed - - if you built from source, your compiler versions and ideally a build log - -- If you're working with a numpy git repository, try `git clean -xdf` - (removes all files not under version control) and rebuild numpy. - -Note: this error has many possible causes, so please don't comment on -an existing issue about this - open a new one instead. - -Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) - raise ImportError(msg) -finally: - for envkey in env_added: - del os.environ[envkey] -del envkey -del env_added -del os - -from . import umath - -# Check that multiarray,umath are pure python modules wrapping -# _multiarray_umath and not either of the old c-extension modules -if not (hasattr(multiarray, '_multiarray_umath') and - hasattr(umath, '_multiarray_umath')): - import sys - path = sys.modules['numpy'].__path__ - msg = ("Something is wrong with the numpy installation. " - "While importing we detected an older version of " - "numpy in {}. One method of fixing this is to repeatedly uninstall " - "numpy until none is found, then reinstall this version.") - raise ImportError(msg.format(path)) - -from . import numerictypes as nt -multiarray.set_typeDict(nt.sctypeDict) -from . import numeric -from .numeric import * -from . import fromnumeric -from .fromnumeric import * -from . import defchararray as char -from . import records as rec -from .records import * -from .memmap import * -from .defchararray import chararray -from . import function_base -from .function_base import * -from . import machar -from .machar import * -from . import getlimits -from .getlimits import * -from . import shape_base -from .shape_base import * -from . import einsumfunc -from .einsumfunc import * -del nt - -from .fromnumeric import amax as max, amin as min, round_ as round -from .numeric import absolute as abs - -# do this after everything else, to minimize the chance of this misleadingly -# appearing in an import-time traceback -from . import _add_newdocs -# add these for module-freeze analysis (like PyInstaller) -from . import _dtype_ctypes -from . import _internal -from . import _dtype -from . 
import _methods - -__all__ = ['char', 'rec', 'memmap'] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += rec.__all__ -__all__ += ['chararray'] -__all__ += function_base.__all__ -__all__ += machar.__all__ -__all__ += getlimits.__all__ -__all__ += shape_base.__all__ -__all__ += einsumfunc.__all__ - -# Make it possible so that ufuncs can be pickled -# Here are the loading and unloading functions -# The name numpy.core._ufunc_reconstruct must be -# available for unpickling to work. -def _ufunc_reconstruct(module, name): - # The `fromlist` kwarg is required to ensure that `mod` points to the - # inner-most module rather than the parent package when module name is - # nested. This makes it possible to pickle non-toplevel ufuncs such as - # scipy.special.expit for instance. - mod = __import__(module, fromlist=[name]) - return getattr(mod, name) - -def _ufunc_reduce(func): - from pickle import whichmodule - name = func.__name__ - return _ufunc_reconstruct, (whichmodule(func, name), name) - - -import sys -if sys.version_info[0] >= 3: - import copyreg -else: - import copy_reg as copyreg - -copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct) -# Unclutter namespace (must keep _ufunc_reconstruct for unpickling) -del copyreg -del sys -del _ufunc_reduce - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/core/_add_newdocs.py b/venv/lib/python3.7/site-packages/numpy/core/_add_newdocs.py deleted file mode 100644 index 2f12739..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_add_newdocs.py +++ /dev/null @@ -1,6874 +0,0 @@ -""" -This is only meant to add docs to objects defined in C-extension modules. -The purpose is to allow easier editing of the docstrings without -requiring a re-compile. - -NOTE: Many of the methods of ndarray have corresponding functions. - If you update these docstrings, please keep also the ones in - core/fromnumeric.py, core/defmatrix.py up-to-date. - -""" -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.core import numerictypes as _numerictypes -from numpy.core import dtype -from numpy.core.function_base import add_newdoc - -############################################################################### -# -# flatiter -# -# flatiter needs a toplevel description -# -############################################################################### - -add_newdoc('numpy.core', 'flatiter', - """ - Flat iterator object to iterate over arrays. - - A `flatiter` iterator is returned by ``x.flat`` for any array `x`. - It allows iterating over the array as if it were a 1-D array, - either in a for-loop or by calling its `next` method. - - Iteration is done in row-major, C-style order (the last - index varying the fastest). The iterator can also be indexed using - basic slicing or advanced indexing. - - See Also - -------- - ndarray.flat : Return a flat iterator over an array. - ndarray.flatten : Returns a flattened copy of an array. - - Notes - ----- - A `flatiter` iterator can not be constructed directly from Python code - by calling the `flatiter` constructor. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> type(fl) - - >>> for item in fl: - ... print(item) - ... - 0 - 1 - 2 - 3 - 4 - 5 - - >>> fl[2:4] - array([2, 3]) - - """) - -# flatiter attributes - -add_newdoc('numpy.core', 'flatiter', ('base', - """ - A reference to the array that is iterated over. 
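# The copyreg registration above is what makes ufuncs picklable by name;
# a quick round trip showing the effect:
import pickle
import numpy as np

blob = pickle.dumps(np.add)
assert pickle.loads(blob) is np.add    # reconstructed via module + name lookup
print(pickle.loads(blob)(2, 3))        # 5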
- - Examples - -------- - >>> x = np.arange(5) - >>> fl = x.flat - >>> fl.base is x - True - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('coords', - """ - An N-dimensional tuple of current coordinates. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.coords - (0, 0) - >>> next(fl) - 0 - >>> fl.coords - (0, 1) - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('index', - """ - Current flat index into the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.index - 0 - >>> next(fl) - 0 - >>> fl.index - 1 - - """)) - -# flatiter functions - -add_newdoc('numpy.core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator - - """)) - - -add_newdoc('numpy.core', 'flatiter', ('copy', - """ - copy() - - Get a copy of the iterator as a 1-D array. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> fl = x.flat - >>> fl.copy() - array([0, 1, 2, 3, 4, 5]) - - """)) - - -############################################################################### -# -# nditer -# -############################################################################### - -add_newdoc('numpy.core', 'nditer', - """ - Efficient multi-dimensional iterator object to iterate over arrays. - To get started using this object, see the - :ref:`introductory guide to array iteration `. - - Parameters - ---------- - op : ndarray or sequence of array_like - The array(s) to iterate over. - - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * ``buffered`` enables buffering when required. - * ``c_index`` causes a C-order index to be tracked. - * ``f_index`` causes a Fortran-order index to be tracked. - * ``multi_index`` causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. - * ``common_dtype`` causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * ``copy_if_overlap`` causes the iterator to determine if read - operands have overlap with write operands, and make temporary - copies as necessary to avoid overlap. False positives (needless - copying) are possible in some cases. - * ``delay_bufalloc`` delays allocation of the buffers until - a reset() call is made. Allows ``allocate`` operands to - be initialized before their values are copied into the buffers. - * ``external_loop`` causes the ``values`` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. - * ``grow_inner`` allows the ``value`` array sizes to be made - larger than the buffer size when both ``buffered`` and - ``external_loop`` is used. - * ``ranged`` allows the iterator to be restricted to a sub-range - of the iterindex values. - * ``refs_ok`` enables iteration of reference types, such as - object arrays. - * ``reduce_ok`` enables iteration of ``readwrite`` operands - which are broadcasted, also known as reduction operands. - * ``zerosize_ok`` allows `itersize` to be zero. - op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - ``readonly``, ``readwrite``, or ``writeonly`` must be specified. - - * ``readonly`` indicates the operand will only be read from. - * ``readwrite`` indicates the operand will be read from and written to. - * ``writeonly`` indicates the operand will only be written to. - * ``no_broadcast`` prevents the operand from being broadcasted. 
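# flat iteration in practice -- index and coords track the position as the
# iterator advances:
import numpy as np

x = np.arange(6).reshape(2, 3)
fl = x.flat
print(next(fl), fl.index, fl.coords)   # 0 1 (0, 1)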
- * ``contig`` forces the operand data to be contiguous. - * ``aligned`` forces the operand data to be aligned. - * ``nbo`` forces the operand data to be in native byte order. - * ``copy`` allows a temporary read-only copy if required. - * ``updateifcopy`` allows a temporary read-write copy if required. - * ``allocate`` causes the array to be allocated if it is None - in the ``op`` parameter. - * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. - * ``arraymask`` indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * ``writemasked`` indicates that only elements where the chosen - ``arraymask`` operand is True will be written to. - * ``overlap_assume_elementwise`` can be used to mark operands that are - accessed only in the iterator order, to allow less conservative - copying when ``copy_if_overlap`` is present. - op_dtypes : dtype or tuple of dtype(s), optional - The required data type(s) of the operands. If copying or buffering - is enabled, the data will be converted to/from their original types. - order : {'C', 'F', 'A', 'K'}, optional - Controls the iteration order. 'C' means C order, 'F' means - Fortran order, 'A' means 'F' order if all the arrays are Fortran - contiguous, 'C' order otherwise, and 'K' means as close to the - order the array elements appear in memory as possible. This also - affects the element memory order of ``allocate`` operands, as they - are allocated to be compatible with iteration order. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur when making a copy - or buffering. Setting this to 'unsafe' is not recommended, - as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - op_axes : list of list of ints, optional - If provided, is a list of ints or None for each operands. - The list of axes for an operand is a mapping from the dimensions - of the iterator to the dimensions of the operand. A value of - -1 can be placed for entries, causing that dimension to be - treated as `newaxis`. - itershape : tuple of ints, optional - The desired shape of the iterator. This allows ``allocate`` operands - with a dimension mapped by op_axes not corresponding to a dimension - of a different operand to get a value not equal to 1 for that - dimension. - buffersize : int, optional - When buffering is enabled, controls the size of the temporary - buffers. Set to 0 for the default value. - - Attributes - ---------- - dtypes : tuple of dtype(s) - The data types of the values provided in `value`. This may be - different from the operand data types if buffering is enabled. - Valid only before the iterator is closed. - finished : bool - Whether the iteration over the operands is finished or not. - has_delayed_bufalloc : bool - If True, the iterator was created with the ``delay_bufalloc`` flag, - and no reset() function was called on it yet. 
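# On-the-fly dtype conversion via the op_dtypes/casting parameters above:
# with buffering enabled, a float64 array can be iterated as float32 without
# converting it up front (float64 -> float32 needs casting='same_kind').
import numpy as np

a = np.linspace(0.0, 1.0, 5)                      # float64
for v in np.nditer(a, flags=['buffered'],
                   op_dtypes=[np.float32], casting='same_kind'):
    assert v.dtype == np.float32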
- has_index : bool - If True, the iterator was created with either the ``c_index`` or - the ``f_index`` flag, and the property `index` can be used to - retrieve it. - has_multi_index : bool - If True, the iterator was created with the ``multi_index`` flag, - and the property `multi_index` can be used to retrieve it. - index - When the ``c_index`` or ``f_index`` flag was used, this property - provides access to the index. Raises a ValueError if accessed - and ``has_index`` is False. - iterationneedsapi : bool - Whether iteration requires access to the Python API, for example - if one of the operands is an object array. - iterindex : int - An index which matches the order of iteration. - itersize : int - Size of the iterator. - itviews - Structured view(s) of `operands` in memory, matching the reordered - and optimized iterator access pattern. Valid only before the iterator - is closed. - multi_index - When the ``multi_index`` flag was used, this property - provides access to the index. Raises a ValueError if accessed - accessed and ``has_multi_index`` is False. - ndim : int - The dimensions of the iterator. - nop : int - The number of iterator operands. - operands : tuple of operand(s) - The array(s) to be iterated over. Valid only before the iterator is - closed. - shape : tuple of ints - Shape tuple, the shape of the iterator. - value - Value of ``operands`` at current iteration. Normally, this is a - tuple of array scalars, but if the flag ``external_loop`` is used, - it is a tuple of one dimensional arrays. - - Notes - ----- - `nditer` supersedes `flatiter`. The iterator implementation behind - `nditer` is also exposed by the NumPy C API. - - The Python exposure supplies two iteration interfaces, one which follows - the Python iterator protocol, and another which mirrors the C-style - do-while pattern. The native Python approach is better in most cases, but - if you need the coordinates or index of an iterator, use the C-style pattern. - - Examples - -------- - Here is how we might write an ``iter_add`` function, using the - Python iterator protocol: - - >>> def iter_add_py(x, y, out=None): - ... addop = np.add - ... it = np.nditer([x, y, out], [], - ... [['readonly'], ['readonly'], ['writeonly','allocate']]) - ... with it: - ... for (a, b, c) in it: - ... addop(a, b, out=c) - ... return it.operands[2] - - Here is the same function, but following the C-style pattern: - - >>> def iter_add(x, y, out=None): - ... addop = np.add - ... it = np.nditer([x, y, out], [], - ... [['readonly'], ['readonly'], ['writeonly','allocate']]) - ... with it: - ... while not it.finished: - ... addop(it[0], it[1], out=it[2]) - ... it.iternext() - ... return it.operands[2] - - Here is an example outer product function: - - >>> def outer_it(x, y, out=None): - ... mulop = np.multiply - ... it = np.nditer([x, y, out], ['external_loop'], - ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], - ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, - ... [-1] * x.ndim + list(range(y.ndim)), - ... None]) - ... with it: - ... for (a, b, c) in it: - ... mulop(a, b, out=c) - ... return it.operands[2] - - >>> a = np.arange(2)+1 - >>> b = np.arange(3)+1 - >>> outer_it(a,b) - array([[1, 2, 3], - [2, 4, 6]]) - - Here is an example function which operates like a "lambda" ufunc: - - >>> def luf(lamdaexpr, *args, **kwargs): - ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' - ... nargs = len(args) - ... op = (kwargs.get('out',None),) + args - ... 
it = np.nditer(op, ['buffered','external_loop'], - ... [['writeonly','allocate','no_broadcast']] + - ... [['readonly','nbo','aligned']]*nargs, - ... order=kwargs.get('order','K'), - ... casting=kwargs.get('casting','safe'), - ... buffersize=kwargs.get('buffersize',0)) - ... while not it.finished: - ... it[0] = lamdaexpr(*it[1:]) - ... it.iternext() - ... return it.operands[0] - - >>> a = np.arange(5) - >>> b = np.ones(5) - >>> luf(lambda i,j:i*i + j/2, a, b) - array([ 0.5, 1.5, 4.5, 9.5, 16.5]) - - If operand flags `"writeonly"` or `"readwrite"` are used the - operands may be views into the original data with the - `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a - context manager or the `nditer.close` method must be called before - using the result. The temporary data will be written back to the - original data when the `__exit__` function is called but not before: - - >>> a = np.arange(6, dtype='i4')[::-2] - >>> with np.nditer(a, [], - ... [['writeonly', 'updateifcopy']], - ... casting='unsafe', - ... op_dtypes=[np.dtype('f4')]) as i: - ... x = i.operands[0] - ... x[:] = [-1, -2, -3] - ... # a still unchanged here - >>> a, x - (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) - - It is important to note that once the iterator is exited, dangling - references (like `x` in the example) may or may not share data with - the original data `a`. If writeback semantics were active, i.e. if - `x.base.flags.writebackifcopy` is `True`, then exiting the iterator - will sever the connection between `x` and `a`, writing to `x` will - no longer write to `a`. If writeback semantics are not active, then - `x.data` will still point at some part of `a.data`, and writing to - one will affect the other. - - Context management and the `close` method appeared in version 1.15.0. - - """) - -# nditer methods - -add_newdoc('numpy.core', 'nditer', ('copy', - """ - copy() - - Get a copy of the iterator in its current state. - - Examples - -------- - >>> x = np.arange(10) - >>> y = x + 1 - >>> it = np.nditer([x, y]) - >>> next(it) - (array(0), array(1)) - >>> it2 = it.copy() - >>> next(it2) - (array(1), array(2)) - - """)) - -add_newdoc('numpy.core', 'nditer', ('operands', - """ - operands[`Slice`] - - The array(s) to be iterated over. Valid only before the iterator is closed. - """)) - -add_newdoc('numpy.core', 'nditer', ('debug_print', - """ - debug_print() - - Print the current state of the `nditer` instance and debug info to stdout. - - """)) - -add_newdoc('numpy.core', 'nditer', ('enable_external_loop', - """ - enable_external_loop() - - When the "external_loop" was not used during construction, but - is desired, this modifies the iterator to behave as if the flag - was specified. - - """)) - -add_newdoc('numpy.core', 'nditer', ('iternext', - """ - iternext() - - Check whether iterations are left, and perform a single internal iteration - without returning the result. Used in the C-style pattern do-while - pattern. For an example, see `nditer`. - - Returns - ------- - iternext : bool - Whether or not there are iterations left. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_axis', - """ - remove_axis(i) - - Removes axis `i` from the iterator. Requires that the flag "multi_index" - be enabled. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_multi_index', - """ - remove_multi_index() - - When the "multi_index" flag was specified, this removes it, allowing - the internal iteration structure to be optimized further. 
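# Writing through an iterator: with 'readwrite' operands, nditer should be
# used as a context manager (NumPy >= 1.15) so writeback buffers are
# flushed when the block exits.
import numpy as np

a = np.arange(5)
with np.nditer(a, op_flags=[['readwrite']]) as it:
    for v in it:
        v[...] = 2 * v
print(a)   # [0 2 4 6 8]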
- - """)) - -add_newdoc('numpy.core', 'nditer', ('reset', - """ - reset() - - Reset the iterator to its initial state. - - """)) - -add_newdoc('numpy.core', 'nested_iters', - """ - Create nditers for use in nested loops - - Create a tuple of `nditer` objects which iterate in nested loops over - different axes of the op argument. The first iterator is used in the - outermost loop, the last in the innermost loop. Advancing one will change - the subsequent iterators to point at its new element. - - Parameters - ---------- - op : ndarray or sequence of array_like - The array(s) to iterate over. - - axes : list of list of int - Each item is used as an "op_axes" argument to an nditer - - flags, op_flags, op_dtypes, order, casting, buffersize (optional) - See `nditer` parameters of the same name - - Returns - ------- - iters : tuple of nditer - An nditer for each item in `axes`, outermost first - - See Also - -------- - nditer - - Examples - -------- - - Basic usage. Note how y is the "flattened" version of - [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified - the first iter's axes as [1] - - >>> a = np.arange(12).reshape(2, 3, 2) - >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) - >>> for x in i: - ... print(i.multi_index) - ... for y in j: - ... print('', j.multi_index, y) - (0,) - (0, 0) 0 - (0, 1) 1 - (1, 0) 6 - (1, 1) 7 - (1,) - (0, 0) 2 - (0, 1) 3 - (1, 0) 8 - (1, 1) 9 - (2,) - (0, 0) 4 - (0, 1) 5 - (1, 0) 10 - (1, 1) 11 - - """) - -add_newdoc('numpy.core', 'nditer', ('close', - """ - close() - - Resolve all writeback semantics in writeable operands. - - .. versionadded:: 1.15.0 - - See Also - -------- - - :ref:`nditer-context-manager` - - """)) - - -############################################################################### -# -# broadcast -# -############################################################################### - -add_newdoc('numpy.core', 'broadcast', - """ - Produce an object that mimics broadcasting. - - Parameters - ---------- - in1, in2, ... : array_like - Input parameters. - - Returns - ------- - b : broadcast object - Broadcast the input parameters against one another, and - return an object that encapsulates the result. - Amongst others, it has ``shape`` and ``nd`` properties, and - may be used as an iterator. - - See Also - -------- - broadcast_arrays - broadcast_to - - Examples - -------- - - Manually adding two vectors, using broadcasting: - - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - - >>> out = np.empty(b.shape) - >>> out.flat = [u+v for (u,v) in b] - >>> out - array([[5., 6., 7.], - [6., 7., 8.], - [7., 8., 9.]]) - - Compare against built-in broadcasting: - - >>> x + y - array([[5, 6, 7], - [6, 7, 8], - [7, 8, 9]]) - - """) - -# attributes - -add_newdoc('numpy.core', 'broadcast', ('index', - """ - current index in broadcasted result - - Examples - -------- - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> next(b), next(b), next(b) - ((1, 4), (1, 5), (1, 6)) - >>> b.index - 3 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('iters', - """ - tuple of iterators along ``self``'s "components." - - Returns a tuple of `numpy.flatiter` objects, one for each "component" - of ``self``. 
- - See Also - -------- - numpy.flatiter - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> row, col = b.iters - >>> next(row), next(col) - (1, 4) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('ndim', - """ - Number of dimensions of broadcasted result. Alias for `nd`. - - .. versionadded:: 1.12.0 - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.ndim - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('nd', - """ - Number of dimensions of broadcasted result. For code intended for NumPy - 1.12.0 and later the more consistent `ndim` is preferred. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.nd - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('numiter', - """ - Number of iterators possessed by the broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.numiter - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('shape', - """ - Shape of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.shape - (3, 3) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('size', - """ - Total size of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.size - 9 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('reset', - """ - reset() - - Reset the broadcasted result's iterator(s). - - Parameters - ---------- - None - - Returns - ------- - None - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> next(b), next(b), next(b) - ((1, 4), (2, 4), (3, 4)) - >>> b.index - 3 - >>> b.reset() - >>> b.index - 0 - - """)) - -############################################################################### -# -# numpy functions -# -############################################################################### - -add_newdoc('numpy.core.multiarray', 'array', - """ - array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0) - - Create an array. - - Parameters - ---------- - object : array_like - An array, any object exposing the array interface, an object whose - __array__ method returns an array, or any (nested) sequence. - dtype : data-type, optional - The desired data-type for the array. If not given, then the type will - be determined as the minimum type required to hold the objects in the - sequence. - copy : bool, optional - If true (default), then the object is copied. Otherwise, a copy will - only be made if __array__ returns a copy, if obj is a nested sequence, - or if a copy is needed to satisfy any of the other requirements - (`dtype`, `order`, etc.). - order : {'K', 'A', 'C', 'F'}, optional - Specify the memory layout of the array. If object is not an array, the - newly created array will be in C order (row major) unless 'F' is - specified, in which case it will be in Fortran order (column major). - If object is an array the following holds. 
- - ===== ========= =================================================== - order no copy copy=True - ===== ========= =================================================== - 'K' unchanged F & C order preserved, otherwise most similar order - 'A' unchanged F order if input is F and not C, otherwise C order - 'C' C order C order - 'F' F order F order - ===== ========= =================================================== - - When ``copy=False`` and a copy is made for other reasons, the result is - the same as if ``copy=True``, with some exceptions for `A`, see the - Notes section. The default order is 'K'. - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned array will be forced to be a base-class array (default). - ndmin : int, optional - Specifies the minimum number of dimensions that the resulting - array should have. Ones will be pre-pended to the shape as - needed to meet this requirement. - - Returns - ------- - out : ndarray - An array object satisfying the specified requirements. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full_like : Return a new array with shape of input filled with value. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. - full : Return a new array of given shape filled with value. - - - Notes - ----- - When order is 'A' and `object` is an array in neither 'C' nor 'F' order, - and a copy is forced by a change in dtype, then the order of the result is - not necessarily 'C' as expected. This is likely a bug. - - Examples - -------- - >>> np.array([1, 2, 3]) - array([1, 2, 3]) - - Upcasting: - - >>> np.array([1, 2, 3.0]) - array([ 1., 2., 3.]) - - More than one dimension: - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - Minimum dimensions 2: - - >>> np.array([1, 2, 3], ndmin=2) - array([[1, 2, 3]]) - - Type provided: - - >>> np.array([1, 2, 3], dtype=complex) - array([ 1.+0.j, 2.+0.j, 3.+0.j]) - - Data-type consisting of more than one element: - - >>> x = np.array([(1,2),(3,4)],dtype=[('a','>> x['a'] - array([1, 3]) - - Creating an array from sub-classes: - - >>> np.array(np.mat('1 2; 3 4')) - array([[1, 2], - [3, 4]]) - - >>> np.array(np.mat('1 2; 3 4'), subok=True) - matrix([[1, 2], - [3, 4]]) - - """) - -add_newdoc('numpy.core.multiarray', 'empty', - """ - empty(shape, dtype=float, order='C') - - Return a new array of given shape and type, without initializing entries. - - Parameters - ---------- - shape : int or tuple of int - Shape of the empty array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - Desired output data-type for the array, e.g, `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional, default: 'C' - Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. - - Returns - ------- - out : ndarray - Array of uninitialized (arbitrary) data of the given shape, dtype, and - order. Object arrays will be initialized to None. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. - full : Return a new array of given shape filled with value. 
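# Two of the np.array parameters above, made concrete: copy=True is the
# default (np.asarray passes a matching ndarray through), and ndmin
# left-pads the shape with ones.
import numpy as np

a = np.arange(3)
assert np.array(a) is not a                 # a fresh copy
assert np.asarray(a) is a                   # no copy needed
print(np.array([1, 2, 3], ndmin=2).shape)   # (1, 3)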
- - - Notes - ----- - `empty`, unlike `zeros`, does not set the array values to zero, - and may therefore be marginally faster. On the other hand, it requires - the user to manually set all the values in the array, and should be - used with caution. - - Examples - -------- - >>> np.empty([2, 2]) - array([[ -9.74499359e+001, 6.69583040e-309], - [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized - - >>> np.empty([2, 2], dtype=int) - array([[-1073741821, -1067949133], - [ 496041986, 19249760]]) #uninitialized - - """) - -add_newdoc('numpy.core.multiarray', 'scalar', - """ - scalar(dtype, obj) - - Return a new scalar array of the given type initialized with obj. - - This function is meant mainly for pickle support. `dtype` must be a - valid data-type descriptor. If `dtype` corresponds to an object - descriptor, then `obj` can be any object, otherwise `obj` must be a - string. If `obj` is not given, it will be interpreted as None for object - type and as zeros for all other types. - - """) - -add_newdoc('numpy.core.multiarray', 'zeros', - """ - zeros(shape, dtype=float, order='C') - - Return a new array of given shape and type, filled with zeros. - - Parameters - ---------- - shape : int or tuple of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional, default: 'C' - Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. - - Returns - ------- - out : ndarray - Array of zeros with the given shape, dtype, and order. - - See Also - -------- - zeros_like : Return an array of zeros with shape and type of input. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - full : Return a new array of given shape filled with value. - - Examples - -------- - >>> np.zeros(5) - array([ 0., 0., 0., 0., 0.]) - - >>> np.zeros((5,), dtype=int) - array([0, 0, 0, 0, 0]) - - >>> np.zeros((2, 1)) - array([[ 0.], - [ 0.]]) - - >>> s = (2,2) - >>> np.zeros(s) - array([[ 0., 0.], - [ 0., 0.]]) - - >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype - array([(0, 0), (0, 0)], - dtype=[('x', '>> np.fromstring('1 2', dtype=int, sep=' ') - array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') - array([1, 2]) - - """) - -add_newdoc('numpy.core.multiarray', 'compare_chararrays', - """ - compare_chararrays(a, b, cmp_op, rstrip) - - Performs element-wise comparison of two string arrays using the - comparison operator specified by `cmp_op`. - - Parameters - ---------- - a, b : array_like - Arrays to be compared. - cmp_op : {"<", "<=", "==", ">=", ">", "!="} - Type of comparison. - rstrip : Boolean - If True, the spaces at the end of Strings are removed before the comparison. - - Returns - ------- - out : ndarray - The output array of type Boolean with the same shape as a and b. - - Raises - ------ - ValueError - If `cmp_op` is not valid. - TypeError - If at least one of `a` or `b` is a non-string array - - Examples - -------- - >>> a = np.array(["a", "b", "cde"]) - >>> b = np.array(["a", "a", "dec"]) - >>> np.compare_chararrays(a, b, ">", True) - array([False, True, False]) - - """) - -add_newdoc('numpy.core.multiarray', 'fromiter', - """ - fromiter(iterable, dtype, count=-1) - - Create a new 1-dimensional array from an iterable object. - - Parameters - ---------- - iterable : iterable object - An iterable object providing data for the array. 
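# Passing count to fromiter, as its Notes section recommends, lets NumPy
# allocate the output once instead of growing it on demand:
import numpy as np

squares = np.fromiter((i * i for i in range(5)), dtype=float, count=5)
print(squares)   # [ 0.  1.  4.  9. 16.]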
- dtype : data-type - The data-type of the returned array. - count : int, optional - The number of items to read from *iterable*. The default is -1, - which means all data is read. - - Returns - ------- - out : ndarray - The output array. - - Notes - ----- - Specify `count` to improve performance. It allows ``fromiter`` to - pre-allocate the output array, instead of resizing it on demand. - - Examples - -------- - >>> iterable = (x*x for x in range(5)) - >>> np.fromiter(iterable, float) - array([ 0., 1., 4., 9., 16.]) - - """) - -add_newdoc('numpy.core.multiarray', 'fromfile', - """ - fromfile(file, dtype=float, count=-1, sep='', offset=0) - - Construct an array from data in a text or binary file. - - A highly efficient way of reading binary data with a known data-type, - as well as parsing simply formatted text files. Data written using the - `tofile` method can be read using this function. - - Parameters - ---------- - file : file or str or Path - Open file object or filename. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - - dtype : data-type - Data type of the returned array. - For binary files, it is used to determine the size and byte-order - of the items in the file. - Most builtin numeric types are supported and extension types may be supported. - - .. versionadded:: 1.18.0 - Complex dtypes. - - count : int - Number of items to read. ``-1`` means all items (i.e., the complete - file). - sep : str - Separator between items if file is a text file. - Empty ("") separator means the file should be treated as binary. - Spaces (" ") in the separator match zero or more whitespace characters. - A separator consisting only of spaces must match at least one - whitespace. - offset : int - The offset (in bytes) from the file's current position. Defaults to 0. - Only permitted for binary files. - - .. versionadded:: 1.17.0 - - See also - -------- - load, save - ndarray.tofile - loadtxt : More flexible way of loading data from a text file. - - Notes - ----- - Do not rely on the combination of `tofile` and `fromfile` for - data storage, as the binary files generated are not platform - independent. In particular, no byte-order or data-type information is - saved. Data can be stored in the platform independent ``.npy`` format - using `save` and `load` instead. - - Examples - -------- - Construct an ndarray: - - >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), - ... ('temp', float)]) - >>> x = np.zeros((1,), dtype=dt) - >>> x['time']['min'] = 10; x['temp'] = 98.25 - >>> x - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> import tempfile - >>> fname = tempfile.mkstemp()[1] - >>> x.tofile(fname) - - Read the raw data from disk: - - >>> np.fromfile(fname, dtype=dt) - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> np.save(fname, x) - >>> np.load(fname + '.npy') - array([((10, 0), 98.25)], - dtype=[('time', [('min', '>> dt = np.dtype(int) - >>> dt = dt.newbyteorder('>') - >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP - - The data of the resulting array will not be byteswapped, but will be - interpreted correctly. 
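# Making byte order explicit when parsing raw bytes, per the note above:
# '<u2' reads little-endian unsigned 16-bit integers on any platform.
import numpy as np

raw = b'\x01\x00\x02\x00'
print(np.frombuffer(raw, dtype='<u2'))   # [1 2]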
- - Examples - -------- - >>> s = b'hello world' - >>> np.frombuffer(s, dtype='S1', count=5, offset=6) - array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') - - >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) - array([1, 2], dtype=uint8) - >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) - array([1, 2, 3], dtype=uint8) - - """) - -add_newdoc('numpy.core', 'fastCopyAndTranspose', - """_fastCopyAndTranspose(a)""") - -add_newdoc('numpy.core.multiarray', 'correlate', - """cross_correlate(a,v, mode=0)""") - -add_newdoc('numpy.core.multiarray', 'arange', - """ - arange([start,] stop[, step,], dtype=None) - - Return evenly spaced values within a given interval. - - Values are generated within the half-open interval ``[start, stop)`` - (in other words, the interval including `start` but excluding `stop`). - For integer arguments the function is equivalent to the Python built-in - `range` function, but returns an ndarray rather than a list. - - When using a non-integer step, such as 0.1, the results will often not - be consistent. It is better to use `numpy.linspace` for these cases. - - Parameters - ---------- - start : number, optional - Start of interval. The interval includes this value. The default - start value is 0. - stop : number - End of interval. The interval does not include this value, except - in some cases where `step` is not an integer and floating point - round-off affects the length of `out`. - step : number, optional - Spacing between values. For any output `out`, this is the distance - between two adjacent values, ``out[i+1] - out[i]``. The default - step size is 1. If `step` is specified as a position argument, - `start` must also be given. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - Returns - ------- - arange : ndarray - Array of evenly spaced values. - - For floating point arguments, the length of the result is - ``ceil((stop - start)/step)``. Because of floating point overflow, - this rule may result in the last element of `out` being greater - than `stop`. - - See Also - -------- - numpy.linspace : Evenly spaced numbers with careful handling of endpoints. - numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions. - numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. - - Examples - -------- - >>> np.arange(3) - array([0, 1, 2]) - >>> np.arange(3.0) - array([ 0., 1., 2.]) - >>> np.arange(3,7) - array([3, 4, 5, 6]) - >>> np.arange(3,7,2) - array([3, 5]) - - """) - -add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', - """_get_ndarray_c_version() - - Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number. - - """) - -add_newdoc('numpy.core.multiarray', '_reconstruct', - """_reconstruct(subtype, shape, dtype) - - Construct an empty array. Used by Pickles. - - """) - - -add_newdoc('numpy.core.multiarray', 'set_string_function', - """ - set_string_function(f, repr=1) - - Internal method to set a function to be used when pretty printing arrays. - - """) - -add_newdoc('numpy.core.multiarray', 'set_numeric_ops', - """ - set_numeric_ops(op1=func1, op2=func2, ...) - - Set numerical operators for array objects. - - .. deprecated:: 1.16 - - For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. - For ndarray subclasses, define the ``__array_ufunc__`` method and - override the relevant ufunc. - - Parameters - ---------- - op1, op2, ... : callable - Each ``op = func`` pair describes an operator to be replaced. 
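# The float-step caveat in the arange docstring above, made concrete: the
# output length follows ceil((stop - start)/step) under round-off, so an
# extra element can appear. linspace sidesteps this. (Exact counts assume
# IEEE-754 doubles.)
import numpy as np

print(len(np.arange(0, 0.9, 0.3)))               # 4, not the 3 one might expect
print(np.linspace(0, 0.9, 3, endpoint=False))    # [0.  0.3 0.6]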
- For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace - addition by modulus 5 addition. - - Returns - ------- - saved_ops : list of callables - A list of all operators, stored before making replacements. - - Notes - ----- - .. WARNING:: - Use with care! Incorrect usage may lead to memory errors. - - A function replacing an operator cannot make use of that operator. - For example, when replacing add, you may not use ``+``. Instead, - directly call ufuncs. - - Examples - -------- - >>> def add_mod5(x, y): - ... return np.add(x, y) % 5 - ... - >>> old_funcs = np.set_numeric_ops(add=add_mod5) - - >>> x = np.arange(12).reshape((3, 4)) - >>> x + x - array([[0, 2, 4, 1], - [3, 0, 2, 4], - [1, 3, 0, 2]]) - - >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators - - """) - -add_newdoc('numpy.core.multiarray', 'promote_types', - """ - promote_types(type1, type2) - - Returns the data type with the smallest size and smallest scalar - kind to which both ``type1`` and ``type2`` may be safely cast. - The returned data type is always in native byte order. - - This function is symmetric, but rarely associative. - - Parameters - ---------- - type1 : dtype or dtype specifier - First data type. - type2 : dtype or dtype specifier - Second data type. - - Returns - ------- - out : dtype - The promoted data type. - - Notes - ----- - .. versionadded:: 1.6.0 - - Starting in NumPy 1.9, promote_types function now returns a valid string - length when given an integer or float dtype as one argument and a string - dtype as another argument. Previously it always returned the input string - dtype, even if it wasn't long enough to store the max integer/float value - converted to a string. - - See Also - -------- - result_type, dtype, can_cast - - Examples - -------- - >>> np.promote_types('f4', 'f8') - dtype('float64') - - >>> np.promote_types('i8', 'f4') - dtype('float64') - - >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') - dtype('S11') - - An example of a non-associative case: - - >>> p = np.promote_types - >>> p('S', p('i1', 'u1')) - dtype('S6') - >>> p(p('S', 'i1'), 'u1') - dtype('S4') - - """) - -if sys.version_info.major < 3: - add_newdoc('numpy.core.multiarray', 'newbuffer', - """ - newbuffer(size) - - Return a new uninitialized buffer object. - - Parameters - ---------- - size : int - Size in bytes of returned buffer object. - - Returns - ------- - newbuffer : buffer object - Returned, uninitialized buffer object of `size` bytes. - - """) - - add_newdoc('numpy.core.multiarray', 'getbuffer', - """ - getbuffer(obj [,offset[, size]]) - - Create a buffer object from the given object referencing a slice of - length size starting at offset. - - Default is the entire buffer. A read-write buffer is attempted followed - by a read-only buffer. - - Parameters - ---------- - obj : object - - offset : int, optional - - size : int, optional - - Returns - ------- - buffer_obj : buffer - - Examples - -------- - >>> buf = np.getbuffer(np.ones(5), 1, 3) - >>> len(buf) - 3 - >>> buf[0] - '\\x00' - >>> buf - - - """) - -add_newdoc('numpy.core.multiarray', 'c_einsum', - """ - c_einsum(subscripts, *operands, out=None, dtype=None, order='K', - casting='safe') - - *This documentation shadows that of the native python implementation of the `einsum` function, - except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* - - Evaluates the Einstein summation convention on the operands. 
- - Using the Einstein summation convention, many common multi-dimensional, - linear algebraic array operations can be represented in a simple fashion. - In *implicit* mode `einsum` computes these values. - - In *explicit* mode, `einsum` provides further flexibility to compute - other array operations that might not be considered classical Einstein - summation operations, by disabling, or forcing summation over specified - subscript labels. - - See the notes and examples for clarification. - - Parameters - ---------- - subscripts : str - Specifies the subscripts for summation as comma separated list of - subscript labels. An implicit (classical Einstein summation) - calculation is performed unless the explicit indicator '->' is - included as well as subscript labels of the precise output form. - operands : list of array_like - These are the arrays for the operation. - out : ndarray, optional - If provided, the calculation is done into this array. - dtype : {data-type, None}, optional - If provided, forces the calculation to use the data type specified. - Note that you may have to also give a more liberal `casting` - parameter to allow the conversions. Default is None. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the output. 'C' means it should - be C contiguous. 'F' means it should be Fortran contiguous, - 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. - 'K' means it should be as close to the layout as the inputs as - is possible, including arbitrarily permuted axes. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. Setting this to - 'unsafe' is not recommended, as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Default is 'safe'. - optimize : {False, True, 'greedy', 'optimal'}, optional - Controls if intermediate optimization should occur. No optimization - will occur if False and True will default to the 'greedy' algorithm. - Also accepts an explicit contraction list from the ``np.einsum_path`` - function. See ``np.einsum_path`` for more details. Defaults to False. - - Returns - ------- - output : ndarray - The calculation based on the Einstein summation convention. - - See Also - -------- - einsum_path, dot, inner, outer, tensordot, linalg.multi_dot - - Notes - ----- - .. versionadded:: 1.6.0 - - The Einstein summation convention can be used to compute - many multi-dimensional, linear algebraic array operations. `einsum` - provides a succinct way of representing these. - - A non-exhaustive list of these operations, - which can be computed by `einsum`, is shown below along with examples: - - * Trace of an array, :py:func:`numpy.trace`. - * Return a diagonal, :py:func:`numpy.diag`. - * Array axis summations, :py:func:`numpy.sum`. - * Transpositions and permutations, :py:func:`numpy.transpose`. - * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. - * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. - * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. - * Tensor contractions, :py:func:`numpy.tensordot`. 
- * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. - - The subscripts string is a comma-separated list of subscript labels, - where each label refers to a dimension of the corresponding operand. - Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` - is equivalent to :py:func:`np.inner(a,b) `. If a label - appears only once, it is not summed, so ``np.einsum('i', a)`` produces a - view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` - describes traditional matrix multiplication and is equivalent to - :py:func:`np.matmul(a,b) `. Repeated subscript labels in one - operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent - to :py:func:`np.trace(a) `. - - In *implicit mode*, the chosen subscripts are important - since the axes of the output are reordered alphabetically. This - means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while - ``np.einsum('ji', a)`` takes its transpose. Additionally, - ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, - ``np.einsum('ij,jh', a, b)`` returns the transpose of the - multiplication since subscript 'h' precedes subscript 'i'. - - In *explicit mode* the output can be directly controlled by - specifying output subscript labels. This requires the - identifier '->' as well as the list of output subscript labels. - This feature increases the flexibility of the function since - summing can be disabled or forced when required. The call - ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) `, - and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) `. - The difference is that `einsum` does not allow broadcasting by default. - Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the - order of the output subscript labels and therefore returns matrix - multiplication, unlike the example above in implicit mode. - - To enable and control broadcasting, use an ellipsis. Default - NumPy-style broadcasting is done by adding an ellipsis - to the left of each term, like ``np.einsum('...ii->...i', a)``. - To take the trace along the first and last axes, - you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix - product with the left-most indices instead of rightmost, one can do - ``np.einsum('ij...,jk...->ik...', a, b)``. - - When there is only one operand, no axes are summed, and no output - parameter is provided, a view into the operand is returned instead - of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` - produces a view (changed in version 1.10.0). - - `einsum` also provides an alternative way to provide the subscripts - and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. - If the output shape is not provided in this format `einsum` will be - calculated in implicit mode, otherwise it will be performed explicitly. - The examples below have corresponding `einsum` calls with the two - parameter methods. - - .. versionadded:: 1.10.0 - - Views returned from einsum are now writeable whenever the input array - is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now - have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` - and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal - of a 2D array. 
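One broadcasting case described in the notes above but not exercised in the examples below is tracing the first and last axes of a higher-dimensional array with ``'i...i'``. A minimal sketch, assuming any array whose first and last axes have equal length:

    >>> a = np.arange(12).reshape(2, 3, 2)
    >>> np.einsum('i...i', a)    # sums a[i, :, i] over i; the middle axis survives
    array([ 7, 11, 15])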
- - Examples - -------- - >>> a = np.arange(25).reshape(5,5) - >>> b = np.arange(5) - >>> c = np.arange(6).reshape(2,3) - - Trace of a matrix: - - >>> np.einsum('ii', a) - 60 - >>> np.einsum(a, [0,0]) - 60 - >>> np.trace(a) - 60 - - Extract the diagonal (requires explicit form): - - >>> np.einsum('ii->i', a) - array([ 0, 6, 12, 18, 24]) - >>> np.einsum(a, [0,0], [0]) - array([ 0, 6, 12, 18, 24]) - >>> np.diag(a) - array([ 0, 6, 12, 18, 24]) - - Sum over an axis (requires explicit form): - - >>> np.einsum('ij->i', a) - array([ 10, 35, 60, 85, 110]) - >>> np.einsum(a, [0,1], [0]) - array([ 10, 35, 60, 85, 110]) - >>> np.sum(a, axis=1) - array([ 10, 35, 60, 85, 110]) - - For higher dimensional arrays summing a single axis can be done with ellipsis: - - >>> np.einsum('...j->...', a) - array([ 10, 35, 60, 85, 110]) - >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) - array([ 10, 35, 60, 85, 110]) - - Compute a matrix transpose, or reorder any number of axes: - - >>> np.einsum('ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum('ij->ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum(c, [1,0]) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.transpose(c) - array([[0, 3], - [1, 4], - [2, 5]]) - - Vector inner products: - - >>> np.einsum('i,i', b, b) - 30 - >>> np.einsum(b, [0], b, [0]) - 30 - >>> np.inner(b,b) - 30 - - Matrix vector multiplication: - - >>> np.einsum('ij,j', a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum(a, [0,1], b, [1]) - array([ 30, 80, 130, 180, 230]) - >>> np.dot(a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum('...j,j', a, b) - array([ 30, 80, 130, 180, 230]) - - Broadcasting and scalar multiplication: - - >>> np.einsum('..., ...', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(',ij', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.multiply(3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - - Vector outer product: - - >>> np.einsum('i,j', np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.einsum(np.arange(2)+1, [0], b, [1]) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.outer(np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - - Tensor contraction: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> np.einsum('ijk,jil->kl', a, b) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.tensordot(a,b, axes=([1,0],[0,1])) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - - Writeable returned arrays (since version 1.10.0): - - >>> a = np.zeros((3, 3)) - >>> np.einsum('ii->i', a)[:] = 1 - >>> a - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - Example of ellipsis use: - - >>> a = np.arange(6).reshape((3,2)) - >>> b = np.arange(12).reshape((4,3)) - >>> np.einsum('ki,jk->ij', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('ki,...k->i...', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('k...,jk', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - - """) - - -############################################################################## -# -# Documentation for ndarray attributes and methods -# 
-############################################################################## - - -############################################################################## -# -# ndarray object -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', - """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) - - An array object represents a multidimensional, homogeneous array - of fixed-size items. An associated data-type object describes the - format of each element in the array (its byte-order, how many bytes it - occupies in memory, whether it is an integer, a floating point number, - or something else, etc.) - - Arrays should be constructed using `array`, `zeros` or `empty` (refer - to the See Also section below). The parameters given here refer to - a low-level method (`ndarray(...)`) for instantiating an array. - - For more information, refer to the `numpy` module and examine the - methods and attributes of an array. - - Parameters - ---------- - (for the __new__ method; see Notes below) - - shape : tuple of ints - Shape of created array. - dtype : data-type, optional - Any object that can be interpreted as a numpy data type. - buffer : object exposing buffer interface, optional - Used to fill the array with data. - offset : int, optional - Offset of array data in buffer. - strides : tuple of ints, optional - Strides of data in memory. - order : {'C', 'F'}, optional - Row-major (C-style) or column-major (Fortran-style) order. - - Attributes - ---------- - T : ndarray - Transpose of the array. - data : buffer - The array's elements, in memory. - dtype : dtype object - Describes the format of the elements in the array. - flags : dict - Dictionary containing information related to memory use, e.g., - 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. - flat : numpy.flatiter object - Flattened version of the array as an iterator. The iterator - allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for - assignment examples; TODO). - imag : ndarray - Imaginary part of the array. - real : ndarray - Real part of the array. - size : int - Number of elements in the array. - itemsize : int - The memory use of each array element in bytes. - nbytes : int - The total number of bytes required to store the array data, - i.e., ``itemsize * size``. - ndim : int - The array's number of dimensions. - shape : tuple of ints - Shape of the array. - strides : tuple of ints - The step-size required to move from one element to the next in - memory. For example, a contiguous ``(3, 4)`` array of type - ``int16`` in C-order has strides ``(8, 2)``. This implies that - to move from element to element in memory requires jumps of 2 bytes. - To move from row-to-row, one needs to jump 8 bytes at a time - (``2 * 4``). - ctypes : ctypes object - Class containing properties of the array needed for interaction - with ctypes. - base : ndarray - If the array is a view into another array, that array is its `base` - (unless that array is also a view). The `base` array is where the - array data is actually stored. - - See Also - -------- - array : Construct an array. - zeros : Create an array, each element of which is zero. - empty : Create an array, but leave its allocated memory unchanged (i.e., - it contains "garbage"). - dtype : Create a data-type. - - Notes - ----- - There are two modes of creating an array using ``__new__``: - - 1. If `buffer` is None, then only `shape`, `dtype`, and `order` - are used. - 2. 
If `buffer` is an object exposing the buffer interface, then - all keywords are interpreted. - - No ``__init__`` method is needed because the array is fully initialized - after the ``__new__`` method. - - Examples - -------- - These examples illustrate the low-level `ndarray` constructor. Refer - to the `See Also` section above for easier ways of constructing an - ndarray. - - First mode, `buffer` is None: - - >>> np.ndarray(shape=(2,2), dtype=float, order='F') - array([[0.0e+000, 0.0e+000], # random - [ nan, 2.5e-323]]) - - Second mode: - - >>> np.ndarray((2,), buffer=np.array([1,2,3]), - ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. skip first element - array([2, 3]) - - """) - - -############################################################################## -# -# ndarray attributes -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', - """Array protocol: Python side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', - """None.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', - """Array priority.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', - """Array protocol: C-struct side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('base', - """ - Base object if memory is from some other object. - - Examples - -------- - The base of an array that owns its memory is None: - - >>> x = np.array([1,2,3,4]) - >>> x.base is None - True - - Slicing creates a view, whose memory is shared with x: - - >>> y = x[2:] - >>> y.base is x - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', - """ - An object to simplify the interaction of the array with the ctypes - module. - - This attribute creates an object that makes it easier to use arrays - when calling shared libraries with the ctypes module. The returned - object has, among others, data, shape, and strides attributes (see - Notes below) which themselves return ctypes objects that can be used - as arguments to a shared library. - - Parameters - ---------- - None - - Returns - ------- - c : Python object - Possessing attributes data, shape, strides, etc. - - See Also - -------- - numpy.ctypeslib - - Notes - ----- - Below are the public attributes of this object which were documented - in "Guide to NumPy" (we have omitted undocumented public attributes, - as well as documented private attributes): - - .. autoattribute:: numpy.core._internal._ctypes.data - :noindex: - - .. autoattribute:: numpy.core._internal._ctypes.shape - :noindex: - - .. autoattribute:: numpy.core._internal._ctypes.strides - :noindex: - - .. automethod:: numpy.core._internal._ctypes.data_as - :noindex: - - .. automethod:: numpy.core._internal._ctypes.shape_as - :noindex: - - .. automethod:: numpy.core._internal._ctypes.strides_as - :noindex: - - If the ctypes module is not available, then the ctypes attribute - of array objects still returns something useful, but ctypes objects - are not returned and errors may be raised instead. In particular, - the object will still have the ``as_parameter`` attribute which will - return an integer equal to the data attribute. 
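As a minimal, self-contained sketch of handing these addresses to C code, `ctypes.memmove` stands in here for a function from a real shared library; the two small 1-D ``int64`` arrays are illustrative assumptions:

    >>> import ctypes
    >>> src = np.arange(3, dtype=np.int64)
    >>> dst = np.zeros(3, dtype=np.int64)
    >>> _ = ctypes.memmove(dst.ctypes.data, src.ctypes.data, src.nbytes)
    >>> dst    # dtype suffix omitted on platforms where int64 is the default integer
    array([0, 1, 2])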
- - Examples - -------- - >>> import ctypes - >>> x - array([[0, 1], - [2, 3]]) - >>> x.ctypes.data - 30439712 - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)) - - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents - c_long(0) - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents - c_longlong(4294967296L) - >>> x.ctypes.shape - - >>> x.ctypes.shape_as(ctypes.c_long) - - >>> x.ctypes.strides - - >>> x.ctypes.strides_as(ctypes.c_longlong) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('data', - """Python buffer object pointing to the start of the array's data.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', - """ - Data-type of the array's elements. - - Parameters - ---------- - None - - Returns - ------- - d : numpy dtype object - - See Also - -------- - numpy.dtype - - Examples - -------- - >>> x - array([[0, 1], - [2, 3]]) - >>> x.dtype - dtype('int32') - >>> type(x.dtype) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', - """ - The imaginary part of the array. - - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.imag - array([ 0. , 0.70710678]) - >>> x.imag.dtype - dtype('float64') - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', - """ - Length of one array element in bytes. - - Examples - -------- - >>> x = np.array([1,2,3], dtype=np.float64) - >>> x.itemsize - 8 - >>> x = np.array([1,2,3], dtype=np.complex128) - >>> x.itemsize - 16 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', - """ - Information about the memory layout of the array. - - Attributes - ---------- - C_CONTIGUOUS (C) - The data is in a single, C-style contiguous segment. - F_CONTIGUOUS (F) - The data is in a single, Fortran-style contiguous segment. - OWNDATA (O) - The array owns the memory it uses or borrows it from another object. - WRITEABLE (W) - The data area can be written to. Setting this to False locks - the data, making it read-only. A view (slice, etc.) inherits WRITEABLE - from its base array at creation time, but a view of a writeable - array may be subsequently locked while the base array remains writeable. - (The opposite is not true, in that a view of a locked array may not - be made writeable. However, currently, locking a base object does not - lock any views that already reference it, so under that circumstance it - is possible to alter the contents of a locked array via a previously - created writeable view onto it.) Attempting to change a non-writeable - array raises a RuntimeError exception. - ALIGNED (A) - The data and all elements are aligned appropriately for the hardware. - WRITEBACKIFCOPY (X) - This array is a copy of some other array. The C-API function - PyArray_ResolveWritebackIfCopy must be called before deallocating - to the base array will be updated with the contents of this array. - UPDATEIFCOPY (U) - (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array. - When this array is - deallocated, the base array will be updated with the contents of - this array. - FNC - F_CONTIGUOUS and not C_CONTIGUOUS. - FORC - F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). - BEHAVED (B) - ALIGNED and WRITEABLE. - CARRAY (CA) - BEHAVED and C_CONTIGUOUS. - FARRAY (FA) - BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. - - Notes - ----- - The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), - or by using lowercased attribute names (as in ``a.flags.writeable``). 
Short flag - names are only supported in dictionary access. - - Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be - changed by the user, via direct assignment to the attribute or dictionary - entry, or by calling `ndarray.setflags`. - - The array flags cannot be set arbitrarily: - - - UPDATEIFCOPY can only be set ``False``. - - WRITEBACKIFCOPY can only be set ``False``. - - ALIGNED can only be set ``True`` if the data is truly aligned. - - WRITEABLE can only be set ``True`` if the array owns its own memory - or the ultimate owner of the memory exposes a writeable buffer - interface or is a string. - - Arrays can be both C-style and Fortran-style contiguous simultaneously. - This is clear for 1-dimensional arrays, but can also be true for higher - dimensional arrays. - - Even for contiguous arrays a stride for a given dimension - ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` - or the array has no elements. - It does *not* generally hold that ``self.strides[-1] == self.itemsize`` - for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for - Fortran-style contiguous arrays is true. - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', - """ - A 1-D iterator over the array. - - This is a `numpy.flatiter` instance, which acts similarly to, but is not - a subclass of, Python's built-in iterator object. - - See Also - -------- - flatten : Return a copy of the array collapsed into one dimension. - - flatiter - - Examples - -------- - >>> x = np.arange(1, 7).reshape(2, 3) - >>> x - array([[1, 2, 3], - [4, 5, 6]]) - >>> x.flat[3] - 4 - >>> x.T - array([[1, 4], - [2, 5], - [3, 6]]) - >>> x.T.flat[3] - 5 - >>> type(x.flat) - - - An assignment example: - - >>> x.flat = 3; x - array([[3, 3, 3], - [3, 3, 3]]) - >>> x.flat[[1,4]] = 1; x - array([[3, 1, 3], - [3, 1, 3]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', - """ - Total bytes consumed by the elements of the array. - - Notes - ----- - Does not include memory consumed by non-element attributes of the - array object. - - Examples - -------- - >>> x = np.zeros((3,5,2), dtype=np.complex128) - >>> x.nbytes - 480 - >>> np.prod(x.shape) * x.itemsize - 480 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', - """ - Number of array dimensions. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> x.ndim - 1 - >>> y = np.zeros((2, 3, 4)) - >>> y.ndim - 3 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('real', - """ - The real part of the array. - - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.real - array([ 1. , 0.70710678]) - >>> x.real.dtype - dtype('float64') - - See Also - -------- - numpy.real : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', - """ - Tuple of array dimensions. - - The shape property is usually used to get the current shape of an array, - but may also be used to reshape the array in-place by assigning a tuple of - array dimensions to it. As with `numpy.reshape`, one of the new shape - dimensions can be -1, in which case its value is inferred from the size of - the array and the remaining dimensions. Reshaping an array in-place will - fail if a copy is required. 
- - Examples - -------- - >>> x = np.array([1, 2, 3, 4]) - >>> x.shape - (4,) - >>> y = np.zeros((2, 3, 4)) - >>> y.shape - (2, 3, 4) - >>> y.shape = (3, 8) - >>> y - array([[ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> y.shape = (3, 6) - Traceback (most recent call last): - File "", line 1, in - ValueError: total size of new array must be unchanged - >>> np.zeros((4,2))[::2].shape = (-1,) - Traceback (most recent call last): - File "", line 1, in - AttributeError: incompatible shape for a non-contiguous array - - See Also - -------- - numpy.reshape : similar function - ndarray.reshape : similar method - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('size', - """ - Number of elements in the array. - - Equal to ``np.prod(a.shape)``, i.e., the product of the array's - dimensions. - - Notes - ----- - `a.size` returns a standard arbitrary precision Python integer. This - may not be the case with other methods of obtaining the same value - (like the suggested ``np.prod(a.shape)``, which returns an instance - of ``np.int_``), and may be relevant if the value is used further in - calculations that may overflow a fixed size integer type. - - Examples - -------- - >>> x = np.zeros((3, 5, 2), dtype=np.complex128) - >>> x.size - 30 - >>> np.prod(x.shape) - 30 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', - """ - Tuple of bytes to step in each dimension when traversing an array. - - The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` - is:: - - offset = sum(np.array(i) * a.strides) - - A more detailed explanation of strides can be found in the - "ndarray.rst" file in the NumPy reference guide. - - Notes - ----- - Imagine an array of 32-bit integers (each 4 bytes):: - - x = np.array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]], dtype=np.int32) - - This array is stored in memory as 40 bytes, one after the other - (known as a contiguous block of memory). The strides of an array tell - us how many bytes we have to skip in memory to move to the next position - along a certain axis. For example, we have to skip 4 bytes (1 value) to - move to the next column, but 20 bytes (5 values) to get to the same - position in the next row. As such, the strides for the array `x` will be - ``(20, 4)``. - - See Also - -------- - numpy.lib.stride_tricks.as_strided - - Examples - -------- - >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) - >>> y - array([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]) - >>> y.strides - (48, 16, 4) - >>> y[1,1,1] - 17 - >>> offset=sum(y.strides * np.array((1,1,1))) - >>> offset/y.itemsize - 17 - - >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) - >>> x.strides - (32, 4, 224, 1344) - >>> i = np.array([3,5,2,2]) - >>> offset = sum(i * x.strides) - >>> x[3,5,2,2] - 813 - >>> offset / x.itemsize - 813 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('T', - """ - The transposed array. - - Same as ``self.transpose()``. 
- - Examples - -------- - >>> x = np.array([[1.,2.],[3.,4.]]) - >>> x - array([[ 1., 2.], - [ 3., 4.]]) - >>> x.T - array([[ 1., 3.], - [ 2., 4.]]) - >>> x = np.array([1.,2.,3.,4.]) - >>> x - array([ 1., 2., 3., 4.]) - >>> x.T - array([ 1., 2., 3., 4.]) - - See Also - -------- - transpose - - """)) - - -############################################################################## -# -# ndarray methods -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', - """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. - - Returns either a new reference to self if dtype is not given or a new array - of provided data type if dtype is different from the current dtype of the - array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', - """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', - """a.__array_wrap__(obj) -> Object of same type as ndarray object a. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', - """a.__copy__() - - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. - - Equivalent to ``a.copy(order='K')``. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', - """a.__deepcopy__(memo, /) -> Deep copy of array. - - Used if :func:`copy.deepcopy` is called on an array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', - """a.__reduce__() - - For pickling. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', - """a.__setstate__(state, /) - - For unpickling. - - The `state` argument must be a sequence that contains the following - elements: - - Parameters - ---------- - version : int - optional pickle version. If omitted defaults to 0. - shape : tuple - dtype : data-type - isFortran : bool - rawdata : string or list - a binary string with the data (or a list if 'a' is an object array) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('all', - """ - a.all(axis=None, out=None, keepdims=False) - - Returns True if all elements evaluate to True. - - Refer to `numpy.all` for full documentation. - - See Also - -------- - numpy.all : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('any', - """ - a.any(axis=None, out=None, keepdims=False) - - Returns True if any of the elements of `a` evaluate to True. - - Refer to `numpy.any` for full documentation. - - See Also - -------- - numpy.any : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', - """ - a.argmax(axis=None, out=None) - - Return indices of the maximum values along the given axis. - - Refer to `numpy.argmax` for full documentation. - - See Also - -------- - numpy.argmax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', - """ - a.argmin(axis=None, out=None) - - Return indices of the minimum values along the given axis of `a`. - - Refer to `numpy.argmin` for detailed documentation. - - See Also - -------- - numpy.argmin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', - """ - a.argsort(axis=-1, kind=None, order=None) - - Returns the indices that would sort this array. - - Refer to `numpy.argsort` for full documentation. 
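A minimal sketch of the indirect sort, since the stub above defers all detail to `numpy.argsort`:

    >>> x = np.array([3, 1, 2])
    >>> x.argsort()              # indices that would sort x
    array([1, 2, 0])
    >>> x[x.argsort()]
    array([1, 2, 3])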
- - See Also - -------- - numpy.argsort : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', - """ - a.argpartition(kth, axis=-1, kind='introselect', order=None) - - Returns the indices that would partition this array. - - Refer to `numpy.argpartition` for full documentation. - - .. versionadded:: 1.8.0 - - See Also - -------- - numpy.argpartition : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', - """ - a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) - - Copy of the array, cast to a specified type. - - Parameters - ---------- - dtype : str or dtype - Typecode or data-type to which the array is cast. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout order of the result. - 'C' means C order, 'F' means Fortran order, 'A' - means 'F' order if all the arrays are Fortran contiguous, - 'C' order otherwise, and 'K' means as close to the - order the array elements appear in memory as possible. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. Defaults to 'unsafe' - for backwards compatibility. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - subok : bool, optional - If True, then sub-classes will be passed-through (default), otherwise - the returned array will be forced to be a base-class array. - copy : bool, optional - By default, astype always returns a newly allocated array. If this - is set to false, and the `dtype`, `order`, and `subok` - requirements are satisfied, the input array is returned instead - of a copy. - - Returns - ------- - arr_t : ndarray - Unless `copy` is False and the other conditions for returning the input - array are satisfied (see description for `copy` input parameter), `arr_t` - is a new array of the same shape as the input array, with dtype, order - given by `dtype`, `order`. - - Notes - ----- - .. versionchanged:: 1.17.0 - Casting between a simple data type and a structured one is possible only - for "unsafe" casting. Casting to multiple fields is allowed, but - casting from multiple fields is not. - - .. versionchanged:: 1.9.0 - Casting from numeric to string types in 'safe' casting mode requires - that the string dtype length is long enough to store the max - integer/float value converted. - - Raises - ------ - ComplexWarning - When casting from complex to float or int. To avoid this, - one should use ``a.real.astype(t)``. - - Examples - -------- - >>> x = np.array([1, 2, 2.5]) - >>> x - array([1. , 2. , 2.5]) - - >>> x.astype(int) - array([1, 2, 2]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', - """ - a.byteswap(inplace=False) - - Swap the bytes of the array elements - - Toggle between low-endian and big-endian data representation by - returning a byteswapped array, optionally swapped in-place. - Arrays of byte-strings are not swapped. The real and imaginary - parts of a complex number are swapped individually. - - Parameters - ---------- - inplace : bool, optional - If ``True``, swap bytes in-place, default is ``False``. - - Returns - ------- - out : ndarray - The byteswapped array. If `inplace` is ``True``, this is - a view to self. 
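One `astype` behavior described above but not shown in its examples is the ``copy=False`` short-circuit: when the requested dtype, memory order, and subclass requirements are already met, the input array itself is returned. A minimal sketch, assuming a small float32 array:

    >>> x = np.arange(3, dtype=np.float32)
    >>> x.astype(np.float32, copy=False) is x
    True
    >>> x.astype(np.float64, copy=False) is x    # a cast is required, so a new array is made
    False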
- - Examples - -------- - >>> A = np.array([1, 256, 8755], dtype=np.int16) - >>> list(map(hex, A)) - ['0x1', '0x100', '0x2233'] - >>> A.byteswap(inplace=True) - array([ 256, 1, 13090], dtype=int16) - >>> list(map(hex, A)) - ['0x100', '0x1', '0x3322'] - - Arrays of byte-strings are not swapped - - >>> A = np.array([b'ceg', b'fac']) - >>> A.byteswap() - array([b'ceg', b'fac'], dtype='|S3') - - ``A.newbyteorder().byteswap()`` produces an array with the same values - but different representation in memory - - >>> A = np.array([1, 2, 3]) - >>> A.view(np.uint8) - array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, - 0, 0], dtype=uint8) - >>> A.newbyteorder().byteswap(inplace=True) - array([1, 2, 3]) - >>> A.view(np.uint8) - array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, - 0, 3], dtype=uint8) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', - """ - a.choose(choices, out=None, mode='raise') - - Use an index array to construct a new array from a set of choices. - - Refer to `numpy.choose` for full documentation. - - See Also - -------- - numpy.choose : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', - """ - a.clip(min=None, max=None, out=None, **kwargs) - - Return an array whose values are limited to ``[min, max]``. - One of max or min must be given. - - Refer to `numpy.clip` for full documentation. - - See Also - -------- - numpy.clip : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', - """ - a.compress(condition, axis=None, out=None) - - Return selected slices of this array along given axis. - - Refer to `numpy.compress` for full documentation. - - See Also - -------- - numpy.compress : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', - """ - a.conj() - - Complex-conjugate all elements. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', - """ - a.conjugate() - - Return the complex conjugate, element-wise. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', - """ - a.copy(order='C') - - Return a copy of the array. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the copy. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :func:`numpy.copy` are very - similar, but have different default values for their order= - arguments.) - - See also - -------- - numpy.copy - numpy.copyto - - Examples - -------- - >>> x = np.array([[1,2,3],[4,5,6]], order='F') - - >>> y = x.copy() - - >>> x.fill(0) - - >>> x - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y - array([[1, 2, 3], - [4, 5, 6]]) - - >>> y.flags['C_CONTIGUOUS'] - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', - """ - a.cumprod(axis=None, dtype=None, out=None) - - Return the cumulative product of the elements along the given axis. - - Refer to `numpy.cumprod` for full documentation. 
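A minimal `cumprod` sketch, including the `axis` keyword:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> a.cumprod()              # flattened cumulative product
    array([ 1,  2,  6, 24])
    >>> a.cumprod(axis=0)        # cumulative product down each column
    array([[1, 2],
           [3, 8]])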
- - See Also - -------- - numpy.cumprod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', - """ - a.cumsum(axis=None, dtype=None, out=None) - - Return the cumulative sum of the elements along the given axis. - - Refer to `numpy.cumsum` for full documentation. - - See Also - -------- - numpy.cumsum : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', - """ - a.diagonal(offset=0, axis1=0, axis2=1) - - Return specified diagonals. In NumPy 1.9 the returned array is a - read-only view instead of a copy as in previous NumPy versions. In - a future version the read-only restriction will be removed. - - Refer to :func:`numpy.diagonal` for full documentation. - - See Also - -------- - numpy.diagonal : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', - """ - a.dot(b, out=None) - - Dot product of two arrays. - - Refer to `numpy.dot` for full documentation. - - See Also - -------- - numpy.dot : equivalent function - - Examples - -------- - >>> a = np.eye(2) - >>> b = np.ones((2, 2)) * 2 - >>> a.dot(b) - array([[2., 2.], - [2., 2.]]) - - This array method can be conveniently chained: - - >>> a.dot(b).dot(b) - array([[8., 8.], - [8., 8.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', - """a.dump(file) - - Dump a pickle of the array to the specified file. - The array can be read back with pickle.load or numpy.load. - - Parameters - ---------- - file : str or Path - A string naming the dump file. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', - """ - a.dumps() - - Returns the pickle of the array as a string. - pickle.loads or numpy.loads will convert the string back to an array. - - Parameters - ---------- - None - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', - """ - a.fill(value) - - Fill the array with a scalar value. - - Parameters - ---------- - value : scalar - All elements of `a` will be assigned this value. - - Examples - -------- - >>> a = np.array([1, 2]) - >>> a.fill(0) - >>> a - array([0, 0]) - >>> a = np.empty(2) - >>> a.fill(1) - >>> a - array([1., 1.]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', - """ - a.flatten(order='C') - - Return a copy of the array collapsed into one dimension. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - 'C' means to flatten in row-major (C-style) order. - 'F' means to flatten in column-major (Fortran- - style) order. 'A' means to flatten in column-major - order if `a` is Fortran *contiguous* in memory, - row-major order otherwise. 'K' means to flatten - `a` in the order the elements occur in memory. - The default is 'C'. - - Returns - ------- - y : ndarray - A copy of the input array, flattened to one dimension. - - See Also - -------- - ravel : Return a flattened array. - flat : A 1-D flat iterator over the array. - - Examples - -------- - >>> a = np.array([[1,2], [3,4]]) - >>> a.flatten() - array([1, 2, 3, 4]) - >>> a.flatten('F') - array([1, 3, 2, 4]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', - """ - a.getfield(dtype, offset=0) - - Returns a field of the given array as a certain type. - - A field is a view of the array data with a given data-type. The values in - the view are determined by the given type and the offset into the current - array in bytes. 
The offset needs to be such that the view dtype fits in the - array dtype; for example an array of dtype complex128 has 16-byte elements. - If taking a view with a 32-bit integer (4 bytes), the offset needs to be - between 0 and 12 bytes. - - Parameters - ---------- - dtype : str or dtype - The data type of the view. The dtype size of the view can not be larger - than that of the array itself. - offset : int - Number of bytes to skip before beginning the element view. - - Examples - -------- - >>> x = np.diag([1.+1.j]*2) - >>> x[1, 1] = 2 + 4.j - >>> x - array([[1.+1.j, 0.+0.j], - [0.+0.j, 2.+4.j]]) - >>> x.getfield(np.float64) - array([[1., 0.], - [0., 2.]]) - - By choosing an offset of 8 bytes we can select the complex part of the - array for our view: - - >>> x.getfield(np.float64, offset=8) - array([[1., 0.], - [0., 4.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('item', - """ - a.item(*args) - - Copy an element of an array to a standard Python scalar and return it. - - Parameters - ---------- - \\*args : Arguments (variable number and type) - - * none: in this case, the method only works for arrays - with one element (`a.size == 1`), which element is - copied into a standard Python scalar object and returned. - - * int_type: this argument is interpreted as a flat index into - the array, specifying which element to copy and return. - - * tuple of int_types: functions as does a single int_type argument, - except that the argument is interpreted as an nd-index into the - array. - - Returns - ------- - z : Standard Python scalar object - A copy of the specified element of the array as a suitable - Python scalar - - Notes - ----- - When the data type of `a` is longdouble or clongdouble, item() returns - a scalar array object because there is no available Python scalar that - would not lose information. Void arrays return a buffer object for item(), - unless fields are defined, in which case a tuple is returned. - - `item` is very similar to a[args], except, instead of an array scalar, - a standard Python scalar is returned. This can be useful for speeding up - access to elements of the array and doing arithmetic on elements of the - array using Python's optimized math. - - Examples - -------- - >>> np.random.seed(123) - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[2, 2, 6], - [1, 3, 6], - [1, 0, 1]]) - >>> x.item(3) - 1 - >>> x.item(7) - 0 - >>> x.item((0, 1)) - 2 - >>> x.item((2, 2)) - 1 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', - """ - a.itemset(*args) - - Insert scalar into an array (scalar is cast to array's dtype, if possible) - - There must be at least 1 argument, and define the last argument - as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster - than ``a[args] = item``. The item should be a scalar value and `args` - must select a single item in the array `a`. - - Parameters - ---------- - \\*args : Arguments - If one argument: a scalar, only used in case `a` is of size 1. - If two arguments: the last argument is the value to be set - and must be a scalar, the first argument specifies a single array - element location. It is either an int or a tuple. - - Notes - ----- - Compared to indexing syntax, `itemset` provides some speed increase - for placing a scalar into a particular location in an `ndarray`, - if you must do this. However, generally this is discouraged: - among other problems, it complicates the appearance of the code. 
- Also, when using `itemset` (and `item`) inside a loop, be sure - to assign the methods to a local variable to avoid the attribute - look-up at each loop iteration. - - Examples - -------- - >>> np.random.seed(123) - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[2, 2, 6], - [1, 3, 6], - [1, 0, 1]]) - >>> x.itemset(4, 0) - >>> x.itemset((2, 2), 9) - >>> x - array([[2, 2, 6], - [1, 0, 6], - [1, 0, 9]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('max', - """ - a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True) - - Return the maximum along a given axis. - - Refer to `numpy.amax` for full documentation. - - See Also - -------- - numpy.amax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', - """ - a.mean(axis=None, dtype=None, out=None, keepdims=False) - - Returns the average of the array elements along the given axis. - - Refer to `numpy.mean` for full documentation. - - See Also - -------- - numpy.mean : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('min', - """ - a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True) - - Return the minimum along a given axis. - - Refer to `numpy.amin` for full documentation. - - See Also - -------- - numpy.amin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', - """ - arr.newbyteorder(new_order='S') - - Return the array with the same data viewed with a different byte order. - - Equivalent to:: - - arr.view(arr.dtype.newbyteorder(new_order)) - - Changes are also made in all fields and sub-arrays of the array data - type. - - - - Parameters - ---------- - new_order : string, optional - Byte order to force; a value from the byte order specifications - below. `new_order` codes can be any of: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - The default value ('S') results in swapping the current - byte order. The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. - - - Returns - ------- - new_arr : array - New array object with the dtype reflecting given change to the - byte order. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', - """ - a.nonzero() - - Return the indices of the elements that are non-zero. - - Refer to `numpy.nonzero` for full documentation. - - See Also - -------- - numpy.nonzero : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', - """ - a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True) - - Return the product of the array elements over the given axis. - - Refer to `numpy.prod` for full documentation. - - See Also - -------- - numpy.prod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', - """ - a.ptp(axis=None, out=None, keepdims=False) - - Peak to peak (maximum - minimum) value along a given axis. - - Refer to `numpy.ptp` for full documentation. - - See Also - -------- - numpy.ptp : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('put', - """ - a.put(indices, values, mode='raise') - - Set ``a.flat[n] = values[n]`` for all `n` in indices. - - Refer to `numpy.put` for full documentation.
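Since the `newbyteorder` entry above ships without examples, a minimal sketch; only the dtype's byte-order flag changes, the underlying bytes are untouched:

    >>> A = np.array([1, 2, 3], dtype='<i4')
    >>> A.newbyteorder('>').dtype
    dtype('>i4')
    >>> A.newbyteorder('S').newbyteorder('S').dtype == A.dtype   # two swaps round-trip
    True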
- - See Also - -------- - numpy.put : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', - """ - a.ravel([order]) - - Return a flattened array. - - Refer to `numpy.ravel` for full documentation. - - See Also - -------- - numpy.ravel : equivalent function - - ndarray.flat : a flat iterator on the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', - """ - a.repeat(repeats, axis=None) - - Repeat elements of an array. - - Refer to `numpy.repeat` for full documentation. - - See Also - -------- - numpy.repeat : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', - """ - a.reshape(shape, order='C') - - Returns an array containing the same data with a new shape. - - Refer to `numpy.reshape` for full documentation. - - See Also - -------- - numpy.reshape : equivalent function - - Notes - ----- - Unlike the free function `numpy.reshape`, this method on `ndarray` allows - the elements of the shape parameter to be passed in as separate arguments. - For example, ``a.reshape(10, 11)`` is equivalent to - ``a.reshape((10, 11))``. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', - """ - a.resize(new_shape, refcheck=True) - - Change shape and size of array in-place. - - Parameters - ---------- - new_shape : tuple of ints, or `n` ints - Shape of resized array. - refcheck : bool, optional - If False, reference count will not be checked. Default is True. - - Returns - ------- - None - - Raises - ------ - ValueError - If `a` does not own its own data or references or views to it exist, - and the data memory must be changed. - PyPy only: will always raise if the data memory must be changed, since - there is no reliable way to determine if references or views to it - exist. - - SystemError - If the `order` keyword argument is specified. This behaviour is a - bug in NumPy. - - See Also - -------- - resize : Return a new array with the specified shape. - - Notes - ----- - This reallocates space for the data area if necessary. - - Only contiguous arrays (data elements consecutive in memory) can be - resized. - - The purpose of the reference count check is to make sure you - do not use this array as a buffer for another Python object and then - reallocate the memory. However, reference counts can increase in - other ways so if you are sure that you have not shared the memory - for this array with another Python object, then you may safely set - `refcheck` to False. - - Examples - -------- - Shrinking an array: array is flattened (in the order that the data are - stored in memory), resized, and reshaped: - - >>> a = np.array([[0, 1], [2, 3]], order='C') - >>> a.resize((2, 1)) - >>> a - array([[0], - [1]]) - - >>> a = np.array([[0, 1], [2, 3]], order='F') - >>> a.resize((2, 1)) - >>> a - array([[0], - [2]]) - - Enlarging an array: as above, but missing entries are filled with zeros: - - >>> b = np.array([[0, 1], [2, 3]]) - >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple - >>> b - array([[0, 1, 2], - [3, 0, 0]]) - - Referencing an array prevents resizing... - - >>> c = a - >>> a.resize((1, 1)) - Traceback (most recent call last): - ... - ValueError: cannot resize an array that references or is referenced ... 
- - Unless `refcheck` is False: - - >>> a.resize((1, 1), refcheck=False) - >>> a - array([[0]]) - >>> c - array([[0]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('round', - """ - a.round(decimals=0, out=None) - - Return `a` with each element rounded to the given number of decimals. - - Refer to `numpy.around` for full documentation. - - See Also - -------- - numpy.around : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', - """ - a.searchsorted(v, side='left', sorter=None) - - Find indices where elements of v should be inserted in a to maintain order. - - For full documentation, see `numpy.searchsorted` - - See Also - -------- - numpy.searchsorted : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', - """ - a.setfield(val, dtype, offset=0) - - Put a value into a specified place in a field defined by a data-type. - - Place `val` into `a`'s field defined by `dtype` and beginning `offset` - bytes into the field. - - Parameters - ---------- - val : object - Value to be placed in field. - dtype : dtype object - Data-type of the field in which to place `val`. - offset : int, optional - The number of bytes into the field at which to place `val`. - - Returns - ------- - None - - See Also - -------- - getfield - - Examples - -------- - >>> x = np.eye(3) - >>> x.getfield(np.float64) - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - >>> x.setfield(3, np.int32) - >>> x.getfield(np.int32) - array([[3, 3, 3], - [3, 3, 3], - [3, 3, 3]], dtype=int32) - >>> x - array([[1.0e+000, 1.5e-323, 1.5e-323], - [1.5e-323, 1.0e+000, 1.5e-323], - [1.5e-323, 1.5e-323, 1.0e+000]]) - >>> x.setfield(np.eye(3), np.int32) - >>> x - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', - """ - a.setflags(write=None, align=None, uic=None) - - Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), - respectively. - - These Boolean-valued flags affect how numpy interprets the memory - area used by `a` (see Notes below). The ALIGNED flag can only - be set to True if the data is actually aligned according to the type. - The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set - to True. The flag WRITEABLE can only be set to True if the array owns its - own memory, or the ultimate owner of the memory exposes a writeable buffer - interface, or is a string. (The exception for string is made so that - unpickling can be done without copying memory.) - - Parameters - ---------- - write : bool, optional - Describes whether or not `a` can be written to. - align : bool, optional - Describes whether or not `a` is aligned properly for its type. - uic : bool, optional - Describes whether or not `a` is a copy of another "base" array. - - Notes - ----- - Array flags provide information about how the memory area used - for the array is to be interpreted. There are 7 Boolean flags - in use, only four of which can be changed by the user: - WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. - - WRITEABLE (W) the data area can be written to; - - ALIGNED (A) the data and strides are aligned appropriately for the hardware - (as determined by the compiler); - - UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; - - WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced - by .base). 
When the C-API function PyArray_ResolveWritebackIfCopy is - called, the base array will be updated with the contents of this array. - - All flags can be accessed using the single (upper case) letter as well - as the full name. - - Examples - -------- - >>> y = np.array([[3, 1, 7], - ... [2, 0, 0], - ... [8, 5, 9]]) - >>> y - array([[3, 1, 7], - [2, 0, 0], - [8, 5, 9]]) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : True - ALIGNED : True - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - >>> y.setflags(write=0, align=0) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : False - ALIGNED : False - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - >>> y.setflags(uic=1) - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - ValueError: cannot set WRITEBACKIFCOPY flag to True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', - """ - a.sort(axis=-1, kind=None, order=None) - - Sort an array in-place. Refer to `numpy.sort` for full documentation. - - Parameters - ---------- - axis : int, optional - Axis along which to sort. Default is -1, which means sort along the - last axis. - kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - Sorting algorithm. The default is 'quicksort'. Note that both 'stable' - and 'mergesort' use timsort under the covers and, in general, the - actual implementation will vary with datatype. The 'mergesort' option - is retained for backwards compatibility. - - .. versionchanged:: 1.15.0 - The 'stable' option was added. - - order : str or list of str, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. A single field can - be specified as a string, and not all fields need be specified, - but unspecified fields will still be used, in the order in which - they come up in the dtype, to break ties. - - See Also - -------- - numpy.sort : Return a sorted copy of an array. - numpy.argsort : Indirect sort. - numpy.lexsort : Indirect stable sort on multiple keys. - numpy.searchsorted : Find elements in sorted array. - numpy.partition : Partial sort. - - Notes - ----- - See `numpy.sort` for notes on the different sorting algorithms. - - Examples - -------- - >>> a = np.array([[1,4], [3,1]]) - >>> a.sort(axis=1) - >>> a - array([[1, 4], - [1, 3]]) - >>> a.sort(axis=0) - >>> a - array([[1, 3], - [1, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) - >>> a.sort(order='y') - >>> a - array([(b'c', 1), (b'a', 2)], - dtype=[('x', 'S1'), ('y', '<i8')]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', - """ - a.partition(kth, axis=-1, kind='introselect', order=None) - - Rearranges the elements in the array in such a way that the value of - the element in kth position is in the position it would be in a sorted - array. All elements smaller than the kth element are moved before this - element and all equal or greater are moved behind it. The ordering of - the elements in the two partitions is undefined. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - kth : int or sequence of ints - Element index to partition by. The kth element value will be in its - final sorted position and all smaller elements will be moved before - it and all equal or greater elements behind it. The order of all - elements in the partitions is undefined. If provided with a sequence - of kth it will partition all elements indexed by kth of them into - their sorted position at once. - axis : int, optional - Axis along which to sort. Default is -1, which means sort along the - last axis. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. - order : str or list of str, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. A single field can - be specified as a string, and not all fields need be specified, - but unspecified fields will still be used, in the order in which - they come up in the dtype, to break ties. - - See Also - -------- - numpy.partition : Return a partitioned copy of an array. - argpartition : Indirect partition. - sort : Full sort. - - Notes - ----- - See ``np.partition`` for notes on the different algorithms. - - Examples - -------- - >>> a = np.array([3, 4, 2, 1]) - >>> a.partition(3) - >>> a - array([2, 1, 3, 4]) - - >>> a.partition((1, 3)) - >>> a - array([1, 2, 3, 4]) - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', - """ - a.squeeze(axis=None) - - Remove single-dimensional entries from the shape of `a`. - - Refer to `numpy.squeeze` for full documentation. - - See Also - -------- - numpy.squeeze : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('std', - """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False) - - Returns the standard deviation of the array elements along given axis. - - Refer to `numpy.std` for full documentation.
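A minimal sketch of the `ddof` parameter of `std`, whose divisor is ``N - ddof``:

    >>> a = np.array([1.0, 2.0, 3.0, 4.0])
    >>> a.std()                  # population form, divides by N
    1.118033988749895
    >>> a.std(ddof=1)            # sample form, divides by N - 1
    1.2909944487358056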
- - See Also - -------- - numpy.std : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', - """ - a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True) - - Return the sum of the array elements over the given axis. - - Refer to `numpy.sum` for full documentation. - - See Also - -------- - numpy.sum : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', - """ - a.swapaxes(axis1, axis2) - - Return a view of the array with `axis1` and `axis2` interchanged. - - Refer to `numpy.swapaxes` for full documentation. - - See Also - -------- - numpy.swapaxes : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('take', - """ - a.take(indices, axis=None, out=None, mode='raise') - - Return an array formed from the elements of `a` at the given indices. - - Refer to `numpy.take` for full documentation. - - See Also - -------- - numpy.take : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', - """ - a.tofile(fid, sep="", format="%s") - - Write array to a file as text or binary (default). - - Data is always written in 'C' order, independent of the order of `a`. - The data produced by this method can be recovered using the function - fromfile(). - - Parameters - ---------- - fid : file or str or Path - An open file object, or a string containing a filename. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - - sep : str - Separator between array items for text output. - If "" (empty), a binary file is written, equivalent to - ``file.write(a.tobytes())``. - format : str - Format string for text file output. - Each entry in the array is formatted to text by first converting - it to the closest Python type, and then using "format" % item. - - Notes - ----- - This is a convenience function for quick storage of array data. - Information on endianness and precision is lost, so this method is not a - good choice for files intended to archive data or transport data between - machines with different endianness. Some of these problems can be overcome - by outputting the data as text files, at the expense of speed and file - size. - - When fid is a file object, array contents are directly written to the - file, bypassing the file object's ``write`` method. As a result, tofile - cannot be used with files objects supporting compression (e.g., GzipFile) - or file-like objects that do not support ``fileno()`` (e.g., BytesIO). - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', - """ - a.tolist() - - Return the array as an ``a.ndim``-levels deep nested list of Python scalars. - - Return a copy of the array data as a (nested) Python list. - Data items are converted to the nearest compatible builtin Python type, via - the `~numpy.ndarray.item` function. - - If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will - not be a list at all, but a simple Python scalar. - - Parameters - ---------- - none - - Returns - ------- - y : object, or list of object, or list of list of object, or ... - The possibly nested list of array elements. - - Notes - ----- - The array may be recreated via ``a = np.array(a.tolist())``, although this - may sometimes lose precision. 
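- A quick added round-trip sketch (assumes ``import numpy as np``),
- showing the reconstruction mentioned above:
-
- >>> a = np.array([[1, 2], [3, 4]])
- >>> np.array_equal(a, np.array(a.tolist()))
- True
-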
- - Examples - -------- - For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``, - except that ``tolist`` changes numpy scalars to Python scalars: - - >>> a = np.uint32([1, 2]) - >>> a_list = list(a) - >>> a_list - [1, 2] - >>> type(a_list[0]) - - >>> a_tolist = a.tolist() - >>> a_tolist - [1, 2] - >>> type(a_tolist[0]) - - - Additionally, for a 2D array, ``tolist`` applies recursively: - - >>> a = np.array([[1, 2], [3, 4]]) - >>> list(a) - [array([1, 2]), array([3, 4])] - >>> a.tolist() - [[1, 2], [3, 4]] - - The base case for this recursion is a 0D array: - - >>> a = np.array(1) - >>> list(a) - Traceback (most recent call last): - ... - TypeError: iteration over a 0-d array - >>> a.tolist() - 1 - """)) - - -tobytesdoc = """ - a.{name}(order='C') - - Construct Python bytes containing the raw data bytes in the array. - - Constructs Python bytes showing a copy of the raw contents of - data memory. The bytes object can be produced in either 'C' or 'Fortran', - or 'Any' order (the default is 'C'-order). 'Any' order means C-order - unless the F_CONTIGUOUS flag in the array is set, in which case it - means 'Fortran' order. - - {deprecated} - - Parameters - ---------- - order : {{'C', 'F', None}}, optional - Order of the data for multidimensional arrays: - C, Fortran, or the same as for the original array. - - Returns - ------- - s : bytes - Python bytes exhibiting a copy of `a`'s raw data. - - Examples - -------- - >>> x = np.array([[0, 1], [2, 3]], dtype='>> x.tobytes() - b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' - >>> x.tobytes('C') == x.tobytes() - True - >>> x.tobytes('F') - b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' - - """ - -add_newdoc('numpy.core.multiarray', 'ndarray', - ('tostring', tobytesdoc.format(name='tostring', - deprecated= - 'This function is a compatibility ' - 'alias for tobytes. Despite its ' - 'name it returns bytes not ' - 'strings.'))) -add_newdoc('numpy.core.multiarray', 'ndarray', - ('tobytes', tobytesdoc.format(name='tobytes', - deprecated='.. versionadded:: 1.9.0'))) - -add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', - """ - a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) - - Return the sum along diagonals of the array. - - Refer to `numpy.trace` for full documentation. - - See Also - -------- - numpy.trace : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', - """ - a.transpose(*axes) - - Returns a view of the array with axes transposed. - - For a 1-D array this has no effect, as a transposed vector is simply the - same vector. To convert a 1-D array into a 2D column vector, an additional - dimension must be added. `np.atleast2d(a).T` achieves this, as does - `a[:, np.newaxis]`. - For a 2-D array, this is a standard matrix transpose. - For an n-D array, if axes are given, their order indicates how the - axes are permuted (see Examples). If axes are not provided and - ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then - ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``. - - Parameters - ---------- - axes : None, tuple of ints, or `n` ints - - * None or no argument: reverses the order of the axes. - - * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s - `i`-th axis becomes `a.transpose()`'s `j`-th axis. - - * `n` ints: same as an n-tuple of the same ints (this form is - intended simply as a "convenience" alternative to the tuple form) - - Returns - ------- - out : ndarray - View of `a`, with axes suitably permuted. 
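- A small added sketch of the 1-D case described above (assuming
- ``import numpy as np``; note the helper is spelled ``np.atleast_2d``):
-
- >>> v = np.array([1, 2, 3])
- >>> v.transpose().shape
- (3,)
- >>> np.atleast_2d(v).T.shape
- (3, 1)
- >>> v[:, np.newaxis].shape
- (3, 1)
-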
- - See Also - -------- - ndarray.T : Array property returning the array transposed. - ndarray.reshape : Give a new shape to an array without changing its data. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> a - array([[1, 2], - [3, 4]]) - >>> a.transpose() - array([[1, 3], - [2, 4]]) - >>> a.transpose((1, 0)) - array([[1, 3], - [2, 4]]) - >>> a.transpose(1, 0) - array([[1, 3], - [2, 4]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('var', - """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('view', - """ - a.view(dtype=None, type=None) - - New view of array with the same data. - - Parameters - ---------- - dtype : data-type or ndarray sub-class, optional - Data-type descriptor of the returned view, e.g., float32 or int16. The - default, None, results in the view having the same data-type as `a`. - This argument can also be specified as an ndarray sub-class, which - then specifies the type of the returned object (this is equivalent to - setting the ``type`` parameter). - type : Python type, optional - Type of the returned view, e.g., ndarray or matrix. Again, the - default None results in type preservation. - - Notes - ----- - ``a.view()`` is used two different ways: - - ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view - of the array's memory with a different data-type. This can cause a - reinterpretation of the bytes of memory. - - ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just - returns an instance of `ndarray_subclass` that looks at the same array - (same shape, dtype, etc.) This does not cause a reinterpretation of the - memory. - - For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of - bytes per entry than the previous dtype (for example, converting a - regular array to a structured array), then the behavior of the view - cannot be predicted just from the superficial appearance of ``a`` (shown - by ``print(a)``). It also depends on exactly how ``a`` is stored in - memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus - defined as a slice or transpose, etc., the view may give different - results. 
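- A compact added sketch of the two usages just described (assumes
- ``import numpy as np``): a dtype view reinterprets the bytes, while a
- subclass view leaves them untouched:
-
- >>> a = np.zeros(4, dtype=np.int16)
- >>> a.view(np.int32).dtype
- dtype('int32')
- >>> a.view(np.int32).shape
- (2,)
- >>> isinstance(a.view(np.recarray), np.recarray)
- True
-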
- - - Examples - -------- - >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - - Viewing array data using a different type and dtype: - - >>> y = x.view(dtype=np.int16, type=np.matrix) - >>> y - matrix([[513]], dtype=int16) - >>> print(type(y)) - - - Creating a view on a structured array so it can be used in calculations - - >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) - >>> xv = x.view(dtype=np.int8).reshape(-1,2) - >>> xv - array([[1, 2], - [3, 4]], dtype=int8) - >>> xv.mean(0) - array([2., 3.]) - - Making changes to the view changes the underlying array - - >>> xv[0,1] = 20 - >>> x - array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')]) - - Using a view to convert an array to a recarray: - - >>> z = x.view(np.recarray) - >>> z.a - array([1, 3], dtype=int8) - - Views share data: - - >>> x[0] = (9, 10) - >>> z[0] - (9, 10) - - Views that change the dtype size (bytes per entry) should normally be - avoided on arrays defined by slices, transposes, fortran-ordering, etc.: - - >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16) - >>> y = x[:, 0:2] - >>> y - array([[1, 2], - [4, 5]], dtype=int16) - >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) - Traceback (most recent call last): - ... - ValueError: To change to a dtype of a different size, the array must be C-contiguous - >>> z = y.copy() - >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) - array([[(1, 2)], - [(4, 5)]], dtype=[('width', '>> oct_array = np.frompyfunc(oct, 1, 1) - >>> oct_array(np.array((10, 30, 100))) - array(['0o12', '0o36', '0o144'], dtype=object) - >>> np.array((oct(10), oct(30), oct(100))) # for comparison - array(['0o12', '0o36', '0o144'], dtype='>> np.geterrobj() # first get the defaults - [8192, 521, None] - - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - ... - >>> old_bufsize = np.setbufsize(20000) - >>> old_err = np.seterr(divide='raise') - >>> old_handler = np.seterrcall(err_handler) - >>> np.geterrobj() - [8192, 521, ] - - >>> old_err = np.seterr(all='ignore') - >>> np.base_repr(np.geterrobj()[1], 8) - '0' - >>> old_err = np.seterr(divide='warn', over='log', under='call', - ... invalid='print') - >>> np.base_repr(np.geterrobj()[1], 8) - '4351' - - """) - -add_newdoc('numpy.core.umath', 'seterrobj', - """ - seterrobj(errobj) - - Set the object that defines floating-point error handling. - - The error object contains all information that defines the error handling - behavior in NumPy. `seterrobj` is used internally by the other - functions that set error handling behavior (`seterr`, `seterrcall`). - - Parameters - ---------- - errobj : list - The error object, a list containing three elements: - [internal numpy buffer size, error mask, error callback function]. - - The error mask is a single integer that holds the treatment information - on all four floating point errors. The information for each error type - is contained in three bits of the integer. If we print it in base 8, we - can see what treatment is set for "invalid", "under", "over", and - "divide" (in that order). The printed string can be interpreted with - - * 0 : 'ignore' - * 1 : 'warn' - * 2 : 'raise' - * 3 : 'call' - * 4 : 'print' - * 5 : 'log' - - See Also - -------- - geterrobj, seterr, geterr, seterrcall, geterrcall - getbufsize, setbufsize - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. 
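- As an added worked decoding of the default mask value 521, reading the
- base-8 digits as invalid/under/over/divide:
-
- >>> np.base_repr(521, 8)
- '1011'
-
- i.e. warn/ignore/warn/warn, which matches NumPy's default error state.
-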
- - Examples - -------- - >>> old_errobj = np.geterrobj() # first get the defaults - >>> old_errobj - [8192, 521, None] - - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - ... - >>> new_errobj = [20000, 12, err_handler] - >>> np.seterrobj(new_errobj) - >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') - '14' - >>> np.geterr() - {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} - >>> np.geterrcall() is err_handler - True - - """) - - -############################################################################## -# -# compiled_base functions -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'add_docstring', - """ - add_docstring(obj, docstring) - - Add a docstring to a built-in obj if possible. - If the obj already has a docstring raise a RuntimeError - If this routine does not know how to add a docstring to the object - raise a TypeError - """) - -add_newdoc('numpy.core.umath', '_add_newdoc_ufunc', - """ - add_ufunc_docstring(ufunc, new_docstring) - - Replace the docstring for a ufunc with new_docstring. - This method will only work if the current docstring for - the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) - - Parameters - ---------- - ufunc : numpy.ufunc - A ufunc whose current doc is NULL. - new_docstring : string - The new docstring for the ufunc. - - Notes - ----- - This method allocates memory for new_docstring on - the heap. Technically this creates a mempory leak, since this - memory will not be reclaimed until the end of the program - even if the ufunc itself is removed. However this will only - be a problem if the user is repeatedly creating ufuncs with - no documentation, adding documentation via add_newdoc_ufunc, - and then throwing away the ufunc. - """) - - -add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g', - """ - format_float_OSprintf_g(val, precision) - - Print a floating point scalar using the system's printf function, - equivalent to: - - printf("%.*g", precision, val); - - for half/float/double, or replacing 'g' by 'Lg' for longdouble. This - method is designed to help cross-validate the format_float_* methods. - - Parameters - ---------- - val : python float or numpy floating scalar - Value to format. - - precision : non-negative integer, optional - Precision given to printf. - - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_scientific - format_float_positional - """) - - -############################################################################## -# -# Documentation for ufunc attributes and methods -# -############################################################################## - - -############################################################################## -# -# ufunc object -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', - """ - Functions that operate element by element on whole arrays. - - To see the documentation for a specific ufunc, use `info`. For - example, ``np.info(np.sin)``. Because ufuncs are written in C - (for speed) and linked into Python with NumPy's ufunc facility, - Python's help() function finds this page whenever help() is called - on a ufunc. - - A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. 
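- A tiny added check (assuming ``import numpy as np``) that a name
- really is a ufunc before looking up its docs this way:
-
- >>> isinstance(np.sin, np.ufunc)
- True
- >>> isinstance(np.sum, np.ufunc)  # np.sum is a plain Python function
- False
-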
- - Calling ufuncs: - =============== - - op(*x[, out], where=True, **kwargs) - Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. - - The broadcasting rules are: - - * Dimensions of length 1 may be prepended to either array. - * Arrays may be repeated along dimensions of length 1. - - Parameters - ---------- - *x : array_like - Input arrays. - out : ndarray, None, or tuple of ndarray and None, optional - Alternate array object(s) in which to put the result; if provided, it - must have a shape that the inputs broadcast to. A tuple of arrays - (possible only as a keyword argument) must have length equal to the - number of outputs; use None for uninitialized outputs to be - allocated by the ufunc. - where : array_like, optional - This condition is broadcast over the input. At locations where the - condition is True, the `out` array will be set to the ufunc result. - Elsewhere, the `out` array will retain its original value. - Note that if an uninitialized `out` array is created via the default - ``out=None``, locations within it where the condition is False will - remain uninitialized. - **kwargs - For other keyword-only arguments, see the :ref:`ufunc docs `. - - Returns - ------- - r : ndarray or tuple of ndarray - `r` will have the shape that the arrays in `x` broadcast to; if `out` is - provided, it will be returned. If not, `r` will be allocated and - may contain uninitialized values. If the function has more than one - output, then the result will be a tuple of arrays. - - """) - - -############################################################################## -# -# ufunc attributes -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('identity', - """ - The identity value. - - Data attribute containing the identity element for the ufunc, if it has one. - If it does not, the attribute value is None. - - Examples - -------- - >>> np.add.identity - 0 - >>> np.multiply.identity - 1 - >>> np.power.identity - 1 - >>> print(np.exp.identity) - None - """)) - -add_newdoc('numpy.core', 'ufunc', ('nargs', - """ - The number of arguments. - - Data attribute containing the number of arguments the ufunc takes, including - optional ones. - - Notes - ----- - Typically this value will be one more than what you might expect because all - ufuncs take the optional "out" argument. - - Examples - -------- - >>> np.add.nargs - 3 - >>> np.multiply.nargs - 3 - >>> np.power.nargs - 3 - >>> np.exp.nargs - 2 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nin', - """ - The number of inputs. - - Data attribute containing the number of arguments the ufunc treats as input. - - Examples - -------- - >>> np.add.nin - 2 - >>> np.multiply.nin - 2 - >>> np.power.nin - 2 - >>> np.exp.nin - 1 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nout', - """ - The number of outputs. - - Data attribute containing the number of arguments the ufunc treats as output. - - Notes - ----- - Since all ufuncs can take output arguments, this will always be (at least) 1. - - Examples - -------- - >>> np.add.nout - 1 - >>> np.multiply.nout - 1 - >>> np.power.nout - 1 - >>> np.exp.nout - 1 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('ntypes', - """ - The number of types. - - The number of numerical NumPy types - of which there are 18 total - on which - the ufunc can operate. 
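- Complementing the single-output examples above, an added sketch with a
- two-output ufunc, where ``nargs == nin + nout``:
-
- >>> np.divmod.nin, np.divmod.nout, np.divmod.nargs
- (2, 2, 4)
-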
- - See Also - -------- - numpy.ufunc.types - - Examples - -------- - >>> np.add.ntypes - 18 - >>> np.multiply.ntypes - 18 - >>> np.power.ntypes - 17 - >>> np.exp.ntypes - 7 - >>> np.remainder.ntypes - 14 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('types', - """ - Returns a list with types grouped input->output. - - Data attribute listing the data-type "Domain-Range" groupings the ufunc can - deliver. The data-types are given using the character codes. - - See Also - -------- - numpy.ufunc.ntypes - - Examples - -------- - >>> np.add.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.multiply.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.power.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', - 'OO->O'] - - >>> np.exp.types - ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] - - >>> np.remainder.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] - - """)) - -add_newdoc('numpy.core', 'ufunc', ('signature', - """ - Definition of the core elements a generalized ufunc operates on. - - The signature determines how the dimensions of each input/output array - are split into core and loop dimensions: - - 1. Each dimension in the signature is matched to a dimension of the - corresponding passed-in array, starting from the end of the shape tuple. - 2. Core dimensions assigned to the same label in the signature must have - exactly matching sizes, no broadcasting is performed. - 3. The core dimensions are removed from all inputs and the remaining - dimensions are broadcast together, defining the loop dimensions. - - Notes - ----- - Generalized ufuncs are used internally in many linalg functions, and in - the testing suite; the examples below are taken from these. - For ufuncs that operate on scalars, the signature is None, which is - equivalent to '()' for every argument. - - Examples - -------- - >>> np.core.umath_tests.matrix_multiply.signature - '(m,n),(n,p)->(m,p)' - >>> np.linalg._umath_linalg.det.signature - '(m,m)->()' - >>> np.add.signature is None - True # equivalent to '(),()->()' - """)) - -############################################################################## -# -# ufunc methods -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('reduce', - """ - reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=, where=True) - - Reduces `a`'s dimension by one, by applying ufunc along one axis. - - Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then - :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = - the result of iterating `j` over :math:`range(N_i)`, cumulatively applying - ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. - For a one-dimensional array, reduce produces results equivalent to: - :: - - r = op.identity # op = ufunc - for i in range(len(A)): - r = op(r, A[i]) - return r - - For example, add.reduce() is equivalent to sum(). - - Parameters - ---------- - a : array_like - The array to act on. 
- axis : None or int or tuple of ints, optional - Axis or axes along which a reduction is performed. - The default (`axis` = 0) is perform a reduction over the first - dimension of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is None, a reduction is performed over all the axes. - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - - For operations which are either not commutative or not associative, - doing a reduction over multiple axes is not well-defined. The - ufuncs do not currently raise an exception in this case, but will - likely do so in the future. - dtype : data-type code, optional - The type used to represent the intermediate results. Defaults - to the data-type of the output array if this is provided, or - the data-type of the input array if no output array is provided. - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - .. versionadded:: 1.7.0 - initial : scalar, optional - The value with which to start the reduction. - If the ufunc has no identity or the dtype is object, this defaults - to None - otherwise it defaults to ufunc.identity. - If ``None`` is given, the first element of the reduction is used, - and an error is thrown if the reduction is empty. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - A boolean array which is broadcasted to match the dimensions - of `a`, and selects elements to include in the reduction. Note - that for ufuncs like ``minimum`` that do not have an identity - defined, one has to pass in also ``initial``. - - .. versionadded:: 1.17.0 - - Returns - ------- - r : ndarray - The reduced array. If `out` was supplied, `r` is a reference to it. - - Examples - -------- - >>> np.multiply.reduce([2,3,5]) - 30 - - A multi-dimensional array example: - - >>> X = np.arange(8).reshape((2,2,2)) - >>> X - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> np.add.reduce(X, 0) - array([[ 4, 6], - [ 8, 10]]) - >>> np.add.reduce(X) # confirm: default axis value is 0 - array([[ 4, 6], - [ 8, 10]]) - >>> np.add.reduce(X, 1) - array([[ 2, 4], - [10, 12]]) - >>> np.add.reduce(X, 2) - array([[ 1, 5], - [ 9, 13]]) - - You can use the ``initial`` keyword argument to initialize the reduction - with a different value, and ``where`` to select specific elements to include: - - >>> np.add.reduce([10], initial=5) - 15 - >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) - array([14., 14.]) - >>> a = np.array([10., np.nan, 10]) - >>> np.add.reduce(a, where=~np.isnan(a)) - 20.0 - - Allows reductions of empty arrays where they would normally fail, i.e. - for ufuncs without an identity. - - >>> np.minimum.reduce([], initial=np.inf) - inf - >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) - array([ 1., 10.]) - >>> np.minimum.reduce([]) - Traceback (most recent call last): - ... 
- ValueError: zero-size array to reduction operation minimum which has no identity - """)) - -add_newdoc('numpy.core', 'ufunc', ('accumulate', - """ - accumulate(array, axis=0, dtype=None, out=None) - - Accumulate the result of applying the operator to all elements. - - For a one-dimensional array, accumulate produces results equivalent to:: - - r = np.empty(len(A)) - t = op.identity # op = the ufunc being applied to A's elements - for i in range(len(A)): - t = op(t, A[i]) - r[i] = t - return r - - For example, add.accumulate() is equivalent to np.cumsum(). - - For a multi-dimensional array, accumulate is applied along only one - axis (axis zero by default; see Examples below) so repeated use is - necessary if one wants to accumulate over multiple axes. - - Parameters - ---------- - array : array_like - The array to act on. - axis : int, optional - The axis along which to apply the accumulation; default is zero. - dtype : data-type code, optional - The data-type used to represent the intermediate results. Defaults - to the data-type of the output array if such is provided, or the - the data-type of the input array if no output array is provided. - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - - Returns - ------- - r : ndarray - The accumulated values. If `out` was supplied, `r` is a reference to - `out`. - - Examples - -------- - 1-D array examples: - - >>> np.add.accumulate([2, 3, 5]) - array([ 2, 5, 10]) - >>> np.multiply.accumulate([2, 3, 5]) - array([ 2, 6, 30]) - - 2-D array examples: - - >>> I = np.eye(2) - >>> I - array([[1., 0.], - [0., 1.]]) - - Accumulate along axis 0 (rows), down columns: - - >>> np.add.accumulate(I, 0) - array([[1., 0.], - [1., 1.]]) - >>> np.add.accumulate(I) # no axis specified = axis zero - array([[1., 0.], - [1., 1.]]) - - Accumulate along axis 1 (columns), through rows: - - >>> np.add.accumulate(I, 1) - array([[1., 1.], - [0., 1.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('reduceat', - """ - reduceat(a, indices, axis=0, dtype=None, out=None) - - Performs a (local) reduce with specified slices over a single axis. - - For i in ``range(len(indices))``, `reduceat` computes - ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th - generalized "row" parallel to `axis` in the final result (i.e., in a - 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if - `axis = 1`, it becomes the i-th column). There are three exceptions to this: - - * when ``i = len(indices) - 1`` (so for the last index), - ``indices[i+1] = a.shape[axis]``. - * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is - simply ``a[indices[i]]``. - * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. - - The shape of the output depends on the size of `indices`, and may be - larger than `a` (this happens if ``len(indices) > a.shape[axis]``). - - Parameters - ---------- - a : array_like - The array to act on. - indices : array_like - Paired indices, comma separated (not colon), specifying slices to - reduce. - axis : int, optional - The axis along which to apply the reduceat. - dtype : data-type code, optional - The type used to represent the intermediate results. 
Defaults - to the data type of the output array if this is provided, or - the data type of the input array if no output array is provided. - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - - Returns - ------- - r : ndarray - The reduced values. If `out` was supplied, `r` is a reference to - `out`. - - Notes - ----- - A descriptive example: - - If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as - ``ufunc.reduceat(a, indices)[::2]`` where `indices` is - ``range(len(array) - 1)`` with a zero placed - in every other element: - ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. - - Don't be fooled by this attribute's name: `reduceat(a)` is not - necessarily smaller than `a`. - - Examples - -------- - To take the running sum of four successive values: - - >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] - array([ 6, 10, 14, 18]) - - A 2-D example: - - >>> x = np.linspace(0, 15, 16).reshape(4,4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [12., 13., 14., 15.]]) - - :: - - # reduce such that the result has the following five rows: - # [row1 + row2 + row3] - # [row4] - # [row2] - # [row3] - # [row1 + row2 + row3 + row4] - - >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) - array([[12., 15., 18., 21.], - [12., 13., 14., 15.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [24., 28., 32., 36.]]) - - :: - - # reduce such that result has the following two columns: - # [col1 * col2 * col3, col4] - - >>> np.multiply.reduceat(x, [0, 3], 1) - array([[ 0., 3.], - [ 120., 7.], - [ 720., 11.], - [2184., 15.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('outer', - """ - outer(A, B, **kwargs) - - Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. - - Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of - ``op.outer(A, B)`` is an array of dimension M + N such that: - - .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = - op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) - - For `A` and `B` one-dimensional, this is equivalent to:: - - r = empty(len(A),len(B)) - for i in range(len(A)): - for j in range(len(B)): - r[i,j] = op(A[i], B[j]) # op = ufunc in question - - Parameters - ---------- - A : array_like - First array - B : array_like - Second array - kwargs : any - Arguments to pass on to the ufunc. Typically `dtype` or `out`. - - Returns - ------- - r : ndarray - Output array - - See Also - -------- - numpy.outer - - Examples - -------- - >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) - array([[ 4, 5, 6], - [ 8, 10, 12], - [12, 15, 18]]) - - A multi-dimensional example: - - >>> A = np.array([[1, 2, 3], [4, 5, 6]]) - >>> A.shape - (2, 3) - >>> B = np.array([[1, 2, 3, 4]]) - >>> B.shape - (1, 4) - >>> C = np.multiply.outer(A, B) - >>> C.shape; C - (2, 3, 1, 4) - array([[[[ 1, 2, 3, 4]], - [[ 2, 4, 6, 8]], - [[ 3, 6, 9, 12]]], - [[[ 4, 8, 12, 16]], - [[ 5, 10, 15, 20]], - [[ 6, 12, 18, 24]]]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('at', - """ - at(a, indices, b=None) - - Performs unbuffered in place operation on operand 'a' for elements - specified by 'indices'. 
For addition ufunc, this method is equivalent to - ``a[indices] += b``, except that results are accumulated for elements that - are indexed more than once. For example, ``a[[0,0]] += 1`` will only - increment the first element once because of buffering, whereas - ``add.at(a, [0,0], 1)`` will increment the first element twice. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - The array to perform in place operation on. - indices : array_like or tuple - Array like index object or slice object for indexing into first - operand. If first operand has multiple dimensions, indices can be a - tuple of array like index objects or slice objects. - b : array_like - Second operand for ufuncs requiring two operands. Operand must be - broadcastable over first operand after indexing or slicing. - - Examples - -------- - Set items 0 and 1 to their negative values: - - >>> a = np.array([1, 2, 3, 4]) - >>> np.negative.at(a, [0, 1]) - >>> a - array([-1, -2, 3, 4]) - - Increment items 0 and 1, and increment item 2 twice: - - >>> a = np.array([1, 2, 3, 4]) - >>> np.add.at(a, [0, 1, 2, 2], 1) - >>> a - array([2, 3, 5, 4]) - - Add items 0 and 1 in first array to second array, - and store results in first array: - - >>> a = np.array([1, 2, 3, 4]) - >>> b = np.array([1, 2]) - >>> np.add.at(a, [0, 1], b) - >>> a - array([2, 4, 3, 4]) - - """)) - -############################################################################## -# -# Documentation for dtype attributes and methods -# -############################################################################## - -############################################################################## -# -# dtype object -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', - """ - dtype(obj, align=False, copy=False) - - Create a data type object. - - A numpy array is homogeneous, and contains elements described by a - dtype object. A dtype object can be constructed from different - combinations of fundamental numeric types. - - Parameters - ---------- - obj - Object to be converted to a data type object. - align : bool, optional - Add padding to the fields to match what a C compiler would output - for a similar C-struct. Can be ``True`` only if `obj` is a dictionary - or a comma-separated string. If a struct dtype is being created, - this also sets a sticky alignment flag ``isalignedstruct``. - copy : bool, optional - Make a new copy of the data-type object. If ``False``, the result - may just be a reference to a built-in data-type object. - - See also - -------- - result_type - - Examples - -------- - Using array-scalar type: - - >>> np.dtype(np.int16) - dtype('int16') - - Structured type, one field name 'f1', containing int16: - - >>> np.dtype([('f1', np.int16)]) - dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) - dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint64), ('f2', np.int32)]) - dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) - dtype([('a', '>> np.dtype("i4, (2,3)f8") - dtype([('f0', '>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)]) - dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) - dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')])) - - Using dictionaries. 
Two fields named 'gender' and 'age': - - >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) - dtype([('gender', 'S1'), ('age', 'u1')]) - - Offsets in bytes, here 0 and 25: - - >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) - dtype([('surname', 'S25'), ('age', 'u1')]) - - """) - -############################################################################## -# -# dtype attributes -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', - """ - The required alignment (bytes) of this data-type according to the compiler. - - More information is available in the C-API section of the manual. - - Examples - -------- - - >>> x = np.dtype('i4') - >>> x.alignment - 4 - - >>> x = np.dtype(float) - >>> x.alignment - 8 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', - """ - A character indicating the byte-order of this data-type object. - - One of: - - === ============== - '=' native - '<' little-endian - '>' big-endian - '|' not applicable - === ============== - - All built-in data-type objects have byteorder either '=' or '|'. - - Examples - -------- - - >>> dt = np.dtype('i2') - >>> dt.byteorder - '=' - >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder - '|' - >>> # or ASCII strings - >>> np.dtype('S2').byteorder - '|' - >>> # Even if specific code is given, and it is native - >>> # '=' is the byteorder - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> dt = np.dtype(native_code + 'i2') - >>> dt.byteorder - '=' - >>> # Swapped code shows up as itself - >>> dt = np.dtype(swapped_code + 'i2') - >>> dt.byteorder == swapped_code - True - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('char', - """A unique character code for each of the 21 different built-in types. - - Examples - -------- - - >>> x = np.dtype(float) - >>> x.char - 'd' - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('descr', - """ - `__array_interface__` description of the data-type. - - The format is that required by the 'descr' key in the - `__array_interface__` attribute. - - Warning: This attribute exists specifically for `__array_interface__`, - and passing it directly to `np.dtype` will not accurately reconstruct - some dtypes (e.g., scalar and subarray dtypes). - - Examples - -------- - - >>> x = np.dtype(float) - >>> x.descr - [('', '>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.descr - [('name', '>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> print(dt.fields) - {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('flags', - """ - Bit-flags describing how this data type is to be interpreted. - - Bit-masks are in `numpy.core.multiarray` as the constants - `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, - `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation - of these flags is in C-API documentation; they are largely useful - for user-defined data-types. - - The following example demonstrates that operations on this particular - dtype requires Python C-API. 
- - Examples - -------- - - >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) - >>> x.flags - 16 - >>> np.core.multiarray.NEEDS_PYAPI - 16 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', - """ - Boolean indicating whether this dtype contains any reference-counted - objects in any fields or sub-dtypes. - - Recall that what is actually in the ndarray memory representing - the Python object is the memory address of that object (a pointer). - Special handling may be required, and this attribute is useful for - distinguishing data types that may contain arbitrary Python objects - and data-types that won't. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', - """ - Integer indicating how this dtype relates to the built-in dtypes. - - Read-only. - - = ======================================================================== - 0 if this is a structured array type, with fields - 1 if this is a dtype compiled into numpy (such as ints, floats etc) - 2 if the dtype is for a user-defined numpy type - A user-defined type uses the numpy C-API machinery to extend - numpy to handle a new array type. See - :ref:`user.user-defined-data-types` in the NumPy manual. - = ======================================================================== - - Examples - -------- - >>> dt = np.dtype('i2') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype('f8') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype([('field1', 'f8')]) - >>> dt.isbuiltin - 0 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', - """ - Boolean indicating whether the byte order of this dtype is native - to the platform. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', - """ - Boolean indicating whether the dtype is a struct which maintains - field alignment. This flag is sticky, so when combining multiple - structs together, it is preserved and produces new dtypes which - are also aligned. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', - """ - The element size of this data-type object. - - For 18 of the 21 types this number is fixed by the data-type. - For the flexible data-types, this number can be anything. - - Examples - -------- - - >>> arr = np.array([[1, 2], [3, 4]]) - >>> arr.dtype - dtype('int64') - >>> arr.itemsize - 8 - - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.itemsize - 80 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('kind', - """ - A character code (one of 'biufcmMOSUV') identifying the general kind of data. - - = ====================== - b boolean - i signed integer - u unsigned integer - f floating-point - c complex floating-point - m timedelta - M datetime - O object - S (byte-)string - U Unicode - V void - = ====================== - - Examples - -------- - - >>> dt = np.dtype('i4') - >>> dt.kind - 'i' - >>> dt = np.dtype('f8') - >>> dt.kind - 'f' - >>> dt = np.dtype([('field1', 'f8')]) - >>> dt.kind - 'V' - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('name', - """ - A bit-width name for this data-type. - - Un-sized flexible data-type objects do not have this attribute. - - Examples - -------- - - >>> x = np.dtype(float) - >>> x.name - 'float64' - >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) - >>> x.name - 'void640' - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('names', - """ - Ordered list of field names, or ``None`` if there are no fields. - - The names are ordered according to increasing byte offset. 
This can be - used, for example, to walk through all of the named fields in offset order. - - Examples - -------- - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.names - ('name', 'grades') - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('num', - """ - A unique number for each of the 21 different built-in types. - - These are roughly ordered from least-to-most precision. - - Examples - -------- - - >>> dt = np.dtype(str) - >>> dt.num - 19 - - >>> dt = np.dtype(float) - >>> dt.num - 12 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('shape', - """ - Shape tuple of the sub-array if this data type describes a sub-array, - and ``()`` otherwise. - - Examples - -------- - - >>> dt = np.dtype(('i4', 4)) - >>> dt.shape - (4,) - - >>> dt = np.dtype(('i4', (2, 3))) - >>> dt.shape - (2, 3) - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', - """ - Number of dimensions of the sub-array if this data type describes a - sub-array, and ``0`` otherwise. - - .. versionadded:: 1.13.0 - - Examples - -------- - >>> x = np.dtype(float) - >>> x.ndim - 0 - - >>> x = np.dtype((float, 8)) - >>> x.ndim - 1 - - >>> x = np.dtype(('i4', (3, 4))) - >>> x.ndim - 2 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('str', - """The array-protocol typestring of this data-type object.""")) - -add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', - """ - Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and - None otherwise. - - The *shape* is the fixed shape of the sub-array described by this - data type, and *item_dtype* the data type of the array. - - If a field whose dtype object has this attribute is retrieved, - then the extra dimensions implied by *shape* are tacked on to - the end of the retrieved array. - - See Also - -------- - dtype.base - - Examples - -------- - >>> x = numpy.dtype('8f') - >>> x.subdtype - (dtype('float32'), (8,)) - - >>> x = numpy.dtype('i2') - >>> x.subdtype - >>> - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('base', - """ - Returns dtype for the base element of the subarrays, - regardless of their dimension or shape. - - See Also - -------- - dtype.subdtype - - Examples - -------- - >>> x = numpy.dtype('8f') - >>> x.base - dtype('float32') - - >>> x = numpy.dtype('i2') - >>> x.base - dtype('int16') - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('type', - """The type object used to instantiate a scalar of this data-type.""")) - -############################################################################## -# -# dtype methods -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new dtype with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. - - Parameters - ---------- - new_order : string, optional - Byte order to force; a value from the byte order specifications - below. The default value ('S') results in swapping the current - byte order. `new_order` codes can be any of: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - The code does a case-insensitive check on the first letter of - `new_order` for these alternatives. For example, any of '>' - or 'B' or 'b' or 'brian' are valid to specify big-endian. 
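- For instance (an added aside; only the first letter matters, and the
- check is case-insensitive):
-
- >>> np.dtype('<i2').newbyteorder('B') == np.dtype('>i2')
- True
- >>> np.dtype('<i2').newbyteorder('brian') == np.dtype('>i2')
- True
-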
- - Returns - ------- - new_dtype : dtype - New dtype object with the given change to the byte order. - - Notes - ----- - Changes are also made in all fields and sub-arrays of the data type. - - Examples - -------- - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> native_dt = np.dtype(native_code+'i2') - >>> swapped_dt = np.dtype(swapped_code+'i2') - >>> native_dt.newbyteorder('S') == swapped_dt - True - >>> native_dt.newbyteorder() == swapped_dt - True - >>> native_dt == swapped_dt.newbyteorder('S') - True - >>> native_dt == swapped_dt.newbyteorder('=') - True - >>> native_dt == swapped_dt.newbyteorder('N') - True - >>> native_dt == native_dt.newbyteorder('|') - True - >>> np.dtype('>> np.dtype('>> np.dtype('>i2') == native_dt.newbyteorder('>') - True - >>> np.dtype('>i2') == native_dt.newbyteorder('B') - True - - """)) - - -############################################################################## -# -# Datetime-related Methods -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', - """ - busdaycalendar(weekmask='1111100', holidays=None) - - A business day calendar object that efficiently stores information - defining valid days for the busday family of functions. - - The default valid days are Monday through Friday ("business days"). - A busdaycalendar object can be specified with any set of weekly - valid days, plus an optional "holiday" dates that always will be invalid. - - Once a busdaycalendar object is created, the weekmask and holidays - cannot be modified. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates, no matter which - weekday they fall upon. Holiday dates may be specified in any - order, and NaT (not-a-time) dates are ignored. This list is - saved in a normalized form that is suited for fast calculations - of valid days. - - Returns - ------- - out : busdaycalendar - A business day calendar object containing the specified - weekmask and holidays values. - - See Also - -------- - is_busday : Returns a boolean array indicating valid days. - busday_offset : Applies an offset counted in valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Attributes - ---------- - Note: once a busdaycalendar object is created, you cannot modify the - weekmask or holidays. The attributes return copies of internal data. - weekmask : (copy) seven-element array of bool - holidays : (copy) sorted array of datetime64[D] - - Examples - -------- - >>> # Some important days in July - ... bdd = np.busdaycalendar( - ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) - >>> # Default is Monday to Friday weekdays - ... bdd.weekmask - array([ True, True, True, True, True, False, False]) - >>> # Any holidays already on the weekend are removed - ... 
bdd.holidays - array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') - """) - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', - """A copy of the seven-element boolean mask indicating valid days.""")) - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', - """A copy of the holiday array indicating additional invalid days.""")) - -add_newdoc('numpy.core.multiarray', 'normalize_axis_index', - """ - normalize_axis_index(axis, ndim, msg_prefix=None) - - Normalizes an axis index, `axis`, such that is a valid positive index into - the shape of array with `ndim` dimensions. Raises an AxisError with an - appropriate message if this is not possible. - - Used internally by all axis-checking logic. - - .. versionadded:: 1.13.0 - - Parameters - ---------- - axis : int - The un-normalized index of the axis. Can be negative - ndim : int - The number of dimensions of the array that `axis` should be normalized - against - msg_prefix : str - A prefix to put before the message, typically the name of the argument - - Returns - ------- - normalized_axis : int - The normalized axis index, such that `0 <= normalized_axis < ndim` - - Raises - ------ - AxisError - If the axis index is invalid, when `-ndim <= axis < ndim` is false. - - Examples - -------- - >>> normalize_axis_index(0, ndim=3) - 0 - >>> normalize_axis_index(1, ndim=3) - 1 - >>> normalize_axis_index(-1, ndim=3) - 2 - - >>> normalize_axis_index(3, ndim=3) - Traceback (most recent call last): - ... - AxisError: axis 3 is out of bounds for array of dimension 3 - >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') - Traceback (most recent call last): - ... - AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3 - """) - -add_newdoc('numpy.core.multiarray', 'datetime_data', - """ - datetime_data(dtype, /) - - Get information about the step size of a date or time type. - - The returned tuple can be passed as the second argument of `numpy.datetime64` and - `numpy.timedelta64`. - - Parameters - ---------- - dtype : dtype - The dtype object, which must be a `datetime64` or `timedelta64` type. - - Returns - ------- - unit : str - The :ref:`datetime unit ` on which this dtype - is based. - count : int - The number of base units in a step. - - Examples - -------- - >>> dt_25s = np.dtype('timedelta64[25s]') - >>> np.datetime_data(dt_25s) - ('s', 25) - >>> np.array(10, dt_25s).astype('timedelta64[s]') - array(250, dtype='timedelta64[s]') - - The result can be used to construct a datetime that uses the same units - as a timedelta - - >>> np.datetime64('2010', np.datetime_data(dt_25s)) - numpy.datetime64('2010-01-01T00:00:00','25s') - """) - - -############################################################################## -# -# Documentation for `generic` attributes and methods -# -############################################################################## - -add_newdoc('numpy.core.numerictypes', 'generic', - """ - Base class for numpy scalar types. - - Class from which most (all?) numpy scalar types are derived. For - consistency, exposes the same API as `ndarray`, despite many - consequent attributes being either "get-only," or completely irrelevant. - This is the class from which it is strongly suggested users should derive - custom scalar types. 
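- A one-line added check (assuming ``import numpy as np``) that a NumPy
- scalar really is an instance of `generic`:
-
- >>> isinstance(np.float64(1.0), np.generic)
- True
-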
- - """) - -# Attributes - -add_newdoc('numpy.core.numerictypes', 'generic', ('T', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('base', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('data', - """Pointer to start of data.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', - """Get array data-descriptor.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flags', - """The integer value of flags.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flat', - """A 1-D view of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('imag', - """The imaginary part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', - """The length of one element in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', - """The length of the scalar in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', - """The number of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('real', - """The real part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('shape', - """Tuple of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('size', - """The number of elements in the gentype.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('strides', - """Tuple of bytes steps in each dimension.""")) - -# Methods - -add_newdoc('numpy.core.numerictypes', 'generic', ('all', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('any', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('astype', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('choose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('clip', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('compress', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('copy', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dump', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('fill', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('item', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('max', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('mean', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('min', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new `dtype` with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. - - The `new_order` code can be any from the following: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - Parameters - ---------- - new_order : str, optional - Byte order to force; a value from the byte order specifications - above. The default value ('S') results in swapping the current - byte order. The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. - - - Returns - ------- - new_dtype : dtype - New `dtype` object with the given change to the byte order. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('prod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('put', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('resize', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('round', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('std', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('take', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('trace', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('var', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('view', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See also the corresponding attribute of the derived class of interest. - - """)) - - -############################################################################## -# -# Documentation for scalar type abstract base classes in type hierarchy -# -############################################################################## - - -add_newdoc('numpy.core.numerictypes', 'number', - """ - Abstract base class of all numeric scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'integer', - """ - Abstract base class of all integer scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'signedinteger', - """ - Abstract base class of all signed integer scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'unsignedinteger', - """ - Abstract base class of all unsigned integer scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'inexact', - """ - Abstract base class of all numeric scalar types with a (potentially) - inexact representation of the values in its range, such as - floating-point numbers. - - """) - -add_newdoc('numpy.core.numerictypes', 'floating', - """ - Abstract base class of all floating-point scalar types. - - """) - -add_newdoc('numpy.core.numerictypes', 'complexfloating', - """ - Abstract base class of all complex number scalar types that are made up of - floating-point numbers. - - """) - -add_newdoc('numpy.core.numerictypes', 'flexible', - """ - Abstract base class of all scalar types without predefined length. - The actual size of these types depends on the specific `np.dtype` - instantiation. - - """) - -add_newdoc('numpy.core.numerictypes', 'character', - """ - Abstract base class of all character string scalar types. 
- - """) - - -############################################################################## -# -# Documentation for concrete scalar classes -# -############################################################################## - -def numeric_type_aliases(aliases): - def type_aliases_gen(): - for alias, doc in aliases: - try: - alias_type = getattr(_numerictypes, alias) - except AttributeError: - # The set of aliases that actually exist varies between platforms - pass - else: - yield (alias_type, alias, doc) - return list(type_aliases_gen()) - - -possible_aliases = numeric_type_aliases([ - ('int8', '8-bit signed integer (-128 to 127)'), - ('int16', '16-bit signed integer (-32768 to 32767)'), - ('int32', '32-bit signed integer (-2147483648 to 2147483647)'), - ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'), - ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), - ('uint8', '8-bit unsigned integer (0 to 255)'), - ('uint16', '16-bit unsigned integer (0 to 65535)'), - ('uint32', '32-bit unsigned integer (0 to 4294967295)'), - ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'), - ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), - ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), - ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), - ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), - ('float96', '96-bit extended-precision floating-point number type'), - ('float128', '128-bit extended-precision floating-point number type'), - ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), - ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), - ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), - ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), - ]) - - -def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): - o = getattr(_numerictypes, obj) - - character_code = dtype(o).char - canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj) - alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases) - alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc) - for (alias_type, alias, doc) in possible_aliases if alias_type is o) - - docstring = """ - {doc} - Character code: ``'{character_code}'``. - {canonical_name_doc}{alias_doc} - """.format(doc=doc.strip(), character_code=character_code, - canonical_name_doc=canonical_name_doc, alias_doc=alias_doc) - - add_newdoc('numpy.core.numerictypes', obj, docstring) - - -add_newdoc_for_scalar_type('bool_', ['bool8'], - """ - Boolean type (True or False), stored as a byte. - """) - -add_newdoc_for_scalar_type('byte', [], - """ - Signed integer type, compatible with C ``char``. - """) - -add_newdoc_for_scalar_type('short', [], - """ - Signed integer type, compatible with C ``short``. - """) - -add_newdoc_for_scalar_type('intc', [], - """ - Signed integer type, compatible with C ``int``. - """) - -add_newdoc_for_scalar_type('int_', [], - """ - Signed integer type, compatible with Python `int` anc C ``long``. - """) - -add_newdoc_for_scalar_type('longlong', [], - """ - Signed integer type, compatible with C ``long long``. 
- """) - -add_newdoc_for_scalar_type('ubyte', [], - """ - Unsigned integer type, compatible with C ``unsigned char``. - """) - -add_newdoc_for_scalar_type('ushort', [], - """ - Unsigned integer type, compatible with C ``unsigned short``. - """) - -add_newdoc_for_scalar_type('uintc', [], - """ - Unsigned integer type, compatible with C ``unsigned int``. - """) - -add_newdoc_for_scalar_type('uint', [], - """ - Unsigned integer type, compatible with C ``unsigned long``. - """) - -add_newdoc_for_scalar_type('ulonglong', [], - """ - Signed integer type, compatible with C ``unsigned long long``. - """) - -add_newdoc_for_scalar_type('half', [], - """ - Half-precision floating-point number type. - """) - -add_newdoc_for_scalar_type('single', [], - """ - Single-precision floating-point number type, compatible with C ``float``. - """) - -add_newdoc_for_scalar_type('double', ['float_'], - """ - Double-precision floating-point number type, compatible with Python `float` - and C ``double``. - """) - -add_newdoc_for_scalar_type('longdouble', ['longfloat'], - """ - Extended-precision floating-point number type, compatible with C - ``long double`` but not necessarily with IEEE 754 quadruple-precision. - """) - -add_newdoc_for_scalar_type('csingle', ['singlecomplex'], - """ - Complex number type composed of two single-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'], - """ - Complex number type composed of two double-precision floating-point - numbers, compatible with Python `complex`. - """) - -add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'], - """ - Complex number type composed of two extended-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('object_', [], - """ - Any Python object. - """) - -# TODO: work out how to put this on the base class, np.floating -for float_name in ('half', 'single', 'double', 'longdouble'): - add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio', - """ - {ftype}.as_integer_ratio() -> (int, int) - - Return a pair of integers, whose ratio is exactly equal to the original - floating point number, and with a positive denominator. - Raise OverflowError on infinities and a ValueError on NaNs. - - >>> np.{ftype}(10.0).as_integer_ratio() - (10, 1) - >>> np.{ftype}(0.0).as_integer_ratio() - (0, 1) - >>> np.{ftype}(-.25).as_integer_ratio() - (-1, 4) - """.format(ftype=float_name))) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/_asarray.py b/venv/lib/python3.7/site-packages/numpy/core/_asarray.py deleted file mode 100644 index 0ad4161..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_asarray.py +++ /dev/null @@ -1,324 +0,0 @@ -""" -Functions in the ``as*array`` family that promote array-likes into arrays. - -`require` fits this category despite its name not matching this pattern. -""" -from __future__ import division, absolute_import, print_function - -from .overrides import set_module -from .multiarray import array - - -__all__ = [ - "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require", -] - -@set_module('numpy') -def asarray(a, dtype=None, order=None): - """Convert the input to an array. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. 
- order : {'C', 'F'}, optional - Whether to use row-major (C-style) or - column-major (Fortran-style) memory representation. - Defaults to 'C'. - - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray with matching dtype and order. If `a` is a - subclass of ndarray, a base class ndarray is returned. - - See Also - -------- - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asarray(a) - array([1, 2]) - - Existing arrays are not copied: - - >>> a = np.array([1, 2]) - >>> np.asarray(a) is a - True - - If `dtype` is set, array is copied only if dtype does not match: - - >>> a = np.array([1, 2], dtype=np.float32) - >>> np.asarray(a, dtype=np.float32) is a - True - >>> np.asarray(a, dtype=np.float64) is a - False - - Contrary to `asanyarray`, ndarray subclasses are not passed through: - - >>> issubclass(np.recarray, np.ndarray) - True - >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) - >>> np.asarray(a) is a - False - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order) - - -@set_module('numpy') -def asanyarray(a, dtype=None, order=None): - """Convert the input to an ndarray, but pass ndarray subclasses through. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes scalars, lists, lists of tuples, tuples, tuples of tuples, - tuples of lists, and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major (C-style) or column-major - (Fortran-style) memory representation. Defaults to 'C'. - - Returns - ------- - out : ndarray or an ndarray subclass - Array interpretation of `a`. If `a` is an ndarray or a subclass - of ndarray, it is returned as-is and no copy is performed. - - See Also - -------- - asarray : Similar function which always returns ndarrays. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asanyarray(a) - array([1, 2]) - - Instances of `ndarray` subclasses are passed through as-is: - - >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order, subok=True) - - -@set_module('numpy') -def ascontiguousarray(a, dtype=None): - """ - Return a contiguous array (ndim >= 1) in memory (C order). - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - Data-type of returned array. 
- - Returns - ------- - out : ndarray - Contiguous array of same shape and content as `a`, with type `dtype` - if specified. - - See Also - -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> np.ascontiguousarray(x, dtype=np.float32) - array([[0., 1., 2.], - [3., 4., 5.]], dtype=float32) - >>> x.flags['C_CONTIGUOUS'] - True - - Note: This function returns an array with at least one-dimension (1-d) - so it will not preserve 0-d arrays. - - """ - return array(a, dtype, copy=False, order='C', ndmin=1) - - -@set_module('numpy') -def asfortranarray(a, dtype=None): - """ - Return an array (ndim >= 1) laid out in Fortran order in memory. - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - By default, the data-type is inferred from the input data. - - Returns - ------- - out : ndarray - The input `a` in Fortran, or column-major, order. - - See Also - -------- - ascontiguousarray : Convert input to a contiguous (C order) array. - asanyarray : Convert input to an ndarray with either row or - column-major memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> y = np.asfortranarray(x) - >>> x.flags['F_CONTIGUOUS'] - False - >>> y.flags['F_CONTIGUOUS'] - True - - Note: This function returns an array with at least one-dimension (1-d) - so it will not preserve 0-d arrays. - - """ - return array(a, dtype, copy=False, order='F', ndmin=1) - - -@set_module('numpy') -def require(a, dtype=None, requirements=None): - """ - Return an ndarray of the provided type that satisfies requirements. - - This function is useful to be sure that an array with the correct flags - is returned for passing to compiled code (perhaps through ctypes). - - Parameters - ---------- - a : array_like - The object to be converted to a type-and-requirement-satisfying array. - dtype : data-type - The required data-type. If None preserve the current dtype. If your - application requires the data to be in native byteorder, include - a byteorder specification as a part of the dtype specification. - requirements : str or list of str - The requirements list can be any of the following - - * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array - * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array - * 'ALIGNED' ('A') - ensure a data-type aligned array - * 'WRITEABLE' ('W') - ensure a writable array - * 'OWNDATA' ('O') - ensure an array that owns its own data - * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass - - Returns - ------- - out : ndarray - Array with specified requirements and type if given. - - See Also - -------- - asarray : Convert input to an ndarray. - asanyarray : Convert to an ndarray, but pass through ndarray subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - ndarray.flags : Information about the memory layout of the array. - - Notes - ----- - The returned array will be guaranteed to have the listed requirements - by making a copy if needed. 
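    A small doctest-style check of the flag aliases (an editorial addition,
    not in the upstream docstring; only the documented one-letter codes are
    used):

    >>> a = np.require([1, 2, 3], dtype=np.float32, requirements=['W', 'A'])
    >>> a.flags['WRITEABLE'] and a.flags['ALIGNED']
    True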
- - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : False - WRITEABLE : True - ALIGNED : True - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - - >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) - >>> y.flags - C_CONTIGUOUS : False - F_CONTIGUOUS : True - OWNDATA : True - WRITEABLE : True - ALIGNED : True - WRITEBACKIFCOPY : False - UPDATEIFCOPY : False - - """ - possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', - 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', - 'A': 'A', 'ALIGNED': 'A', - 'W': 'W', 'WRITEABLE': 'W', - 'O': 'O', 'OWNDATA': 'O', - 'E': 'E', 'ENSUREARRAY': 'E'} - if not requirements: - return asanyarray(a, dtype=dtype) - else: - requirements = {possible_flags[x.upper()] for x in requirements} - - if 'E' in requirements: - requirements.remove('E') - subok = False - else: - subok = True - - order = 'A' - if requirements >= {'C', 'F'}: - raise ValueError('Cannot specify both "C" and "F" order') - elif 'F' in requirements: - order = 'F' - requirements.remove('F') - elif 'C' in requirements: - order = 'C' - requirements.remove('C') - - arr = array(a, dtype=dtype, order=order, copy=False, subok=subok) - - for prop in requirements: - if not arr.flags[prop]: - arr = arr.copy(order) - break - return arr diff --git a/venv/lib/python3.7/site-packages/numpy/core/_dtype.py b/venv/lib/python3.7/site-packages/numpy/core/_dtype.py deleted file mode 100644 index df1ff18..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_dtype.py +++ /dev/null @@ -1,354 +0,0 @@ -""" -A place for code to be called from the implementation of np.dtype - -String handling is much easier to do correctly in python. -""" -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np - - -_kind_to_stem = { - 'u': 'uint', - 'i': 'int', - 'c': 'complex', - 'f': 'float', - 'b': 'bool', - 'V': 'void', - 'O': 'object', - 'M': 'datetime', - 'm': 'timedelta' -} -if sys.version_info[0] >= 3: - _kind_to_stem.update({ - 'S': 'bytes', - 'U': 'str' - }) -else: - _kind_to_stem.update({ - 'S': 'string', - 'U': 'unicode' - }) - - -def _kind_name(dtype): - try: - return _kind_to_stem[dtype.kind] - except KeyError: - raise RuntimeError( - "internal dtype error, unknown kind {!r}" - .format(dtype.kind) - ) - - -def __str__(dtype): - if dtype.fields is not None: - return _struct_str(dtype, include_align=True) - elif dtype.subdtype: - return _subarray_str(dtype) - elif issubclass(dtype.type, np.flexible) or not dtype.isnative: - return dtype.str - else: - return dtype.name - - -def __repr__(dtype): - arg_str = _construction_repr(dtype, include_align=False) - if dtype.isalignedstruct: - arg_str = arg_str + ", align=True" - return "dtype({})".format(arg_str) - - -def _unpack_field(dtype, offset, title=None): - """ - Helper function to normalize the items in dtype.fields. - - Call as: - - dtype, offset, title = _unpack_field(*dtype.fields[name]) - """ - return dtype, offset, title - - -def _isunsized(dtype): - # PyDataType_ISUNSIZED - return dtype.itemsize == 0 - - -def _construction_repr(dtype, include_align=False, short=False): - """ - Creates a string repr of the dtype, excluding the 'dtype()' part - surrounding the object. This object may be a string, a list, or - a dict depending on the nature of the dtype. 
This - is the object passed as the first parameter to the dtype - constructor, and if no additional constructor parameters are - given, will reproduce the exact memory layout. - - Parameters - ---------- - short : bool - If true, this creates a shorter repr using 'kind' and 'itemsize', instead - of the longer type name. - - include_align : bool - If true, this includes the 'align=True' parameter - inside the struct dtype construction dict when needed. Use this flag - if you want a proper repr string without the 'dtype()' part around it. - - If false, this does not preserve the - 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for - struct arrays like the regular repr does, because the 'align' - flag is not part of first dtype constructor parameter. This - mode is intended for a full 'repr', where the 'align=True' is - provided as the second parameter. - """ - if dtype.fields is not None: - return _struct_str(dtype, include_align=include_align) - elif dtype.subdtype: - return _subarray_str(dtype) - else: - return _scalar_str(dtype, short=short) - - -def _scalar_str(dtype, short): - byteorder = _byte_order_str(dtype) - - if dtype.type == np.bool_: - if short: - return "'?'" - else: - return "'bool'" - - elif dtype.type == np.object_: - # The object reference may be different sizes on different - # platforms, so it should never include the itemsize here. - return "'O'" - - elif dtype.type == np.string_: - if _isunsized(dtype): - return "'S'" - else: - return "'S%d'" % dtype.itemsize - - elif dtype.type == np.unicode_: - if _isunsized(dtype): - return "'%sU'" % byteorder - else: - return "'%sU%d'" % (byteorder, dtype.itemsize / 4) - - # unlike the other types, subclasses of void are preserved - but - # historically the repr does not actually reveal the subclass - elif issubclass(dtype.type, np.void): - if _isunsized(dtype): - return "'V'" - else: - return "'V%d'" % dtype.itemsize - - elif dtype.type == np.datetime64: - return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype)) - - elif dtype.type == np.timedelta64: - return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype)) - - elif np.issubdtype(dtype, np.number): - # Short repr with endianness, like '<f8' - if short or dtype.byteorder not in ('=', '|'): - return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize) - - # Longer repr, like 'float64' - else: - return _kind_name(dtype) + str(8*dtype.itemsize) - - elif dtype.isbuiltin == 2: - return dtype.type.__name__ - - else: - raise RuntimeError( - "Internal error: NumPy dtype unrecognized type number") - - -def _byte_order_str(dtype): - """ Normalize byteorder to '<' or '>' """ - # hack to obtain the native and swapped byte order characters - swapped = np.dtype(int).newbyteorder('s') - native = swapped.newbyteorder('s') - - byteorder = dtype.byteorder - if byteorder == '=': - return native.byteorder - if byteorder == 's': - # TODO: this path can never be reached - return swapped.byteorder - elif byteorder == '|': - return '' - else: - return byteorder - - -def _datetime_metadata_str(dtype): - # TODO: this duplicates the C append_metastr_to_string - unit, count = np.datetime_data(dtype) - if unit == 'generic': - return '' - elif count == 1: - return '[{}]'.format(unit) - else: - return '[{}{}]'.format(count, unit) - - -def _struct_dict_str(dtype, includealignedflag): - # unpack the fields dictionary into lists - names = dtype.names - fld_dtypes = [] - offsets = [] - titles = [] - for name in names: - fld_dtype, offset, title = _unpack_field(*dtype.fields[name]) - fld_dtypes.append(fld_dtype) - offsets.append(offset) - titles.append(title) - - # Build up a string to make the dictionary - - # First, the names - ret = "{'names':[" - ret += ",".join(repr(name) for name in names) - - # Second, the formats - ret += "], 'formats':[" - ret += ",".join( - _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes) - - # Third, the offsets - ret += "], 'offsets':[" - ret += ",".join("%d" % offset
for offset in offsets) - - # Fourth, the titles - if any(title is not None for title in titles): - ret += "], 'titles':[" - ret += ",".join(repr(title) for title in titles) - - # Fifth, the itemsize - ret += "], 'itemsize':%d" % dtype.itemsize - - if (includealignedflag and dtype.isalignedstruct): - # Finally, the aligned flag - ret += ", 'aligned':True}" - else: - ret += "}" - - return ret - - -def _is_packed(dtype): - """ - Checks whether the structured data type in 'dtype' - has a simple layout, where all the fields are in order, - and follow each other with no alignment padding. - - When this returns true, the dtype can be reconstructed - from a list of the field names and dtypes with no additional - dtype parameters. - - Duplicates the C `is_dtype_struct_simple_unaligned_layout` function. - """ - total_offset = 0 - for name in dtype.names: - fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) - if fld_offset != total_offset: - return False - total_offset += fld_dtype.itemsize - if total_offset != dtype.itemsize: - return False - return True - - -def _struct_list_str(dtype): - items = [] - for name in dtype.names: - fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) - - item = "(" - if title is not None: - item += "({!r}, {!r}), ".format(title, name) - else: - item += "{!r}, ".format(name) - # Special case subarray handling here - if fld_dtype.subdtype is not None: - base, shape = fld_dtype.subdtype - item += "{}, {}".format( - _construction_repr(base, short=True), - shape - ) - else: - item += _construction_repr(fld_dtype, short=True) - - item += ")" - items.append(item) - - return "[" + ", ".join(items) + "]" - - -def _struct_str(dtype, include_align): - # The list str representation can't include the 'align=' flag, - # so if it is requested and the struct has the aligned flag set, - # we must use the dict str instead. 
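    # (editorial note, not upstream) A non-packed layout (fields with gaps or
    # out-of-order offsets) also fails the _is_packed() test below, so it
    # falls through to the dict form, which can spell out explicit 'offsets'
    # and an 'itemsize'.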
- if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): - sub = _struct_list_str(dtype) - - else: - sub = _struct_dict_str(dtype, include_align) - - # If the data type isn't the default, void, show it - if dtype.type != np.void: - return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub) - else: - return sub - - -def _subarray_str(dtype): - base, shape = dtype.subdtype - return "({}, {})".format( - _construction_repr(base, short=True), - shape - ) - - -def _name_includes_bit_suffix(dtype): - if dtype.type == np.object_: - # pointer size varies by system, best to omit it - return False - elif dtype.type == np.bool_: - # implied - return False - elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): - # unspecified - return False - else: - return True - - -def _name_get(dtype): - # provides dtype.name.__get__, documented as returning a "bit name" - - if dtype.isbuiltin == 2: - # user dtypes don't promise to do anything special - return dtype.type.__name__ - - if issubclass(dtype.type, np.void): - # historically, void subclasses preserve their name, eg `record64` - name = dtype.type.__name__ - else: - name = _kind_name(dtype) - - # append bit counts - if _name_includes_bit_suffix(dtype): - name += "{}".format(dtype.itemsize * 8) - - # append metadata to datetimes - if dtype.type in (np.datetime64, np.timedelta64): - name += _datetime_metadata_str(dtype) - - return name diff --git a/venv/lib/python3.7/site-packages/numpy/core/_dtype_ctypes.py b/venv/lib/python3.7/site-packages/numpy/core/_dtype_ctypes.py deleted file mode 100644 index 7082412..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_dtype_ctypes.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -Conversion from ctypes to dtype. - -In an ideal world, we could achieve this through the PEP3118 buffer protocol, -something like:: - - def dtype_from_ctypes_type(t): - # needed to ensure that the shape of `t` is within memoryview.format - class DummyStruct(ctypes.Structure): - _fields_ = [('a', t)] - - # empty to avoid memory allocation - ctype_0 = (DummyStruct * 0)() - mv = memoryview(ctype_0) - - # convert the struct, and slice back out the field - return _dtype_from_pep3118(mv.format)['a'] - -Unfortunately, this fails because: - -* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) -* PEP3118 cannot represent unions, but both numpy and ctypes can -* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) -""" -import _ctypes -import ctypes - -import numpy as np - - -def _from_ctypes_array(t): - return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,))) - - -def _from_ctypes_structure(t): - for item in t._fields_: - if len(item) > 2: - raise TypeError( - "ctypes bitfields have no dtype equivalent") - - if hasattr(t, "_pack_"): - formats = [] - offsets = [] - names = [] - current_offset = 0 - for fname, ftyp in t._fields_: - names.append(fname) - formats.append(dtype_from_ctypes_type(ftyp)) - # Each type has a default offset, this is platform dependent for some types. 
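            # (editorial note, not upstream) ctypes' _pack_ caps each field's
            # natural alignment, in the spirit of MSVC's "#pragma pack(n)",
            # so the effective alignment used below is
            # min(_pack_, ctypes.alignment(field_type)).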
- effective_pack = min(t._pack_, ctypes.alignment(ftyp)) - current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack - offsets.append(current_offset) - current_offset += ctypes.sizeof(ftyp) - - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) - else: - fields = [] - for fname, ftyp in t._fields_: - fields.append((fname, dtype_from_ctypes_type(ftyp))) - - # by default, ctypes structs are aligned - return np.dtype(fields, align=True) - - -def _from_ctypes_scalar(t): - """ - Return the dtype type with endianness included if it's the case - """ - if getattr(t, '__ctype_be__', None) is t: - return np.dtype('>' + t._type_) - elif getattr(t, '__ctype_le__', None) is t: - return np.dtype('<' + t._type_) - else: - return np.dtype(t._type_) - - -def _from_ctypes_union(t): - formats = [] - offsets = [] - names = [] - for fname, ftyp in t._fields_: - names.append(fname) - formats.append(dtype_from_ctypes_type(ftyp)) - offsets.append(0) # Union fields are offset to 0 - - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) - - -def dtype_from_ctypes_type(t): - """ - Construct a dtype object from a ctypes type - """ - if issubclass(t, _ctypes.Array): - return _from_ctypes_array(t) - elif issubclass(t, _ctypes._Pointer): - raise TypeError("ctypes pointers have no dtype equivalent") - elif issubclass(t, _ctypes.Structure): - return _from_ctypes_structure(t) - elif issubclass(t, _ctypes.Union): - return _from_ctypes_union(t) - elif isinstance(getattr(t, '_type_', None), str): - return _from_ctypes_scalar(t) - else: - raise NotImplementedError( - "Unknown ctypes type {}".format(t.__name__)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/_exceptions.py b/venv/lib/python3.7/site-packages/numpy/core/_exceptions.py deleted file mode 100644 index 88a4556..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_exceptions.py +++ /dev/null @@ -1,200 +0,0 @@ -""" -Various richly-typed exceptions, that also help us deal with string formatting -in python where it's easier. - -By putting the formatting in `__str__`, we also avoid paying the cost for -users who silence the exceptions. -""" -from numpy.core.overrides import set_module - -def _unpack_tuple(tup): - if len(tup) == 1: - return tup[0] - else: - return tup - - -def _display_as_base(cls): - """ - A decorator that makes an exception class look like its base. - - We use this to hide subclasses that are implementation details - the user - should catch the base type, which is what the traceback will show them. - - Classes decorated with this decorator are subject to removal without a - deprecation warning. 
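    A sketch of the effect (an editorial illustration, not upstream text)::

        @_display_as_base
        class _MyKeyError(KeyError):
            pass

        # _MyKeyError.__name__ is now 'KeyError', so tracebacks display the
        # base name, while ``except KeyError`` still catches the subclass.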
- """ - assert issubclass(cls, Exception) - cls.__name__ = cls.__base__.__name__ - cls.__qualname__ = cls.__base__.__qualname__ - set_module(cls.__base__.__module__)(cls) - return cls - - -class UFuncTypeError(TypeError): - """ Base class for all ufunc exceptions """ - def __init__(self, ufunc): - self.ufunc = ufunc - - -@_display_as_base -class _UFuncBinaryResolutionError(UFuncTypeError): - """ Thrown when a binary resolution fails """ - def __init__(self, ufunc, dtypes): - super().__init__(ufunc) - self.dtypes = tuple(dtypes) - assert len(self.dtypes) == 2 - - def __str__(self): - return ( - "ufunc {!r} cannot use operands with types {!r} and {!r}" - ).format( - self.ufunc.__name__, *self.dtypes - ) - - -@_display_as_base -class _UFuncNoLoopError(UFuncTypeError): - """ Thrown when a ufunc loop cannot be found """ - def __init__(self, ufunc, dtypes): - super().__init__(ufunc) - self.dtypes = tuple(dtypes) - - def __str__(self): - return ( - "ufunc {!r} did not contain a loop with signature matching types " - "{!r} -> {!r}" - ).format( - self.ufunc.__name__, - _unpack_tuple(self.dtypes[:self.ufunc.nin]), - _unpack_tuple(self.dtypes[self.ufunc.nin:]) - ) - - -@_display_as_base -class _UFuncCastingError(UFuncTypeError): - def __init__(self, ufunc, casting, from_, to): - super().__init__(ufunc) - self.casting = casting - self.from_ = from_ - self.to = to - - -@_display_as_base -class _UFuncInputCastingError(_UFuncCastingError): - """ Thrown when a ufunc input cannot be casted """ - def __init__(self, ufunc, casting, from_, to, i): - super().__init__(ufunc, casting, from_, to) - self.in_i = i - - def __str__(self): - # only show the number if more than one input exists - i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else "" - return ( - "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting - ) - - -@_display_as_base -class _UFuncOutputCastingError(_UFuncCastingError): - """ Thrown when a ufunc output cannot be casted """ - def __init__(self, ufunc, casting, from_, to, i): - super().__init__(ufunc, casting, from_, to) - self.out_i = i - - def __str__(self): - # only show the number if more than one output exists - i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else "" - return ( - "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting - ) - - -# Exception used in shares_memory() -@set_module('numpy') -class TooHardError(RuntimeError): - pass - - -@set_module('numpy') -class AxisError(ValueError, IndexError): - """ Axis supplied was invalid. 
""" - def __init__(self, axis, ndim=None, msg_prefix=None): - # single-argument form just delegates to base class - if ndim is None and msg_prefix is None: - msg = axis - - # do the string formatting here, to save work in the C code - else: - msg = ("axis {} is out of bounds for array of dimension {}" - .format(axis, ndim)) - if msg_prefix is not None: - msg = "{}: {}".format(msg_prefix, msg) - - super(AxisError, self).__init__(msg) - - -@_display_as_base -class _ArrayMemoryError(MemoryError): - """ Thrown when an array cannot be allocated""" - def __init__(self, shape, dtype): - self.shape = shape - self.dtype = dtype - - @property - def _total_size(self): - num_bytes = self.dtype.itemsize - for dim in self.shape: - num_bytes *= dim - return num_bytes - - @staticmethod - def _size_to_string(num_bytes): - """ Convert a number of bytes into a binary size string """ - import math - - # https://en.wikipedia.org/wiki/Binary_prefix - LOG2_STEP = 10 - STEP = 1024 - units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] - - unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP - unit_val = 1 << (unit_i * LOG2_STEP) - n_units = num_bytes / unit_val - del unit_val - - # ensure we pick a unit that is correct after rounding - if round(n_units) == STEP: - unit_i += 1 - n_units /= STEP - - # deal with sizes so large that we don't have units for them - if unit_i >= len(units): - new_unit_i = len(units) - 1 - n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) - unit_i = new_unit_i - - unit_name = units[unit_i] - # format with a sensible number of digits - if unit_i == 0: - # no decimal point on bytes - return '{:.0f} {}'.format(n_units, unit_name) - elif round(n_units) < 1000: - # 3 significant figures, if none are dropped to the left of the . - return '{:#.3g} {}'.format(n_units, unit_name) - else: - # just give all the digits otherwise - return '{:#.0f} {}'.format(n_units, unit_name) - - def __str__(self): - size_str = self._size_to_string(self._total_size) - return ( - "Unable to allocate {} for an array with shape {} and data type {}" - .format(size_str, self.shape, self.dtype) - ) diff --git a/venv/lib/python3.7/site-packages/numpy/core/_internal.py b/venv/lib/python3.7/site-packages/numpy/core/_internal.py deleted file mode 100644 index 05e401e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_internal.py +++ /dev/null @@ -1,877 +0,0 @@ -""" -A place for internal code - -Some things are more easily handled Python. 
- -""" -from __future__ import division, absolute_import, print_function - -import re -import sys -import platform - -from numpy.compat import unicode -from .multiarray import dtype, array, ndarray -try: - import ctypes -except ImportError: - ctypes = None - -IS_PYPY = platform.python_implementation() == 'PyPy' - -if (sys.byteorder == 'little'): - _nbo = b'<' -else: - _nbo = b'>' - -def _makenames_list(adict, align): - allfields = [] - fnames = list(adict.keys()) - for fname in fnames: - obj = adict[fname] - n = len(obj) - if not isinstance(obj, tuple) or n not in [2, 3]: - raise ValueError("entry not a 2- or 3- tuple") - if (n > 2) and (obj[2] == fname): - continue - num = int(obj[1]) - if (num < 0): - raise ValueError("invalid offset.") - format = dtype(obj[0], align=align) - if (n > 2): - title = obj[2] - else: - title = None - allfields.append((fname, format, num, title)) - # sort by offsets - allfields.sort(key=lambda x: x[2]) - names = [x[0] for x in allfields] - formats = [x[1] for x in allfields] - offsets = [x[2] for x in allfields] - titles = [x[3] for x in allfields] - - return names, formats, offsets, titles - -# Called in PyArray_DescrConverter function when -# a dictionary without "names" and "formats" -# fields is used as a data-type descriptor. -def _usefields(adict, align): - try: - names = adict[-1] - except KeyError: - names = None - if names is None: - names, formats, offsets, titles = _makenames_list(adict, align) - else: - formats = [] - offsets = [] - titles = [] - for name in names: - res = adict[name] - formats.append(res[0]) - offsets.append(res[1]) - if (len(res) > 2): - titles.append(res[2]) - else: - titles.append(None) - - return dtype({"names": names, - "formats": formats, - "offsets": offsets, - "titles": titles}, align) - - -# construct an array_protocol descriptor list -# from the fields attribute of a descriptor -# This calls itself recursively but should eventually hit -# a descriptor that has no fields and then return -# a simple typestring - -def _array_descr(descriptor): - fields = descriptor.fields - if fields is None: - subdtype = descriptor.subdtype - if subdtype is None: - if descriptor.metadata is None: - return descriptor.str - else: - new = descriptor.metadata.copy() - if new: - return (descriptor.str, new) - else: - return descriptor.str - else: - return (_array_descr(subdtype[0]), subdtype[1]) - - names = descriptor.names - ordered_fields = [fields[x] + (x,) for x in names] - result = [] - offset = 0 - for field in ordered_fields: - if field[1] > offset: - num = field[1] - offset - result.append(('', '|V%d' % num)) - offset += num - elif field[1] < offset: - raise ValueError( - "dtype.descr is not defined for types with overlapping or " - "out-of-order fields") - if len(field) > 3: - name = (field[2], field[3]) - else: - name = field[2] - if field[0].subdtype: - tup = (name, _array_descr(field[0].subdtype[0]), - field[0].subdtype[1]) - else: - tup = (name, _array_descr(field[0])) - offset += field[0].itemsize - result.append(tup) - - if descriptor.itemsize > offset: - num = descriptor.itemsize - offset - result.append(('', '|V%d' % num)) - - return result - -# Build a new array from the information in a pickle. -# Note that the name numpy.core._internal._reconstruct is embedded in -# pickles of ndarrays made with NumPy before release 1.0 -# so don't remove the name here, or you'll -# break backward compatibility. 
-def _reconstruct(subtype, shape, dtype): - return ndarray.__new__(subtype, shape, dtype) - - -# format_re was originally from numarray by J. Todd Miller - -format_re = re.compile(br'(?P<order1>[<>|=]?)' - br'(?P<repeats> *[(]?[ ,0-9]*[)]? *)' - br'(?P<order2>[<>|=]?)' - br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)') -sep_re = re.compile(br'\s*,\s*') -space_re = re.compile(br'\s+$') - -# astr is a string (perhaps comma separated) - -_convorder = {b'=': _nbo} - -def _commastring(astr): - startindex = 0 - result = [] - while startindex < len(astr): - mo = format_re.match(astr, pos=startindex) - try: - (order1, repeats, order2, dtype) = mo.groups() - except (TypeError, AttributeError): - raise ValueError('format number %d of "%s" is not recognized' % - (len(result)+1, astr)) - startindex = mo.end() - # Separator or ending padding - if startindex < len(astr): - if space_re.match(astr, pos=startindex): - startindex = len(astr) - else: - mo = sep_re.match(astr, pos=startindex) - if not mo: - raise ValueError( - 'format number %d of "%s" is not recognized' % - (len(result)+1, astr)) - startindex = mo.end() - - if order2 == b'': - order = order1 - elif order1 == b'': - order = order2 - else: - order1 = _convorder.get(order1, order1) - order2 = _convorder.get(order2, order2) - if (order1 != order2): - raise ValueError( - 'inconsistent byte-order specification %s and %s' % - (order1, order2)) - order = order1 - - if order in [b'|', b'=', _nbo]: - order = b'' - dtype = order + dtype - if (repeats == b''): - newitem = dtype - else: - newitem = (dtype, eval(repeats)) - result.append(newitem) - - return result - -class dummy_ctype(object): - def __init__(self, cls): - self._cls = cls - def __mul__(self, other): - return self - def __call__(self, *other): - return self._cls(other) - def __eq__(self, other): - return self._cls == other._cls - def __ne__(self, other): - return self._cls != other._cls - -def _getintp_ctype(): - val = _getintp_ctype.cache - if val is not None: - return val - if ctypes is None: - import numpy as np - val = dummy_ctype(np.intp) - else: - char = dtype('p').char - if (char == 'i'): - val = ctypes.c_int - elif char == 'l': - val = ctypes.c_long - elif char == 'q': - val = ctypes.c_longlong - else: - val = ctypes.c_long - _getintp_ctype.cache = val - return val -_getintp_ctype.cache = None - -# Used for .ctypes attribute of ndarray - -class _missing_ctypes(object): - def cast(self, num, obj): - return num.value - - class c_void_p(object): - def __init__(self, ptr): - self.value = ptr - - -class _ctypes(object): - def __init__(self, array, ptr=None): - self._arr = array - - if ctypes: - self._ctypes = ctypes - self._data = self._ctypes.c_void_p(ptr) - else: - # fake a pointer-like object that holds onto the reference - self._ctypes = _missing_ctypes() - self._data = self._ctypes.c_void_p(ptr) - self._data._objects = array - - if self._arr.ndim == 0: - self._zerod = True - else: - self._zerod = False - - def data_as(self, obj): - """ - Return the data pointer cast to a particular c-types object. - For example, calling ``self._as_parameter_`` is equivalent to - ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a - pointer to a ctypes array of floating-point data: - ``self.data_as(ctypes.POINTER(ctypes.c_double))``. - - The returned pointer will keep a reference to the array. - """ - # _ctypes.cast function causes a circular reference of self._data in - # self._data._objects. Attributes of self._data cannot be released - # until gc.collect is called.
Make a copy of the pointer first then let - # it hold the array reference. This is a workaround to circumvent the - # CPython bug https://bugs.python.org/issue12836 - ptr = self._ctypes.cast(self._data, obj) - ptr._arr = self._arr - return ptr - - def shape_as(self, obj): - """ - Return the shape tuple as an array of some other c-types - type. For example: ``self.shape_as(ctypes.c_short)``. - """ - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.shape) - - def strides_as(self, obj): - """ - Return the strides tuple as an array of some other - c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. - """ - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.strides) - - @property - def data(self): - """ - A pointer to the memory area of the array as a Python integer. - This memory area may contain data that is not aligned, or not in correct - byte-order. The memory area may not even be writeable. The array - flags and data-type of this array should be respected when passing this - attribute to arbitrary C-code to avoid trouble that can include Python - crashing. User Beware! The value of this attribute is exactly the same - as ``self._array_interface_['data'][0]``. - - Note that unlike ``data_as``, a reference will not be kept to the array: - code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a - pointer to a deallocated array, and should be spelt - ``(a + b).ctypes.data_as(ctypes.c_void_p)`` - """ - return self._data.value - - @property - def shape(self): - """ - (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the C-integer corresponding to ``dtype('p')`` on this - platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or - `ctypes.c_longlong` depending on the platform. - The c_intp type is defined accordingly in `numpy.ctypeslib`. - The ctypes array contains the shape of the underlying array. - """ - return self.shape_as(_getintp_ctype()) - - @property - def strides(self): - """ - (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the same as for the shape attribute. This ctypes array - contains the strides information from the underlying array. This strides - information is important for showing how many bytes must be jumped to - get to the next element in the array. - """ - return self.strides_as(_getintp_ctype()) - - @property - def _as_parameter_(self): - """ - Overrides the ctypes semi-magic method - - Enables `c_func(some_array.ctypes)` - """ - return self.data_as(ctypes.c_void_p) - - # kept for compatibility - get_data = data.fget - get_shape = shape.fget - get_strides = strides.fget - get_as_parameter = _as_parameter_.fget - - -def _newnames(datatype, order): - """ - Given a datatype and an order object, return a new names tuple, with the - order indicated - """ - oldnames = datatype.names - nameslist = list(oldnames) - if isinstance(order, (str, unicode)): - order = [order] - seen = set() - if isinstance(order, (list, tuple)): - for name in order: - try: - nameslist.remove(name) - except ValueError: - if name in seen: - raise ValueError("duplicate field name: %s" % (name,)) - else: - raise ValueError("unknown field name: %s" % (name,)) - seen.add(name) - return tuple(list(order) + nameslist) - raise ValueError("unsupported order value: %s" % (order,)) - -def _copy_fields(ary): - """Return copy of structured array with padding between fields removed. 
- - Parameters - ---------- - ary : ndarray - Structured array from which to remove padding bytes - - Returns - ------- - ary_copy : ndarray - Copy of ary with padding bytes removed - """ - dt = ary.dtype - copy_dtype = {'names': dt.names, - 'formats': [dt.fields[name][0] for name in dt.names]} - return array(ary, dtype=copy_dtype, copy=True) - -def _getfield_is_safe(oldtype, newtype, offset): - """ Checks safety of getfield for object arrays. - - As in _view_is_safe, we need to check that memory containing objects is not - reinterpreted as a non-object datatype and vice versa. - - Parameters - ---------- - oldtype : data-type - Data type of the original ndarray. - newtype : data-type - Data type of the field being accessed by ndarray.getfield - offset : int - Offset of the field being accessed by ndarray.getfield - - Raises - ------ - TypeError - If the field access is invalid - - """ - if newtype.hasobject or oldtype.hasobject: - if offset == 0 and newtype == oldtype: - return - if oldtype.names is not None: - for name in oldtype.names: - if (oldtype.fields[name][1] == offset and - oldtype.fields[name][0] == newtype): - return - raise TypeError("Cannot get/set field of an object array") - return - -def _view_is_safe(oldtype, newtype): - """ Checks safety of a view involving object arrays, for example when - doing:: - - np.zeros(10, dtype=oldtype).view(newtype) - - Parameters - ---------- - oldtype : data-type - Data type of original ndarray - newtype : data-type - Data type of the view - - Raises - ------ - TypeError - If the new type is incompatible with the old type. - - """ - - # if the types are equivalent, there is no problem. - # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) - if oldtype == newtype: - return - - if newtype.hasobject or oldtype.hasobject: - raise TypeError("Cannot change data-type for object array.") - return - -# Given a string containing a PEP 3118 format specifier, -# construct a NumPy dtype - -_pep3118_native_map = { - '?': '?', - 'c': 'S1', - 'b': 'b', - 'B': 'B', - 'h': 'h', - 'H': 'H', - 'i': 'i', - 'I': 'I', - 'l': 'l', - 'L': 'L', - 'q': 'q', - 'Q': 'Q', - 'e': 'e', - 'f': 'f', - 'd': 'd', - 'g': 'g', - 'Zf': 'F', - 'Zd': 'D', - 'Zg': 'G', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) - -_pep3118_standard_map = { - '?': '?', - 'c': 'S1', - 'b': 'b', - 'B': 'B', - 'h': 'i2', - 'H': 'u2', - 'i': 'i4', - 'I': 'u4', - 'l': 'i4', - 'L': 'u4', - 'q': 'i8', - 'Q': 'u8', - 'e': 'f2', - 'f': 'f', - 'd': 'd', - 'Zf': 'F', - 'Zd': 'D', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) - -_pep3118_unsupported_map = { - 'u': 'UCS-2 strings', - '&': 'pointers', - 't': 'bitfields', - 'X': 'function pointers', -} - -class _Stream(object): - def __init__(self, s): - self.s = s - self.byteorder = '@' - - def advance(self, n): - res = self.s[:n] - self.s = self.s[n:] - return res - - def consume(self, c): - if self.s[:len(c)] == c: - self.advance(len(c)) - return True - return False - - def consume_until(self, c): - if callable(c): - i = 0 - while i < len(self.s) and not c(self.s[i]): - i = i + 1 - return self.advance(i) - else: - i = self.s.index(c) - res = self.advance(i) - self.advance(len(c)) - return res - - @property - def next(self): - return self.s[0] - - def __bool__(self): - return bool(self.s) - __nonzero__ = __bool__ - - -def _dtype_from_pep3118(spec): - stream = _Stream(spec) - 
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) - return dtype - -def __dtype_from_pep3118(stream, is_subdtype): - field_spec = dict( - names=[], - formats=[], - offsets=[], - itemsize=0 - ) - offset = 0 - common_alignment = 1 - is_padding = False - - # Parse spec - while stream: - value = None - - # End of structure, bail out to upper level - if stream.consume('}'): - break - - # Sub-arrays (1) - shape = None - if stream.consume('('): - shape = stream.consume_until(')') - shape = tuple(map(int, shape.split(','))) - - # Byte order - if stream.next in ('@', '=', '<', '>', '^', '!'): - byteorder = stream.advance(1) - if byteorder == '!': - byteorder = '>' - stream.byteorder = byteorder - - # Byte order characters also control native vs. standard type sizes - if stream.byteorder in ('@', '^'): - type_map = _pep3118_native_map - type_map_chars = _pep3118_native_typechars - else: - type_map = _pep3118_standard_map - type_map_chars = _pep3118_standard_typechars - - # Item sizes - itemsize_str = stream.consume_until(lambda c: not c.isdigit()) - if itemsize_str: - itemsize = int(itemsize_str) - else: - itemsize = 1 - - # Data types - is_padding = False - - if stream.consume('T{'): - value, align = __dtype_from_pep3118( - stream, is_subdtype=True) - elif stream.next in type_map_chars: - if stream.next == 'Z': - typechar = stream.advance(2) - else: - typechar = stream.advance(1) - - is_padding = (typechar == 'x') - dtypechar = type_map[typechar] - if dtypechar in 'USV': - dtypechar += '%d' % itemsize - itemsize = 1 - numpy_byteorder = {'@': '=', '^': '='}.get( - stream.byteorder, stream.byteorder) - value = dtype(numpy_byteorder + dtypechar) - align = value.alignment - elif stream.next in _pep3118_unsupported_map: - desc = _pep3118_unsupported_map[stream.next] - raise NotImplementedError( - "Unrepresentable PEP 3118 data type {!r} ({})" - .format(stream.next, desc)) - else: - raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s) - - # - # Native alignment may require padding - # - # Here we assume that the presence of a '@' character implicitly implies - # that the start of the array is *already* aligned. 
- # - extra_offset = 0 - if stream.byteorder == '@': - start_padding = (-offset) % align - intra_padding = (-value.itemsize) % align - - offset += start_padding - - if intra_padding != 0: - if itemsize > 1 or (shape is not None and _prod(shape) > 1): - # Inject internal padding to the end of the sub-item - value = _add_trailing_padding(value, intra_padding) - else: - # We can postpone the injection of internal padding, - # as the item appears at most once - extra_offset += intra_padding - - # Update common alignment - common_alignment = _lcm(align, common_alignment) - - # Convert itemsize to sub-array - if itemsize != 1: - value = dtype((value, (itemsize,))) - - # Sub-arrays (2) - if shape is not None: - value = dtype((value, shape)) - - # Field name - if stream.consume(':'): - name = stream.consume_until(':') - else: - name = None - - if not (is_padding and name is None): - if name is not None and name in field_spec['names']: - raise RuntimeError("Duplicate field name '%s' in PEP3118 format" - % name) - field_spec['names'].append(name) - field_spec['formats'].append(value) - field_spec['offsets'].append(offset) - - offset += value.itemsize - offset += extra_offset - - field_spec['itemsize'] = offset - - # extra final padding for aligned types - if stream.byteorder == '@': - field_spec['itemsize'] += (-offset) % common_alignment - - # Check if this was a simple 1-item type, and unwrap it - if (field_spec['names'] == [None] - and field_spec['offsets'][0] == 0 - and field_spec['itemsize'] == field_spec['formats'][0].itemsize - and not is_subdtype): - ret = field_spec['formats'][0] - else: - _fix_names(field_spec) - ret = dtype(field_spec) - - # Finished - return ret, common_alignment - -def _fix_names(field_spec): - """ Replace names which are None with the next unused f%d name """ - names = field_spec['names'] - for i, name in enumerate(names): - if name is not None: - continue - - j = 0 - while True: - name = 'f{}'.format(j) - if name not in names: - break - j = j + 1 - names[i] = name - -def _add_trailing_padding(value, padding): - """Inject the specified number of padding bytes at the end of a dtype""" - if value.fields is None: - field_spec = dict( - names=['f0'], - formats=[value], - offsets=[0], - itemsize=value.itemsize - ) - else: - fields = value.fields - names = value.names - field_spec = dict( - names=names, - formats=[fields[name][0] for name in names], - offsets=[fields[name][1] for name in names], - itemsize=value.itemsize - ) - - field_spec['itemsize'] += padding - return dtype(field_spec) - -def _prod(a): - p = 1 - for x in a: - p *= x - return p - -def _gcd(a, b): - """Calculate the greatest common divisor of a and b""" - while b: - a, b = b, a % b - return a - -def _lcm(a, b): - return a // _gcd(a, b) * b - -def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): - """ Format the error message for when __array_ufunc__ gives up. """ - args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + - ['{}={!r}'.format(k, v) - for k, v in kwargs.items()]) - args = inputs + kwargs.get('out', ()) - types_string = ', '.join(repr(type(arg).__name__) for arg in args) - return ('operand type(s) all returned NotImplemented from ' - '__array_ufunc__({!r}, {!r}, {}): {}' - .format(ufunc, method, args_string, types_string)) - - -def array_function_errmsg_formatter(public_api, types): - """ Format the error message for when __array_ufunc__ gives up. 
""" - func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) - return ("no implementation found for '{}' on types that implement " - '__array_function__: {}'.format(func_name, list(types))) - - -def _ufunc_doc_signature_formatter(ufunc): - """ - Builds a signature string which resembles PEP 457 - - This is used to construct the first line of the docstring - """ - - # input arguments are simple - if ufunc.nin == 1: - in_args = 'x' - else: - in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin)) - - # output arguments are both keyword or positional - if ufunc.nout == 0: - out_args = ', /, out=()' - elif ufunc.nout == 1: - out_args = ', /, out=None' - else: - out_args = '[, {positional}], / [, out={default}]'.format( - positional=', '.join( - 'out{}'.format(i+1) for i in range(ufunc.nout)), - default=repr((None,)*ufunc.nout) - ) - - # keyword only args depend on whether this is a gufunc - kwargs = ( - ", casting='same_kind'" - ", order='K'" - ", dtype=None" - ", subok=True" - "[, signature" - ", extobj]" - ) - if ufunc.signature is None: - kwargs = ", where=True" + kwargs - - # join all the parts together - return '{name}({in_args}{out_args}, *{kwargs})'.format( - name=ufunc.__name__, - in_args=in_args, - out_args=out_args, - kwargs=kwargs - ) - - -def npy_ctypes_check(cls): - # determine if a class comes from ctypes, in order to work around - # a bug in the buffer protocol for those objects, bpo-10746 - try: - # ctypes class are new-style, so have an __mro__. This probably fails - # for ctypes classes with multiple inheritance. - if IS_PYPY: - # (..., _ctypes.basics._CData, Bufferable, object) - ctype_base = cls.__mro__[-3] - else: - # # (..., _ctypes._CData, object) - ctype_base = cls.__mro__[-2] - # right now, they're part of the _ctypes module - return 'ctypes' in ctype_base.__module__ - except Exception: - return False - - -class recursive(object): - ''' - A decorator class for recursive nested functions. - Naive recursive nested functions hold a reference to themselves: - - def outer(*args): - def stringify_leaky(arg0, *arg1): - if len(arg1) > 0: - return stringify_leaky(*arg1) # <- HERE - return str(arg0) - stringify_leaky(*args) - - This design pattern creates a reference cycle that is difficult for a - garbage collector to resolve. The decorator class prevents the - cycle by passing the nested function in as an argument `self`: - - def outer(*args): - @recursive - def stringify(self, arg0, *arg1): - if len(arg1) > 0: - return self(*arg1) - return str(arg0) - stringify(*args) - - ''' - def __init__(self, func): - self.func = func - def __call__(self, *args, **kwargs): - return self.func(self, *args, **kwargs) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/_methods.py b/venv/lib/python3.7/site-packages/numpy/core/_methods.py deleted file mode 100644 index 269e509..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_methods.py +++ /dev/null @@ -1,244 +0,0 @@ -""" -Array methods which are called by both the C-code for the method -and the Python code for the NumPy-namespace function - -""" -from __future__ import division, absolute_import, print_function - -import warnings - -from numpy.core import multiarray as mu -from numpy.core import umath as um -from numpy.core._asarray import asanyarray -from numpy.core import numerictypes as nt -from numpy.core import _exceptions -from numpy._globals import _NoValue -from numpy.compat import pickle, os_fspath, contextlib_nullcontext - -# save those O(100) nanoseconds! 
-umr_maximum = um.maximum.reduce -umr_minimum = um.minimum.reduce -umr_sum = um.add.reduce -umr_prod = um.multiply.reduce -umr_any = um.logical_or.reduce -umr_all = um.logical_and.reduce - -# avoid keyword arguments to speed up parsing, saves about 15%-20% for very -# small reductions -def _amax(a, axis=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_maximum(a, axis, None, out, keepdims, initial, where) - -def _amin(a, axis=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_minimum(a, axis, None, out, keepdims, initial, where) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_sum(a, axis, dtype, out, keepdims, initial, where) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False, - initial=_NoValue, where=True): - return umr_prod(a, axis, dtype, out, keepdims, initial, where) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_any(a, axis, dtype, out, keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_all(a, axis, dtype, out, keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(range(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -# Numpy 1.17.0, 2019-02-24 -# Various clip behavior deprecations, marked with _clip_dep as a prefix. - -def _clip_dep_is_scalar_nan(a): - # guarded to protect circular imports - from numpy.core.fromnumeric import ndim - if ndim(a) != 0: - return False - try: - return um.isnan(a) - except TypeError: - return False - -def _clip_dep_is_byte_swapped(a): - if isinstance(a, mu.ndarray): - return not a.dtype.isnative - return False - -def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs): - # normal path - if casting is not None: - return ufunc(*args, out=out, casting=casting, **kwargs) - - # try to deal with broken casting rules - try: - return ufunc(*args, out=out, **kwargs) - except _exceptions._UFuncOutputCastingError as e: - # Numpy 1.17.0, 2019-02-24 - warnings.warn( - "Converting the output of clip from {!r} to {!r} is deprecated. " - "Pass `casting=\"unsafe\"` explicitly to silence this warning, or " - "correct the type of the variables.".format(e.from_, e.to), - DeprecationWarning, - stacklevel=2 - ) - return ufunc(*args, out=out, casting="unsafe", **kwargs) - -def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs): - if min is None and max is None: - raise ValueError("One of max or min must be given") - - # Numpy 1.17.0, 2019-02-24 - # This deprecation probably incurs a substantial slowdown for small arrays, - # it will be good to get rid of it. - if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out): - using_deprecated_nan = False - if _clip_dep_is_scalar_nan(min): - min = -float('inf') - using_deprecated_nan = True - if _clip_dep_is_scalar_nan(max): - max = float('inf') - using_deprecated_nan = True - if using_deprecated_nan: - warnings.warn( - "Passing `np.nan` to mean no clipping in np.clip has always " - "been unreliable, and is now deprecated. " - "In future, this will always return nan, like it already does " - "when min or max are arrays that contain nan. 
" - "To skip a bound, pass either None or an np.inf of an " - "appropriate sign.", - DeprecationWarning, - stacklevel=2 - ) - - if min is None: - return _clip_dep_invoke_with_casting( - um.minimum, a, max, out=out, casting=casting, **kwargs) - elif max is None: - return _clip_dep_invoke_with_casting( - um.maximum, a, min, out=out, casting=casting, **kwargs) - else: - return _clip_dep_invoke_with_casting( - um.clip, a, min, max, out=out, casting=casting, **kwargs) - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - is_float16_result = False - rcount = _count_reduce_items(arr, axis) - # Make this warning show up first - if rcount == 0: - warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) - - # Cast bool, unsigned int, and int to float64 by default - if dtype is None: - if issubclass(arr.dtype.type, (nt.integer, nt.bool_)): - dtype = mu.dtype('f8') - elif issubclass(arr.dtype.type, nt.float16): - dtype = mu.dtype('f4') - is_float16_result = True - - ret = umr_sum(arr, axis, dtype, out, keepdims) - if isinstance(ret, mu.ndarray): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) - if is_float16_result and out is None: - ret = arr.dtype.type(ret) - elif hasattr(ret, 'dtype'): - if is_float16_result: - ret = arr.dtype.type(ret / rcount) - else: - ret = ret.dtype.type(ret / rcount) - else: - ret = ret / rcount - - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - arr = asanyarray(a) - - rcount = _count_reduce_items(arr, axis) - # Make this warning show up on top. - if ddof >= rcount: - warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, - stacklevel=2) - - # Cast bool, unsigned int, and int to float64 by default - if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): - dtype = mu.dtype('f8') - - # Compute the mean. - # Note that if dtype is not of inexact type then arraymean will - # not be either. - arrmean = umr_sum(arr, axis, dtype, keepdims=True) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide( - arrmean, rcount, out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean.dtype.type(arrmean / rcount) - - # Compute sum of squared deviations from mean - # Note that x may not be inexact and that we need it to be an array, - # not a scalar. - x = asanyarray(arr - arrmean) - if issubclass(arr.dtype.type, (nt.floating, nt.integer)): - x = um.multiply(x, x, out=x) - else: - x = um.multiply(x, um.conjugate(x), out=x).real - - ret = umr_sum(x, axis, dtype, out, keepdims) - - # Compute degrees of freedom and make sure it is not negative. 
- rcount = max([rcount - ddof, 0]) - - # divide by degrees of freedom - if isinstance(ret, mu.ndarray): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) - elif hasattr(ret, 'dtype'): - ret = ret.dtype.type(ret / rcount) - else: - ret = ret / rcount - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - elif hasattr(ret, 'dtype'): - ret = ret.dtype.type(um.sqrt(ret)) - else: - ret = um.sqrt(ret) - - return ret - -def _ptp(a, axis=None, out=None, keepdims=False): - return um.subtract( - umr_maximum(a, axis, None, out, keepdims), - umr_minimum(a, axis, None, None, keepdims), - out - ) - -def _dump(self, file, protocol=2): - if hasattr(file, 'write'): - ctx = contextlib_nullcontext(file) - else: - ctx = open(os_fspath(file), "wb") - with ctx as f: - pickle.dump(self, f, protocol=protocol) - -def _dumps(self, protocol=2): - return pickle.dumps(self, protocol=protocol) diff --git a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 737083c..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 606d590..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_multiarray_umath.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 341093e..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_operand_flag_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index c62e625..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_rational_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_string_helpers.py b/venv/lib/python3.7/site-packages/numpy/core/_string_helpers.py deleted file mode 100644 index 45e6a73..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_string_helpers.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -String-handling utilities to avoid locale-dependence. - -Used primarily to generate type name aliases. -""" -# "import string" is costly to import! -# Construct the translation tables directly -# "A" = chr(65), "a" = chr(97) -_all_chars = [chr(_m) for _m in range(256)] -_ascii_upper = _all_chars[65:65+26] -_ascii_lower = _all_chars[97:97+26] -LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) -UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) - - -def english_lower(s): - """ Apply English case rules to convert ASCII strings to all lower case. 
- - This is an internal utility function to replace calls to str.lower() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. - - Parameters - ---------- - s : str - - Returns - ------- - lowered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_lower - >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' - >>> english_lower('') - '' - """ - lowered = s.translate(LOWER_TABLE) - return lowered - - -def english_upper(s): - """ Apply English case rules to convert ASCII strings to all upper case. - - This is an internal utility function to replace calls to str.upper() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. - - Parameters - ---------- - s : str - - Returns - ------- - uppered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_upper - >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' - >>> english_upper('') - '' - """ - uppered = s.translate(UPPER_TABLE) - return uppered - - -def english_capitalize(s): - """ Apply English case rules to convert the first character of an ASCII - string to upper case. - - This is an internal utility function to replace calls to str.capitalize() - such that we can avoid changing behavior with changing locales. - - Parameters - ---------- - s : str - - Returns - ------- - capitalized : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_capitalize - >>> english_capitalize('int8') - 'Int8' - >>> english_capitalize('Int8') - 'Int8' - >>> english_capitalize('') - '' - """ - if s: - return english_upper(s[0]) + s[1:] - else: - return s diff --git a/venv/lib/python3.7/site-packages/numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b48d370..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_struct_ufunc_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/_type_aliases.py b/venv/lib/python3.7/site-packages/numpy/core/_type_aliases.py deleted file mode 100644 index d6e1a1f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_type_aliases.py +++ /dev/null @@ -1,282 +0,0 @@ -""" -Due to compatibility, numpy has a very large number of different naming -conventions for the scalar types (those subclassing from `numpy.generic`). -This file produces a convoluted set of dictionaries mapping names to types, -and sometimes other mappings too. - -.. data:: allTypes - A dictionary of names to types that will be exposed as attributes through - ``np.core.numerictypes.*`` - -.. data:: sctypeDict - Similar to `allTypes`, but maps a broader set of aliases to their types. - -.. data:: sctypeNA - NumArray-compatible names for the scalar types. Contains not only - ``name: type`` mappings, but ``char: name`` mappings too. - - .. deprecated:: 1.16 - -.. 
data:: sctypes - A dictionary keyed by a "type group" string, providing a list of types - under that group. - -""" -import warnings -import sys - -from numpy.compat import unicode -from numpy._globals import VisibleDeprecationWarning -from numpy.core._string_helpers import english_lower, english_capitalize -from numpy.core.multiarray import typeinfo, dtype -from numpy.core._dtype import _kind_name - - -sctypeDict = {} # Contains all leaf-node scalar types with aliases -class TypeNADict(dict): - def __getitem__(self, key): - # 2018-06-24, 1.16 - warnings.warn('sctypeNA and typeNA will be removed in v1.18 ' - 'of numpy', VisibleDeprecationWarning, stacklevel=2) - return dict.__getitem__(self, key) - def get(self, key, default=None): - # 2018-06-24, 1.16 - warnings.warn('sctypeNA and typeNA will be removed in v1.18 ' - 'of numpy', VisibleDeprecationWarning, stacklevel=2) - return dict.get(self, key, default) - -sctypeNA = TypeNADict() # Contails all leaf-node types -> numarray type equivalences -allTypes = {} # Collect the types we will add to the module - - -# separate the actual type info from the abstract base classes -_abstract_types = {} -_concrete_typeinfo = {} -for k, v in typeinfo.items(): - # make all the keys lowercase too - k = english_lower(k) - if isinstance(v, type): - _abstract_types[k] = v - else: - _concrete_typeinfo[k] = v - -_concrete_types = {v.type for k, v in _concrete_typeinfo.items()} - - -def _bits_of(obj): - try: - info = next(v for v in _concrete_typeinfo.values() if v.type is obj) - except StopIteration: - if obj in _abstract_types.values(): - raise ValueError("Cannot count the bits of an abstract type") - - # some third-party type - make a best-guess - return dtype(obj).itemsize * 8 - else: - return info.bits - - -def bitname(obj): - """Return a bit-width name for a given type object""" - bits = _bits_of(obj) - dt = dtype(obj) - char = dt.kind - base = _kind_name(dt) - - if base == 'object': - bits = 0 - - if bits != 0: - char = "%s%d" % (char, bits // 8) - - return base, bits, char - - -def _add_types(): - for name, info in _concrete_typeinfo.items(): - # define C-name and insert typenum and typechar references also - allTypes[name] = info.type - sctypeDict[name] = info.type - sctypeDict[info.char] = info.type - sctypeDict[info.num] = info.type - - for name, cls in _abstract_types.items(): - allTypes[name] = cls -_add_types() - -# This is the priority order used to assign the bit-sized NPY_INTxx names, which -# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be -# consistent. -# If two C types have the same size, then the earliest one in this list is used -# as the sized name. 
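# An illustrative sketch of the effect (assuming a typical 64-bit Linux
# build, where C ``long`` and ``long long`` are both 64 bits): ``long``
# comes first in the priority list below, so it wins the ``int64`` name.
#
#   >>> import numpy as np
#   >>> np.dtype(np.int64).char    # 'l' is the typechar for C long
#   'l'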
-_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte'] -_uint_ctypes = list('u' + t for t in _int_ctypes) - -def _add_aliases(): - for name, info in _concrete_typeinfo.items(): - # these are handled by _add_integer_aliases - if name in _int_ctypes or name in _uint_ctypes: - continue - - # insert bit-width version for this class (if relevant) - base, bit, char = bitname(info.type) - - myname = "%s%d" % (base, bit) - - # ensure that (c)longdouble does not overwrite the aliases assigned to - # (c)double - if name in ('longdouble', 'clongdouble') and myname in allTypes: - continue - - base_capitalize = english_capitalize(base) - if base == 'complex': - na_name = '%s%d' % (base_capitalize, bit//2) - elif base == 'bool': - na_name = base_capitalize - else: - na_name = "%s%d" % (base_capitalize, bit) - - allTypes[myname] = info.type - - # add mapping for both the bit name and the numarray name - sctypeDict[myname] = info.type - sctypeDict[na_name] = info.type - - # add forward, reverse, and string mapping to numarray - sctypeNA[na_name] = info.type - sctypeNA[info.type] = na_name - sctypeNA[info.char] = na_name - - sctypeDict[char] = info.type - sctypeNA[char] = na_name -_add_aliases() - -def _add_integer_aliases(): - seen_bits = set() - for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes): - i_info = _concrete_typeinfo[i_ctype] - u_info = _concrete_typeinfo[u_ctype] - bits = i_info.bits # same for both - - for info, charname, intname, Intname in [ - (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits), - (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]: - if bits not in seen_bits: - # sometimes two different types have the same number of bits - # if so, the one iterated over first takes precedence - allTypes[intname] = info.type - sctypeDict[intname] = info.type - sctypeDict[Intname] = info.type - sctypeDict[charname] = info.type - sctypeNA[Intname] = info.type - sctypeNA[charname] = info.type - sctypeNA[info.type] = Intname - sctypeNA[info.char] = Intname - - seen_bits.add(bits) - -_add_integer_aliases() - -# We use these later -void = allTypes['void'] - -# -# Rework the Python names (so that float and complex and int are consistent -# with Python usage) -# -def _set_up_aliases(): - type_pairs = [('complex_', 'cdouble'), - ('int0', 'intp'), - ('uint0', 'uintp'), - ('single', 'float'), - ('csingle', 'cfloat'), - ('singlecomplex', 'cfloat'), - ('float_', 'double'), - ('intc', 'int'), - ('uintc', 'uint'), - ('int_', 'long'), - ('uint', 'ulong'), - ('cfloat', 'cdouble'), - ('longfloat', 'longdouble'), - ('clongfloat', 'clongdouble'), - ('longcomplex', 'clongdouble'), - ('bool_', 'bool'), - ('bytes_', 'string'), - ('string_', 'string'), - ('unicode_', 'unicode'), - ('object_', 'object')] - if sys.version_info[0] >= 3: - type_pairs.extend([('str_', 'unicode')]) - else: - type_pairs.extend([('str_', 'string')]) - for alias, t in type_pairs: - allTypes[alias] = allTypes[t] - sctypeDict[alias] = sctypeDict[t] - # Remove aliases overriding python types and modules - to_remove = ['ulong', 'object', 'int', 'float', - 'complex', 'bool', 'string', 'datetime', 'timedelta'] - if sys.version_info[0] >= 3: - to_remove.extend(['bytes', 'str']) - else: - to_remove.extend(['unicode', 'long']) - - for t in to_remove: - try: - del allTypes[t] - del sctypeDict[t] - except KeyError: - pass -_set_up_aliases() - - -sctypes = {'int': [], - 'uint':[], - 'float':[], - 'complex':[], - 'others':[bool, object, bytes, unicode, void]} - -def _add_array_type(typename, bits): - try: - t = 
allTypes['%s%d' % (typename, bits)] - except KeyError: - pass - else: - sctypes[typename].append(t) - -def _set_array_types(): - ibytes = [1, 2, 4, 8, 16, 32, 64] - fbytes = [2, 4, 8, 10, 12, 16, 32, 64] - for bytes in ibytes: - bits = 8*bytes - _add_array_type('int', bits) - _add_array_type('uint', bits) - for bytes in fbytes: - bits = 8*bytes - _add_array_type('float', bits) - _add_array_type('complex', 2*bits) - _gi = dtype('p') - if _gi.type not in sctypes['int']: - indx = 0 - sz = _gi.itemsize - _lst = sctypes['int'] - while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): - indx += 1 - sctypes['int'].insert(indx, _gi.type) - sctypes['uint'].insert(indx, dtype('P').type) -_set_array_types() - - -# Add additional strings to the sctypeDict -_toadd = ['int', 'float', 'complex', 'bool', 'object'] -if sys.version_info[0] >= 3: - _toadd.extend(['str', 'bytes', ('a', 'bytes_')]) -else: - _toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')]) - -for name in _toadd: - if isinstance(name, tuple): - sctypeDict[name[0]] = allTypes[name[1]] - else: - sctypeDict[name] = allTypes['%s_' % name] - -del _toadd, name diff --git a/venv/lib/python3.7/site-packages/numpy/core/_ufunc_config.py b/venv/lib/python3.7/site-packages/numpy/core/_ufunc_config.py deleted file mode 100644 index c3951cc..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/_ufunc_config.py +++ /dev/null @@ -1,458 +0,0 @@ -""" -Functions for changing global ufunc configuration - -This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj` -""" -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import contextlib - -from .overrides import set_module -from .umath import ( - UFUNC_BUFSIZE_DEFAULT, - ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT, - SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID, -) -from . import umath - -__all__ = [ - "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", - "errstate", -] - -_errdict = {"ignore": ERR_IGNORE, - "warn": ERR_WARN, - "raise": ERR_RAISE, - "call": ERR_CALL, - "print": ERR_PRINT, - "log": ERR_LOG} - -_errdict_rev = {value: key for key, value in _errdict.items()} - - -@set_module('numpy') -def seterr(all=None, divide=None, over=None, under=None, invalid=None): - """ - Set how floating-point errors are handled. - - Note that operations on integer scalar types (such as `int16`) are - handled like floating point, and are affected by these settings. - - Parameters - ---------- - all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Set treatment for all types of floating-point errors at once: - - - ignore: Take no action when the exception occurs. - - warn: Print a `RuntimeWarning` (via the Python `warnings` module). - - raise: Raise a `FloatingPointError`. - - call: Call a function specified using the `seterrcall` function. - - print: Print a warning directly to ``stdout``. - - log: Record error in a Log object specified by `seterrcall`. - - The default is not to change the current behavior. - divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for division by zero. - over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point overflow. 
- under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point underflow. - invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for invalid floating-point operation. - - Returns - ------- - old_settings : dict - Dictionary containing the old settings. - - See also - -------- - seterrcall : Set a callback function for the 'call' mode. - geterr, geterrcall, errstate - - Notes - ----- - The floating-point exceptions are defined in the IEEE 754 standard [1]_: - - - Division by zero: infinite result obtained from finite numbers. - - Overflow: result too large to be expressed. - - Underflow: result so close to zero that some precision - was lost. - - Invalid operation: result is not an expressible number, typically - indicates that a NaN was produced. - - .. [1] https://en.wikipedia.org/wiki/IEEE_754 - - Examples - -------- - >>> old_settings = np.seterr(all='ignore') #seterr to known value - >>> np.seterr(over='raise') - {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} - >>> np.seterr(**old_settings) # reset to default - {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} - - >>> np.int16(32000) * np.int16(3) - 30464 - >>> old_settings = np.seterr(all='warn', over='raise') - >>> np.int16(32000) * np.int16(3) - Traceback (most recent call last): - File "", line 1, in - FloatingPointError: overflow encountered in short_scalars - - >>> from collections import OrderedDict - >>> old_settings = np.seterr(all='print') - >>> OrderedDict(np.geterr()) - OrderedDict([('divide', 'print'), ('over', 'print'), ('under', 'print'), ('invalid', 'print')]) - >>> np.int16(32000) * np.int16(3) - 30464 - - """ - - pyvals = umath.geterrobj() - old = geterr() - - if divide is None: - divide = all or old['divide'] - if over is None: - over = all or old['over'] - if under is None: - under = all or old['under'] - if invalid is None: - invalid = all or old['invalid'] - - maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + - (_errdict[over] << SHIFT_OVERFLOW) + - (_errdict[under] << SHIFT_UNDERFLOW) + - (_errdict[invalid] << SHIFT_INVALID)) - - pyvals[1] = maskvalue - umath.seterrobj(pyvals) - return old - - -@set_module('numpy') -def geterr(): - """ - Get the current way of handling floating-point errors. - - Returns - ------- - res : dict - A dictionary with keys "divide", "over", "under", and "invalid", - whose values are from the strings "ignore", "print", "log", "warn", - "raise", and "call". The keys represent possible floating-point - exceptions, and the values define how these exceptions are handled. - - See Also - -------- - geterrcall, seterr, seterrcall - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> from collections import OrderedDict - >>> sorted(np.geterr().items()) - [('divide', 'warn'), ('invalid', 'warn'), ('over', 'warn'), ('under', 'ignore')] - >>> np.arange(3.) / np.arange(3.) - array([nan, 1., 1.]) - - >>> oldsettings = np.seterr(all='warn', over='raise') - >>> OrderedDict(sorted(np.geterr().items())) - OrderedDict([('divide', 'warn'), ('invalid', 'warn'), ('over', 'raise'), ('under', 'warn')]) - >>> np.arange(3.) / np.arange(3.) 
- array([nan, 1., 1.]) - - """ - maskvalue = umath.geterrobj()[1] - mask = 7 - res = {} - val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask - res['divide'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_OVERFLOW) & mask - res['over'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_UNDERFLOW) & mask - res['under'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_INVALID) & mask - res['invalid'] = _errdict_rev[val] - return res - - -@set_module('numpy') -def setbufsize(size): - """ - Set the size of the buffer used in ufuncs. - - Parameters - ---------- - size : int - Size of buffer. - - """ - if size > 10e6: - raise ValueError("Buffer size, %s, is too big." % size) - if size < 5: - raise ValueError("Buffer size, %s, is too small." % size) - if size % 16 != 0: - raise ValueError("Buffer size, %s, is not a multiple of 16." % size) - - pyvals = umath.geterrobj() - old = getbufsize() - pyvals[0] = size - umath.seterrobj(pyvals) - return old - - -@set_module('numpy') -def getbufsize(): - """ - Return the size of the buffer used in ufuncs. - - Returns - ------- - getbufsize : int - Size of ufunc buffer in bytes. - - """ - return umath.geterrobj()[0] - - -@set_module('numpy') -def seterrcall(func): - """ - Set the floating-point error callback function or log object. - - There are two ways to capture floating-point error messages. The first - is to set the error-handler to 'call', using `seterr`. Then, set - the function to call using this function. - - The second is to set the error-handler to 'log', using `seterr`. - Floating-point errors then trigger a call to the 'write' method of - the provided object. - - Parameters - ---------- - func : callable f(err, flag) or object with write method - Function to call upon floating-point errors ('call'-mode) or - object whose 'write' method is used to log such message ('log'-mode). - - The call function takes two arguments. The first is a string describing - the type of error (such as "divide by zero", "overflow", "underflow", - or "invalid value"), and the second is the status flag. The flag is a - byte, whose four least-significant bits indicate the type of error, one - of "divide", "over", "under", "invalid":: - - [0 0 0 0 divide over under invalid] - - In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. - - If an object is provided, its write method should take one argument, - a string. - - Returns - ------- - h : callable, log instance or None - The old error handler. - - See Also - -------- - seterr, geterr, geterrcall - - Examples - -------- - Callback upon error: - - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - ... - - >>> saved_handler = np.seterrcall(err_handler) - >>> save_err = np.seterr(all='call') - >>> from collections import OrderedDict - - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([inf, inf, inf]) - - >>> np.seterrcall(saved_handler) - - >>> OrderedDict(sorted(np.seterr(**save_err).items())) - OrderedDict([('divide', 'call'), ('invalid', 'call'), ('over', 'call'), ('under', 'call')]) - - Log error message: - - >>> class Log(object): - ... def write(self, msg): - ... print("LOG: %s" % msg) - ... 
- - >>> log = Log() - >>> saved_handler = np.seterrcall(log) - >>> save_err = np.seterr(all='log') - - >>> np.array([1, 2, 3]) / 0.0 - LOG: Warning: divide by zero encountered in true_divide - array([inf, inf, inf]) - - >>> np.seterrcall(saved_handler) - - >>> OrderedDict(sorted(np.seterr(**save_err).items())) - OrderedDict([('divide', 'log'), ('invalid', 'log'), ('over', 'log'), ('under', 'log')]) - - """ - if func is not None and not isinstance(func, collections_abc.Callable): - if not hasattr(func, 'write') or not isinstance(func.write, collections_abc.Callable): - raise ValueError("Only callable can be used as callback") - pyvals = umath.geterrobj() - old = geterrcall() - pyvals[2] = func - umath.seterrobj(pyvals) - return old - - -@set_module('numpy') -def geterrcall(): - """ - Return the current callback function used on floating-point errors. - - When the error handling for a floating-point error (one of "divide", - "over", "under", or "invalid") is set to 'call' or 'log', the function - that is called or the log instance that is written to is returned by - `geterrcall`. This function or log instance has been set with - `seterrcall`. - - Returns - ------- - errobj : callable, log instance or None - The current error handler. If no handler was set through `seterrcall`, - ``None`` is returned. - - See Also - -------- - seterrcall, seterr, geterr - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> np.geterrcall() # we did not yet set a handler, returns None - - >>> oldsettings = np.seterr(all='call') - >>> def err_handler(type, flag): - ... print("Floating point error (%s), with flag %s" % (type, flag)) - >>> oldhandler = np.seterrcall(err_handler) - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([inf, inf, inf]) - - >>> cur_handler = np.geterrcall() - >>> cur_handler is err_handler - True - - """ - return umath.geterrobj()[2] - - -class _unspecified(object): - pass - - -_Unspecified = _unspecified() - - -@set_module('numpy') -class errstate(contextlib.ContextDecorator): - """ - errstate(**kwargs) - - Context manager for floating-point error handling. - - Using an instance of `errstate` as a context manager allows statements in - that context to execute with a known error handling behavior. Upon entering - the context the error handling is set with `seterr` and `seterrcall`, and - upon exiting it is reset to what it was before. - - .. versionchanged:: 1.17.0 - `errstate` is also usable as a function decorator, saving - a level of indentation if an entire function is wrapped. - See :py:class:`contextlib.ContextDecorator` for more information. - - Parameters - ---------- - kwargs : {divide, over, under, invalid} - Keyword arguments. The valid keywords are the possible floating-point - exceptions. Each keyword should have a string value that defines the - treatment for the particular error. Possible values are - {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. - - See Also - -------- - seterr, geterr, seterrcall, geterrcall - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> from collections import OrderedDict - >>> olderr = np.seterr(all='ignore') # Set error handling to known state. - - >>> np.arange(3) / 0. - array([nan, inf, inf]) - >>> with np.errstate(divide='warn'): - ... np.arange(3) / 0. 
- array([nan, inf, inf]) - - >>> np.sqrt(-1) - nan - >>> with np.errstate(invalid='raise'): - ... np.sqrt(-1) - Traceback (most recent call last): - File "", line 2, in - FloatingPointError: invalid value encountered in sqrt - - Outside the context the error handling behavior has not changed: - - >>> OrderedDict(sorted(np.geterr().items())) - OrderedDict([('divide', 'ignore'), ('invalid', 'ignore'), ('over', 'ignore'), ('under', 'ignore')]) - - """ - # Note that we don't want to run the above doctests because they will fail - # without a from __future__ import with_statement - - def __init__(self, **kwargs): - self.call = kwargs.pop('call', _Unspecified) - self.kwargs = kwargs - - def __enter__(self): - self.oldstate = seterr(**self.kwargs) - if self.call is not _Unspecified: - self.oldcall = seterrcall(self.call) - - def __exit__(self, *exc_info): - seterr(**self.oldstate) - if self.call is not _Unspecified: - seterrcall(self.oldcall) - - -def _setdef(): - defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None] - umath.seterrobj(defval) - - -# set the default values -_setdef() diff --git a/venv/lib/python3.7/site-packages/numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index 5be1293..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/_umath_tests.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/arrayprint.py b/venv/lib/python3.7/site-packages/numpy/core/arrayprint.py deleted file mode 100644 index 4010180..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/arrayprint.py +++ /dev/null @@ -1,1622 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ["array2string", "array_str", "array_repr", "set_string_function", - "set_printoptions", "get_printoptions", "printoptions", - "format_float_positional", "format_float_scientific"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - - -# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy -# scalars but for different purposes. scalartypes.c.src has str/reprs for when -# the scalar is printed on its own, while arrayprint.py has strs for when -# scalars are printed inside an ndarray. Only the latter strs are currently -# user-customizable. - -import sys -import functools -import numbers -if sys.version_info[0] >= 3: - try: - from _thread import get_ident - except ImportError: - from _dummy_thread import get_ident -else: - try: - from thread import get_ident - except ImportError: - from dummy_thread import get_ident - -import numpy as np -from . import numerictypes as _nt -from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat -from . 
import multiarray -from .multiarray import (array, dragon4_positional, dragon4_scientific, - datetime_as_string, datetime_data, ndarray, - set_legacy_print_mode) -from .fromnumeric import ravel, any -from .numeric import concatenate, asarray, errstate -from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, - flexible) -from .overrides import array_function_dispatch, set_module -import warnings -import contextlib - -_format_options = { - 'edgeitems': 3, # repr N leading and trailing items of each dimension - 'threshold': 1000, # total items > triggers array summarization - 'floatmode': 'maxprec', - 'precision': 8, # precision of floating point representations - 'suppress': False, # suppress printing small floating values in exp format - 'linewidth': 75, - 'nanstr': 'nan', - 'infstr': 'inf', - 'sign': '-', - 'formatter': None, - 'legacy': False} - -def _make_options_dict(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, nanstr=None, infstr=None, - sign=None, formatter=None, floatmode=None, legacy=None): - """ make a dictionary out of the non-None arguments, plus sanity checks """ - - options = {k: v for k, v in locals().items() if v is not None} - - if suppress is not None: - options['suppress'] = bool(suppress) - - modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] - if floatmode not in modes + [None]: - raise ValueError("floatmode option must be one of " + - ", ".join('"{}"'.format(m) for m in modes)) - - if sign not in [None, '-', '+', ' ']: - raise ValueError("sign option must be one of ' ', '+', or '-'") - - if legacy not in [None, False, '1.13']: - warnings.warn("legacy printing option can currently only be '1.13' or " - "`False`", stacklevel=3) - if threshold is not None: - # forbid the bad threshold arg suggested by stack overflow, gh-12351 - if not isinstance(threshold, numbers.Number): - raise TypeError("threshold must be numeric") - if np.isnan(threshold): - raise ValueError("threshold must be non-NAN, try " - "sys.maxsize for untruncated representation") - return options - - -@set_module('numpy') -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, nanstr=None, infstr=None, - formatter=None, sign=None, floatmode=None, **kwarg): - """ - Set printing options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int or None, optional - Number of digits of precision for floating point output (default 8). - May be None if `floatmode` is not `fixed`, to print as many digits as - necessary to uniquely specify the value. - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - To always use the full repr without summarization, pass `sys.maxsize`. - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - If True, always print floating point numbers using fixed point - notation, in which case numbers equal to zero in the current precision - will print as zero. If False, then scientific notation is used when - absolute value of the smallest number is < 1e-4 or the ratio of the - maximum absolute value to the minimum is > 1e3. The default is False. 
- nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - sign : string, either '-', '+', or ' ', optional - Controls printing of the sign of floating-point types. If '+', always - print the sign of positive values. If ' ', always prints a space - (whitespace character) in the sign position of positive values. If - '-', omit the sign character of positive values. (default '-') - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - - 'object' : `np.object_` arrays - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - floatmode : str, optional - Controls the interpretation of the `precision` option for - floating-point types. Can take the following values - (default maxprec_equal): - - * 'fixed': Always print exactly `precision` fractional digits, - even if this would print more or fewer digits than - necessary to specify the value uniquely. - * 'unique': Print the minimum number of fractional digits necessary - to represent each value uniquely. Different elements may - have a different number of digits. The value of the - `precision` option is ignored. - * 'maxprec': Print at most `precision` fractional digits, but if - an element can be uniquely represented with fewer digits - only print it with that many. - * 'maxprec_equal': Print at most `precision` fractional digits, - but if every element in the array can be uniquely - represented with an equal number of fewer digits, use that - many digits for all elements. - legacy : string or `False`, optional - If set to the string `'1.13'` enables 1.13 legacy printing mode. This - approximates numpy 1.13 print output by including a space in the sign - position of floats and different behavior for 0d arrays. If set to - `False`, disables legacy mode. Unrecognized strings will be ignored - with a warning for forward compatibility. - - .. versionadded:: 1.14.0 - - See Also - -------- - get_printoptions, printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Use `printoptions` as a context manager to set the values temporarily. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> np.array([1.123456789]) - [1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> np.arange(10) - array([0, 1, 2, ..., 7, 8, 9]) - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) 
- >>> x**2 - (x + eps)**2 - array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3, infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... suppress=False, threshold=1000, formatter=None) - - Also to temporarily override options, use `printoptions` as a context manager: - - >>> with np.printoptions(precision=2, suppress=True, threshold=5): - ... np.linspace(0, 10, 10) - array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) - - """ - legacy = kwarg.pop('legacy', None) - if kwarg: - msg = "set_printoptions() got unexpected keyword argument '{}'" - raise TypeError(msg.format(kwarg.popitem()[0])) - - opt = _make_options_dict(precision, threshold, edgeitems, linewidth, - suppress, nanstr, infstr, sign, formatter, - floatmode, legacy) - # formatter is always reset - opt['formatter'] = formatter - _format_options.update(opt) - - # set the C variable for legacy mode - if _format_options['legacy'] == '1.13': - set_legacy_print_mode(113) - # reset the sign option in legacy mode to avoid confusion - _format_options['sign'] = '-' - elif _format_options['legacy'] is False: - set_legacy_print_mode(0) - - -@set_module('numpy') -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - sign : str - - For a full description of these options, see `set_printoptions`. - - See Also - -------- - set_printoptions, printoptions, set_string_function - - """ - return _format_options.copy() - - -@set_module('numpy') -@contextlib.contextmanager -def printoptions(*args, **kwargs): - """Context manager for setting print options. - - Set print options for the scope of the `with` block, and restore the old - options at the end. See `set_printoptions` for the full description of - available options. - - Examples - -------- - - >>> from numpy.testing import assert_equal - >>> with np.printoptions(precision=2): - ... np.array([2.0]) / 3 - array([0.67]) - - The `as`-clause of the `with`-statement gives the current print options: - - >>> with np.printoptions(precision=2) as opts: - ... assert_equal(opts, np.get_printoptions()) - - See Also - -------- - set_printoptions, get_printoptions - - """ - opts = np.get_printoptions() - try: - np.set_printoptions(*args, **kwargs) - yield np.get_printoptions() - finally: - np.set_printoptions(**opts) - - -def _leading_trailing(a, edgeitems, index=()): - """ - Keep only the N-D corners (leading and trailing edges) of an array. - - Should be passed a base-class ndarray, since it makes no guarantees about - preserving subclasses. 
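For example, with ``edgeitems=2`` only the two leading and two trailing entries of a long 1-D array survive (an illustrative sketch added for clarity, using the signature defined below):

>>> _leading_trailing(np.arange(10), 2)  # sketch: keeps only the corners
array([0, 1, 8, 9])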
- """ - axis = len(index) - if axis == a.ndim: - return a[index] - - if a.shape[axis] > 2*edgeitems: - return concatenate(( - _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), - _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) - ), axis=axis) - else: - return _leading_trailing(a, edgeitems, index + np.index_exp[:]) - - -def _object_format(o): - """ Object arrays containing lists should be printed unambiguously """ - if type(o) is list: - fmt = 'list({!r})' - else: - fmt = '{!r}' - return fmt.format(o) - -def repr_format(x): - return repr(x) - -def str_format(x): - return str(x) - -def _get_formatdict(data, **opt): - prec, fmode = opt['precision'], opt['floatmode'] - supp, sign = opt['suppress'], opt['sign'] - legacy = opt['legacy'] - - # wrapped in lambdas to avoid taking a code path with the wrong type of data - formatdict = { - 'bool': lambda: BoolFormat(data), - 'int': lambda: IntegerFormat(data), - 'float': lambda: - FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'longfloat': lambda: - FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'complexfloat': lambda: - ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'longcomplexfloat': lambda: - ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), - 'datetime': lambda: DatetimeFormat(data, legacy=legacy), - 'timedelta': lambda: TimedeltaFormat(data), - 'object': lambda: _object_format, - 'void': lambda: str_format, - 'numpystr': lambda: repr_format, - 'str': lambda: str} - - # we need to wrap values in `formatter` in a lambda, so that the interface - # is the same as the above values. - def indirect(x): - return lambda: x - - formatter = opt['formatter'] - if formatter is not None: - fkeys = [k for k in formatter.keys() if formatter[k] is not None] - if 'all' in fkeys: - for key in formatdict.keys(): - formatdict[key] = indirect(formatter['all']) - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = indirect(formatter['int_kind']) - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = indirect(formatter['float_kind']) - if 'complex_kind' in fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = indirect(formatter['complex_kind']) - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = indirect(formatter['str_kind']) - for key in formatdict.keys(): - if key in fkeys: - formatdict[key] = indirect(formatter[key]) - - return formatdict - -def _get_format_function(data, **options): - """ - find the right formatting function for the dtype_ - """ - dtype_ = data.dtype - dtypeobj = dtype_.type - formatdict = _get_formatdict(data, **options) - if issubclass(dtypeobj, _nt.bool_): - return formatdict['bool']() - elif issubclass(dtypeobj, _nt.integer): - if issubclass(dtypeobj, _nt.timedelta64): - return formatdict['timedelta']() - else: - return formatdict['int']() - elif issubclass(dtypeobj, _nt.floating): - if issubclass(dtypeobj, _nt.longfloat): - return formatdict['longfloat']() - else: - return formatdict['float']() - elif issubclass(dtypeobj, _nt.complexfloating): - if issubclass(dtypeobj, _nt.clongfloat): - return formatdict['longcomplexfloat']() - else: - return formatdict['complexfloat']() - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - return formatdict['numpystr']() - elif issubclass(dtypeobj, _nt.datetime64): - return formatdict['datetime']() - elif issubclass(dtypeobj, _nt.object_): - return formatdict['object']() - elif 
issubclass(dtypeobj, _nt.void): - if dtype_.names is not None: - return StructuredVoidFormat.from_data(data, **options) - else: - return formatdict['void']() - else: - return formatdict['numpystr']() - - -def _recursive_guard(fillvalue='...'): - """ - Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs - - Decorates a function such that if it calls itself with the same first - argument, it returns `fillvalue` instead of recursing. - - Largely copied from reprlib.recursive_repr - """ - - def decorating_function(f): - repr_running = set() - - @functools.wraps(f) - def wrapper(self, *args, **kwargs): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - return f(self, *args, **kwargs) - finally: - repr_running.discard(key) - - return wrapper - - return decorating_function - - -# gracefully handle recursive calls, when object arrays contain themselves -@_recursive_guard() -def _array2string(a, options, separator=' ', prefix=""): - # The formatter __init__s in _get_format_function cannot deal with - # subclasses yet, and we also need to avoid recursion issues in - # _formatArray with subclasses which return 0d arrays in place of scalars - data = asarray(a) - if a.shape == (): - a = data - - if a.size > options['threshold']: - summary_insert = "..." - data = _leading_trailing(data, options['edgeitems']) - else: - summary_insert = "" - - # find the right formatting function for the array - format_function = _get_format_function(data, **options) - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - lst = _formatArray(a, format_function, options['linewidth'], - next_line_prefix, separator, options['edgeitems'], - summary_insert, options['legacy']) - return lst - - -def _array2string_dispatcher( - a, max_line_width=None, precision=None, - suppress_small=None, separator=None, prefix=None, - style=None, formatter=None, threshold=None, - edgeitems=None, sign=None, floatmode=None, suffix=None, - **kwarg): - return (a,) - - -@array_function_dispatch(_array2string_dispatcher, module='numpy') -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=np._NoValue, formatter=None, threshold=None, - edgeitems=None, sign=None, floatmode=None, suffix="", - **kwarg): - """ - Return a string representation of an array. - - Parameters - ---------- - a : array_like - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. - Defaults to ``numpy.get_printoptions()['linewidth']``. - precision : int or None, optional - Floating point precision. - Defaults to ``numpy.get_printoptions()['precision']``. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - Defaults to ``numpy.get_printoptions()['suppress']``. - separator : str, optional - Inserted between elements. - prefix : str, optional - suffix: str, optional - The length of the prefix and suffix strings are used to respectively - align and wrap the output. An array is typically printed as:: - - prefix + array2string(a) + suffix - - The output is left-padded by the length of the prefix string, and - wrapping is forced at the column ``max_line_width - len(suffix)``. 
- Note that the content of the prefix and suffix strings is - not included in the output. - style : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.14.0 - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'void' : type `numpy.void` - - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr. - Defaults to ``numpy.get_printoptions()['threshold']``. - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension. - Defaults to ``numpy.get_printoptions()['edgeitems']``. - sign : string, either '-', '+', or ' ', optional - Controls printing of the sign of floating-point types. If '+', always - print the sign of positive values. If ' ', always prints a space - (whitespace character) in the sign position of positive values. If - '-', omit the sign character of positive values. - Defaults to ``numpy.get_printoptions()['sign']``. - floatmode : str, optional - Controls the interpretation of the `precision` option for - floating-point types. - Defaults to ``numpy.get_printoptions()['floatmode']``. - Can take the following values: - - - 'fixed': Always print exactly `precision` fractional digits, - even if this would print more or fewer digits than - necessary to specify the value uniquely. - - 'unique': Print the minimum number of fractional digits necessary - to represent each value uniquely. Different elements may - have a different number of digits. The value of the - `precision` option is ignored. - - 'maxprec': Print at most `precision` fractional digits, but if - an element can be uniquely represented with fewer digits - only print it with that many. - - 'maxprec_equal': Print at most `precision` fractional digits, - but if every element in the array can be uniquely - represented with an equal number of fewer digits, use that - many digits for all elements. - legacy : string or `False`, optional - If set to the string `'1.13'` enables 1.13 legacy printing mode. This - approximates numpy 1.13 print output by including a space in the sign - position of floats and different behavior for 0d arrays. If set to - `False`, disables legacy mode. Unrecognized strings will be ignored - with a warning for forward compatibility. - - .. versionadded:: 1.14.0 - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError - If a callable in `formatter` does not return a string.
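As a concrete sketch of the prefix alignment described above (an illustrative editorial example; output assumes the default print options):

>>> a = np.arange(6).reshape(2, 3)
>>> print('data: ' + np.array2string(a, prefix='data: '))  # sketch
data: [[0 1 2]
       [3 4 5]]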
- - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. - - This is a very flexible function; `array_repr` and `array_str` use - `array2string` internally, so keywords with the same name should work - identically in all three functions. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - '[0.,1.,2.,3.]' - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0 0x1 0x2]' - - """ - legacy = kwarg.pop('legacy', None) - if kwarg: - msg = "array2string() got unexpected keyword argument '{}'" - raise TypeError(msg.format(kwarg.popitem()[0])) - - overrides = _make_options_dict(precision, threshold, edgeitems, - max_line_width, suppress_small, None, None, - sign, formatter, floatmode, legacy) - options = _format_options.copy() - options.update(overrides) - - if options['legacy'] == '1.13': - if style is np._NoValue: - style = repr - - if a.shape == () and a.dtype.names is None: - return style(a.item()) - elif style is not np._NoValue: - # Deprecation 11-9-2017 v1.14 - warnings.warn("'style' argument is deprecated and no longer functional" - " except in 1.13 'legacy' mode", - DeprecationWarning, stacklevel=3) - - if options['legacy'] != '1.13': - options['linewidth'] -= len(suffix) - - # treat as a null array if any of shape elements == 0 - if a.size == 0: - return "[]" - - return _array2string(a, options, separator, prefix) - - -def _extendLine(s, line, word, line_width, next_line_prefix, legacy): - needs_wrap = len(line) + len(word) > line_width - if legacy != '1.13': - # don't wrap lines if it won't help - if len(line) <= len(next_line_prefix): - needs_wrap = False - - if needs_wrap: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, line_width, next_line_prefix, - separator, edge_items, summary_insert, legacy): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. Summarized output - - """ - def recurser(index, hanging_indent, curr_width): - """ - By using this local function, we don't need to recurse with all the - arguments.
Since this function is not created recursively, the cost is - not significant - """ - axis = len(index) - axes_left = a.ndim - axis - - if axes_left == 0: - return format_function(a[index]) - - # when recursing, add a space to align with the [ added, and reduce the - # length of the line by 1 - next_hanging_indent = hanging_indent + ' ' - if legacy == '1.13': - next_width = curr_width - else: - next_width = curr_width - len(']') - - a_len = a.shape[axis] - show_summary = summary_insert and 2*edge_items < a_len - if show_summary: - leading_items = edge_items - trailing_items = edge_items - else: - leading_items = 0 - trailing_items = a_len - - # stringify the array with the hanging indent on the first line too - s = '' - - # last axis (rows) - wrap elements if they would not fit on one line - if axes_left == 1: - # the length up until the beginning of the separator / bracket - if legacy == '1.13': - elem_width = curr_width - len(separator.rstrip()) - else: - elem_width = curr_width - max(len(separator.rstrip()), len(']')) - - line = hanging_indent - for i in range(leading_items): - word = recurser(index + (i,), next_hanging_indent, next_width) - s, line = _extendLine( - s, line, word, elem_width, hanging_indent, legacy) - line += separator - - if show_summary: - s, line = _extendLine( - s, line, summary_insert, elem_width, hanging_indent, legacy) - if legacy == '1.13': - line += ", " - else: - line += separator - - for i in range(trailing_items, 1, -1): - word = recurser(index + (-i,), next_hanging_indent, next_width) - s, line = _extendLine( - s, line, word, elem_width, hanging_indent, legacy) - line += separator - - if legacy == '1.13': - # width of the separator is not considered on 1.13 - elem_width = curr_width - word = recurser(index + (-1,), next_hanging_indent, next_width) - s, line = _extendLine( - s, line, word, elem_width, hanging_indent, legacy) - - s += line - - # other axes - insert newlines between rows - else: - s = '' - line_sep = separator.rstrip() + '\n'*(axes_left - 1) - - for i in range(leading_items): - nested = recurser(index + (i,), next_hanging_indent, next_width) - s += hanging_indent + nested + line_sep - - if show_summary: - if legacy == '1.13': - # trailing space, fixed nbr of newlines, and fixed separator - s += hanging_indent + summary_insert + ", \n" - else: - s += hanging_indent + summary_insert + line_sep - - for i in range(trailing_items, 1, -1): - nested = recurser(index + (-i,), next_hanging_indent, - next_width) - s += hanging_indent + nested + line_sep - - nested = recurser(index + (-1,), next_hanging_indent, next_width) - s += hanging_indent + nested - - # remove the hanging indent, and wrap in [] - s = '[' + s[len(hanging_indent):] + ']' - return s - - try: - # invoke the recursive part with an initial index and prefix - return recurser(index=(), - hanging_indent=next_line_prefix, - curr_width=line_width) - finally: - # recursive closures have a cyclic reference to themselves, which - # requires gc to collect (gh-10620). 
To avoid this problem, for - # performance and PyPy friendliness, we break the cycle: - recurser = None - -def _none_or_positive_arg(x, name): - if x is None: - return -1 - if x < 0: - raise ValueError("{} must be >= 0".format(name)) - return x - -class FloatingFormat(object): - """ Formatter for subtypes of np.floating """ - def __init__(self, data, precision, floatmode, suppress_small, sign=False, - **kwarg): - # for backcompatibility, accept bools - if isinstance(sign, bool): - sign = '+' if sign else '-' - - self._legacy = kwarg.get('legacy', False) - if self._legacy == '1.13': - # when not 0d, legacy does not support '-' - if data.shape != () and sign == '-': - sign = ' ' - - self.floatmode = floatmode - if floatmode == 'unique': - self.precision = None - else: - self.precision = precision - - self.precision = _none_or_positive_arg(self.precision, 'precision') - - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - - self.fillFormat(data) - - def fillFormat(self, data): - # only the finite values are used to compute the number of digits - finite_vals = data[isfinite(data)] - - # choose exponential mode based on the non-zero finite values: - abs_non_zero = absolute(finite_vals[finite_vals != 0]) - if len(abs_non_zero) != 0: - max_val = np.max(abs_non_zero) - min_val = np.min(abs_non_zero) - with errstate(over='ignore'): # division can overflow - if max_val >= 1.e8 or (not self.suppress_small and - (min_val < 0.0001 or max_val/min_val > 1000.)): - self.exp_format = True - - # do a first pass of printing all the numbers, to determine sizes - if len(finite_vals) == 0: - self.pad_left = 0 - self.pad_right = 0 - self.trim = '.' - self.exp_size = -1 - self.unique = True - elif self.exp_format: - trim, unique = '.', True - if self.floatmode == 'fixed' or self._legacy == '1.13': - trim, unique = 'k', False - strs = (dragon4_scientific(x, precision=self.precision, - unique=unique, trim=trim, sign=self.sign == '+') - for x in finite_vals) - frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) - int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) - self.exp_size = max(len(s) for s in exp_strs) - 1 - - self.trim = 'k' - self.precision = max(len(s) for s in frac_part) - - # for back-compat with np 1.13, use 2 spaces & sign and full prec - if self._legacy == '1.13': - self.pad_left = 3 - else: - # this should be only 1 or 2. Can be calculated from sign. - self.pad_left = max(len(s) for s in int_part) - # pad_right is only needed for nan length calculation - self.pad_right = self.exp_size + 2 + self.precision - - self.unique = False - else: - # first pass printing to determine sizes - trim, unique = '.', True - if self.floatmode == 'fixed': - trim, unique = 'k', False - strs = (dragon4_positional(x, precision=self.precision, - fractional=True, - unique=unique, trim=trim, - sign=self.sign == '+') - for x in finite_vals) - int_part, frac_part = zip(*(s.split('.') for s in strs)) - if self._legacy == '1.13': - self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) - else: - self.pad_left = max(len(s) for s in int_part) - self.pad_right = max(len(s) for s in frac_part) - self.exp_size = -1 - - if self.floatmode in ['fixed', 'maxprec_equal']: - self.precision = self.pad_right - self.unique = False - self.trim = 'k' - else: - self.unique = True - self.trim = '.' 
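# Added editorial comment: whichever branch ran above, pad_left/pad_right
# now hold the widest field widths computed from the finite values; the
# adjustments below widen pad_left to make room for an explicit ' ' sign
# and for non-finite strings such as 'nan' and 'inf'.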
- - if self._legacy != '1.13': - # account for sign = ' ' by adding one to pad_left - if self.sign == ' ' and not any(np.signbit(finite_vals)): - self.pad_left += 1 - - # if there are non-finite values, may need to increase pad_left - if data.size != finite_vals.size: - neginf = self.sign != '-' or any(data[isinf(data)] < 0) - nanlen = len(_format_options['nanstr']) - inflen = len(_format_options['infstr']) + neginf - offset = self.pad_right + 1 # +1 for decimal pt - self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset) - - def __call__(self, x): - if not np.isfinite(x): - with errstate(invalid='ignore'): - if np.isnan(x): - sign = '+' if self.sign == '+' else '' - ret = sign + _format_options['nanstr'] - else: # isinf - sign = '-' if x < 0 else '+' if self.sign == '+' else '' - ret = sign + _format_options['infstr'] - return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret - - if self.exp_format: - return dragon4_scientific(x, - precision=self.precision, - unique=self.unique, - trim=self.trim, - sign=self.sign == '+', - pad_left=self.pad_left, - exp_digits=self.exp_size) - else: - return dragon4_positional(x, - precision=self.precision, - unique=self.unique, - fractional=True, - trim=self.trim, - sign=self.sign == '+', - pad_left=self.pad_left, - pad_right=self.pad_right) - - -@set_module('numpy') -def format_float_scientific(x, precision=None, unique=True, trim='k', - sign=False, pad_left=None, exp_digits=None): - """ - Format a floating-point scalar as a decimal string in scientific notation. - - Provides control over rounding, trimming and padding. Uses and assumes - IEEE unbiased rounding. Uses the "Dragon4" algorithm. - - Parameters - ---------- - x : python float or numpy floating scalar - Value to format. - precision : non-negative integer or None, optional - Maximum number of digits to print. May be None if `unique` is - `True`, but must be an integer if unique is `False`. - unique : boolean, optional - If `True`, use a digit-generation strategy which gives the shortest - representation which uniquely identifies the floating-point number from - other values of the same type, by judicious rounding. If `precision` - was omitted, print all necessary digits, otherwise digit generation is - cut off after `precision` digits and the remaining value is rounded. - If `False`, digits are generated as if printing an infinite-precision - value and stopping after `precision` digits, rounding the remaining - value. - trim : one of 'k', '.', '0', '-', optional - Controls post-processing trimming of trailing digits, as follows: - - * 'k' : keep trailing zeros, keep decimal point (no trimming) - * '.' : trim all trailing zeros, leave decimal point - * '0' : trim all but the zero before the decimal point. Insert the - zero if it is missing. - * '-' : trim trailing zeros and any trailing decimal point - sign : boolean, optional - Whether to show the sign for positive values. - pad_left : non-negative integer, optional - Pad the left side of the string with whitespace until at least that - many characters are to the left of the decimal point. - exp_digits : non-negative integer, optional - Pad the exponent with zeros until it contains at least this many digits. - If omitted, the exponent will be at least 2 digits. 
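For instance, `precision` counts fractional digits and `exp_digits` pads the exponent (an illustrative sketch; values chosen arbitrarily):

>>> np.format_float_scientific(0.0001234, precision=2, unique=False)  # sketch
'1.23e-04'
>>> np.format_float_scientific(0.0001234, precision=2, unique=False, exp_digits=3)
'1.23e-004'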
- - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_positional - - Examples - -------- - >>> np.format_float_scientific(np.float32(np.pi)) - '3.1415927e+00' - >>> s = np.float32(1.23e24) - >>> np.format_float_scientific(s, unique=False, precision=15) - '1.230000071797338e+24' - >>> np.format_float_scientific(s, exp_digits=4) - '1.23e+0024' - """ - precision = _none_or_positive_arg(precision, 'precision') - pad_left = _none_or_positive_arg(pad_left, 'pad_left') - exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') - return dragon4_scientific(x, precision=precision, unique=unique, - trim=trim, sign=sign, pad_left=pad_left, - exp_digits=exp_digits) - - -@set_module('numpy') -def format_float_positional(x, precision=None, unique=True, - fractional=True, trim='k', sign=False, - pad_left=None, pad_right=None): - """ - Format a floating-point scalar as a decimal string in positional notation. - - Provides control over rounding, trimming and padding. Uses and assumes - IEEE unbiased rounding. Uses the "Dragon4" algorithm. - - Parameters - ---------- - x : python float or numpy floating scalar - Value to format. - precision : non-negative integer or None, optional - Maximum number of digits to print. May be None if `unique` is - `True`, but must be an integer if unique is `False`. - unique : boolean, optional - If `True`, use a digit-generation strategy which gives the shortest - representation which uniquely identifies the floating-point number from - other values of the same type, by judicious rounding. If `precision` - was omitted, print out all necessary digits, otherwise digit generation - is cut off after `precision` digits and the remaining value is rounded. - If `False`, digits are generated as if printing an infinite-precision - value and stopping after `precision` digits, rounding the remaining - value. - fractional : boolean, optional - If `True`, the cutoff of `precision` digits refers to the total number - of digits after the decimal point, including leading zeros. - If `False`, `precision` refers to the total number of significant - digits, before or after the decimal point, ignoring leading zeros. - trim : one of 'k', '.', '0', '-', optional - Controls post-processing trimming of trailing digits, as follows: - - * 'k' : keep trailing zeros, keep decimal point (no trimming) - * '.' : trim all trailing zeros, leave decimal point - * '0' : trim all but the zero before the decimal point. Insert the - zero if it is missing. - * '-' : trim trailing zeros and any trailing decimal point - sign : boolean, optional - Whether to show the sign for positive values. - pad_left : non-negative integer, optional - Pad the left side of the string with whitespace until at least that - many characters are to the left of the decimal point. - pad_right : non-negative integer, optional - Pad the right side of the string with whitespace until at least that - many characters are to the right of the decimal point. 
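For instance, the `trim` modes differ only in how trailing zeros and the decimal point are kept (an illustrative sketch):

>>> np.format_float_positional(1.0, precision=4, unique=False, trim='k')  # sketch
'1.0000'
>>> np.format_float_positional(1.0, precision=4, unique=False, trim='.')
'1.'
>>> np.format_float_positional(1.0, precision=4, unique=False, trim='0')
'1.0'
>>> np.format_float_positional(1.0, precision=4, unique=False, trim='-')
'1'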
- - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_scientific - - Examples - -------- - >>> np.format_float_positional(np.float32(np.pi)) - '3.1415927' - >>> np.format_float_positional(np.float16(np.pi)) - '3.14' - >>> np.format_float_positional(np.float16(0.3)) - '0.3' - >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) - '0.3000488281' - """ - precision = _none_or_positive_arg(precision, 'precision') - pad_left = _none_or_positive_arg(pad_left, 'pad_left') - pad_right = _none_or_positive_arg(pad_right, 'pad_right') - return dragon4_positional(x, precision=precision, unique=unique, - fractional=fractional, trim=trim, - sign=sign, pad_left=pad_left, - pad_right=pad_right) - - -class IntegerFormat(object): - def __init__(self, data): - if data.size > 0: - max_str_len = max(len(str(np.max(data))), - len(str(np.min(data)))) - else: - max_str_len = 0 - self.format = '%{}d'.format(max_str_len) - - def __call__(self, x): - return self.format % x - - -class BoolFormat(object): - def __init__(self, data, **kwargs): - # add an extra space so " True" and "False" have the same length and - # array elements align nicely when printed, except in 0d arrays - self.truestr = ' True' if data.shape != () else 'True' - - def __call__(self, x): - return self.truestr if x else "False" - - -class ComplexFloatingFormat(object): - """ Formatter for subtypes of np.complexfloating """ - def __init__(self, x, precision, floatmode, suppress_small, - sign=False, **kwarg): - # for backcompatibility, accept bools - if isinstance(sign, bool): - sign = '+' if sign else '-' - - floatmode_real = floatmode_imag = floatmode - if kwarg.get('legacy', False) == '1.13': - floatmode_real = 'maxprec_equal' - floatmode_imag = 'maxprec' - - self.real_format = FloatingFormat(x.real, precision, floatmode_real, - suppress_small, sign=sign, **kwarg) - self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag, - suppress_small, sign='+', **kwarg) - - def __call__(self, x): - r = self.real_format(x.real) - i = self.imag_format(x.imag) - - # add the 'j' before the terminal whitespace in i - sp = len(i.rstrip()) - i = i[:sp] + 'j' + i[sp:] - - return r + i - - -class _TimelikeFormat(object): - def __init__(self, data): - non_nat = data[~isnat(data)] - if len(non_nat) > 0: - # Max str length of non-NaT elements - max_str_len = max(len(self._format_non_nat(np.max(non_nat))), - len(self._format_non_nat(np.min(non_nat)))) - else: - max_str_len = 0 - if len(non_nat) < data.size: - # data contains a NaT - max_str_len = max(max_str_len, 5) - self._format = '%{}s'.format(max_str_len) - self._nat = "'NaT'".rjust(max_str_len) - - def _format_non_nat(self, x): - # override in subclass - raise NotImplementedError - - def __call__(self, x): - if isnat(x): - return self._nat - else: - return self._format % self._format_non_nat(x) - - -class DatetimeFormat(_TimelikeFormat): - def __init__(self, x, unit=None, timezone=None, casting='same_kind', - legacy=False): - # Get the unit from the dtype - if unit is None: - if x.dtype.kind == 'M': - unit = datetime_data(x.dtype)[0] - else: - unit = 's' - - if timezone is None: - timezone = 'naive' - self.timezone = timezone - self.unit = unit - self.casting = casting - self.legacy = legacy - - # must be called after the above are configured - super(DatetimeFormat, self).__init__(x) - - def __call__(self, x): - if self.legacy == '1.13': - return self._format_non_nat(x) - return super(DatetimeFormat, 
self).__call__(x) - - def _format_non_nat(self, x): - return "'%s'" % datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting) - - -class TimedeltaFormat(_TimelikeFormat): - def _format_non_nat(self, x): - return str(x.astype('i8')) - - -class SubArrayFormat(object): - def __init__(self, format_function): - self.format_function = format_function - - def __call__(self, arr): - if arr.ndim <= 1: - return "[" + ", ".join(self.format_function(a) for a in arr) + "]" - return "[" + ", ".join(self.__call__(a) for a in arr) + "]" - - -class StructuredVoidFormat(object): - """ - Formatter for structured np.void objects. - - This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), - as alias scalars lose their field information, and the implementation - relies upon np.void.__getitem__. - """ - def __init__(self, format_functions): - self.format_functions = format_functions - - @classmethod - def from_data(cls, data, **options): - """ - This is a second way to initialize StructuredVoidFormat, using the raw data - as input. Added to avoid changing the signature of __init__. - """ - format_functions = [] - for field_name in data.dtype.names: - format_function = _get_format_function(data[field_name], **options) - if data.dtype[field_name].shape != (): - format_function = SubArrayFormat(format_function) - format_functions.append(format_function) - return cls(format_functions) - - def __call__(self, x): - str_fields = [ - format_function(field) - for field, format_function in zip(x, self.format_functions) - ] - if len(str_fields) == 1: - return "({},)".format(str_fields[0]) - else: - return "({})".format(", ".join(str_fields)) - - -def _void_scalar_repr(x): - """ - Implements the repr for structured-void scalars. It is called from the - scalartypes.c.src code, and is placed here because it uses the elementwise - formatters defined above. - """ - return StructuredVoidFormat.from_data(array(x), **_format_options)(x) - - -_typelessdata = [int_, float_, complex_, bool_] -if issubclass(intc, int): - _typelessdata.append(intc) -if issubclass(longlong, int): - _typelessdata.append(longlong) - - -def dtype_is_implied(dtype): - """ - Determine if the given dtype is implied by the representation of its values. - - Parameters - ---------- - dtype : dtype - Data type - - Returns - ------- - implied : bool - True if the dtype is implied by the representation of its values. - - Examples - -------- - >>> np.core.arrayprint.dtype_is_implied(int) - True - >>> np.array([1, 2, 3], int) - array([1, 2, 3]) - >>> np.core.arrayprint.dtype_is_implied(np.int8) - False - >>> np.array([1, 2, 3], np.int8) - array([1, 2, 3], dtype=int8) - """ - dtype = np.dtype(dtype) - if _format_options['legacy'] == '1.13' and dtype.type == bool_: - return False - - # not just void types can be structured, and names are not part of the repr - if dtype.names is not None: - return False - - return dtype.type in _typelessdata - - -def dtype_short_repr(dtype): - """ - Convert a dtype to a short form which evaluates to the same dtype. 
- - The intent is roughly that the following holds - - >>> from numpy import * - >>> dt = np.int64([1, 2]).dtype - >>> assert eval(dtype_short_repr(dt)) == dt - """ - if dtype.names is not None: - # structured dtypes give a list or tuple repr - return str(dtype) - elif issubclass(dtype.type, flexible): - # handle these separately so they don't give garbage like str256 - return "'%s'" % str(dtype) - - typename = dtype.name - # quote typenames which can't be represented as python variable names - if typename and not (typename[0].isalpha() and typename.isalnum()): - typename = repr(typename) - - return typename - - -def _array_repr_implementation( - arr, max_line_width=None, precision=None, suppress_small=None, - array2string=array2string): - """Internal version of array_repr() that allows overriding array2string.""" - if max_line_width is None: - max_line_width = _format_options['linewidth'] - - if type(arr) is not ndarray: - class_name = type(arr).__name__ - else: - class_name = "array" - - skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0 - - prefix = class_name + "(" - suffix = ")" if skipdtype else "," - - if (_format_options['legacy'] == '1.13' and - arr.shape == () and not arr.dtype.names): - lst = repr(arr.item()) - elif arr.size > 0 or arr.shape == (0,): - lst = array2string(arr, max_line_width, precision, suppress_small, - ', ', prefix, suffix=suffix) - else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(arr.shape),) - - arr_str = prefix + lst + suffix - - if skipdtype: - return arr_str - - dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) - - # compute whether we should put dtype on a new line: Do so if adding the - # dtype would extend the last line past max_line_width. - # Note: This line gives the correct result even when rfind returns -1. - last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) - spacer = " " - if _format_options['legacy'] == '1.13': - if issubclass(arr.dtype.type, flexible): - spacer = '\n' + ' '*len(class_name + "(") - elif last_line_len + len(dtype_str) + 1 > max_line_width: - spacer = '\n' + ' '*len(class_name + "(") - - return arr_str + spacer + dtype_str - - -def _array_repr_dispatcher( - arr, max_line_width=None, precision=None, suppress_small=None): - return (arr,) - - -@array_function_dispatch(_array_repr_dispatcher, module='numpy') -def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): - """ - Return the string representation of an array. - - Parameters - ---------- - arr : ndarray - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. - Defaults to ``numpy.get_printoptions()['linewidth']``. - precision : int, optional - Floating point precision. - Defaults to ``numpy.get_printoptions()['precision']``. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - Defaults to ``numpy.get_printoptions()['suppress']``. - - Returns - ------- - string : str - The string representation of an array. 
- - See Also - -------- - array_str, array2string, set_printoptions - - Examples - -------- - >>> np.array_repr(np.array([1,2])) - 'array([1, 2])' - >>> np.array_repr(np.ma.array([0.])) - 'MaskedArray([0.])' - >>> np.array_repr(np.array([], np.int32)) - 'array([], dtype=int32)' - - >>> x = np.array([1e-6, 4e-7, 2, 3]) - >>> np.array_repr(x, precision=6, suppress_small=True) - 'array([0.000001, 0. , 2. , 3. ])' - - """ - return _array_repr_implementation( - arr, max_line_width, precision, suppress_small) - - -@_recursive_guard() -def _guarded_repr_or_str(v): - if isinstance(v, bytes): - return repr(v) - return str(v) - - -def _array_str_implementation( - a, max_line_width=None, precision=None, suppress_small=None, - array2string=array2string): - """Internal version of array_str() that allows overriding array2string.""" - if (_format_options['legacy'] == '1.13' and - a.shape == () and not a.dtype.names): - return str(a.item()) - - # the str of 0d arrays is a special case: It should appear like a scalar, - # so floats are not truncated by `precision`, and strings are not wrapped - # in quotes. So we return the str of the scalar value. - if a.shape == (): - # obtain a scalar and call str on it, avoiding problems for subclasses - # for which indexing with () returns a 0d instead of a scalar by using - # ndarray's getindex. Also guard against recursive 0d object arrays. - return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) - - return array2string(a, max_line_width, precision, suppress_small, ' ', "") - - -def _array_str_dispatcher( - a, max_line_width=None, precision=None, suppress_small=None): - return (a,) - - -@array_function_dispatch(_array_str_dispatcher, module='numpy') -def array_str(a, max_line_width=None, precision=None, suppress_small=None): - """ - Return a string representation of the data in an array. - - The data in the array is returned as a single string. This function is - similar to `array_repr`, the difference being that `array_repr` also - returns information on the kind of array and its data type. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. - Defaults to ``numpy.get_printoptions()['linewidth']``. - precision : int, optional - Floating point precision. - Defaults to ``numpy.get_printoptions()['precision']``. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - Defaults to ``numpy.get_printoptions()['suppress']``. - - See Also - -------- - array2string, array_repr, set_printoptions - - Examples - -------- - >>> np.array_str(np.arange(3)) - '[0 1 2]' - - """ - return _array_str_implementation( - a, max_line_width, precision, suppress_small) - - -# needed if __array_function__ is disabled -_array2string_impl = getattr(array2string, '__wrapped__', array2string) -_default_array_str = functools.partial(_array_str_implementation, - array2string=_array2string_impl) -_default_array_repr = functools.partial(_array_repr_implementation, - array2string=_array2string_impl) - - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. The function should expect - a single array argument and return a string of the representation of - the array. 
If None, the function is reset to the default NumPy function - to print arrays. - repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set; if False, the function that returns the default string - representation (``__str__``) is set. - - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> np.set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> _ = a - >>> # [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> np.set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. - - >>> x = np.arange(4) - >>> np.set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([0, 1, 2, 3])' - - """ - if f is None: - if repr: - return multiarray.set_string_function(_default_array_repr, 1) - else: - return multiarray.set_string_function(_default_array_str, 0) - else: - return multiarray.set_string_function(f, repr) - -set_string_function(_default_array_str, False) -set_string_function(_default_array_repr, True) diff --git a/venv/lib/python3.7/site-packages/numpy/core/cversions.py b/venv/lib/python3.7/site-packages/numpy/core/cversions.py deleted file mode 100644 index 7995dd9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/cversions.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Simple script to compute the api hash of the current API. - -The API hash is defined by numpy_api_order and ufunc_api_order. - -""" -from __future__ import division, absolute_import, print_function - -from os.path import dirname - -from code_generators.genapi import fullapi_hash -from code_generators.numpy_api import full_api - -if __name__ == '__main__': - curdir = dirname(__file__) - print(fullapi_hash(full_api)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/defchararray.py b/venv/lib/python3.7/site-packages/numpy/core/defchararray.py deleted file mode 100644 index 2d89d6f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/defchararray.py +++ /dev/null @@ -1,2819 +0,0 @@ -""" -This module contains a set of functions for vectorized string -operations and methods. - -.. note:: - The `chararray` class exists for backwards compatibility with - Numarray; it is not recommended for new development. Starting from numpy - 1.4, if one needs arrays of strings, it is recommended to use arrays of - `dtype` `object_`, `string_` or `unicode_`, and use the free functions - in the `numpy.char` module for fast vectorized string operations. - -Some methods will only be available if the corresponding string method is -available in your version of Python. - -The preferred alias for `defchararray` is `numpy.char`.
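A quick sketch of the recommended `numpy.char` usage on a plain string array (an illustrative editorial example):

>>> import numpy as np
>>> np.char.upper(np.array(['hello', 'world']))  # sketch
array(['HELLO', 'WORLD'], dtype='<U5')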
- -""" -from __future__ import division, absolute_import, print_function - -import functools -import sys -from .numerictypes import string_, unicode_, integer, object_, bool_, character -from .numeric import ndarray, compare_chararrays -from .numeric import array as narray -from numpy.core.multiarray import _vec_string -from numpy.core.overrides import set_module -from numpy.core import overrides -from numpy.compat import asbytes, long -import numpy - -__all__ = [ - 'equal', 'not_equal', 'greater_equal', 'less_equal', - 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', - 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', - 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', - 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', - 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', - 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', - 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', - 'array', 'asarray' - ] - - -_globalvar = 0 -if sys.version_info[0] >= 3: - _unicode = str - _bytes = bytes -else: - _unicode = unicode - _bytes = str -_len = len - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy.char') - - -def _use_unicode(*args): - """ - Helper function for determining the output type of some string - operations. - - For an operation on two ndarrays, if at least one is unicode, the - result should be unicode. - """ - for x in args: - if (isinstance(x, _unicode) or - issubclass(numpy.asarray(x).dtype.type, unicode_)): - return unicode_ - return string_ - -def _to_string_or_unicode_array(result): - """ - Helper function to cast a result back into a string or unicode array - if an object array must be used as an intermediary. - """ - return numpy.asarray(result.tolist()) - -def _clean_args(*args): - """ - Helper function for delegating arguments to Python string - functions. - - Many of the Python string operations that have optional arguments - do not use 'None' to indicate a default value. In these cases, - we need to remove all None arguments, and those following them. - """ - newargs = [] - for chk in args: - if chk is None: - break - newargs.append(chk) - return newargs - -def _get_num_chars(a): - """ - Helper function that returns the number of characters per field in - a string or unicode array. This is to abstract out the fact that - for a unicode array this is itemsize / 4. - """ - if issubclass(a.dtype.type, unicode_): - return a.itemsize // 4 - return a.itemsize - - -def _binary_op_dispatcher(x1, x2): - return (x1, x2) - - -@array_function_dispatch(_binary_op_dispatcher) -def equal(x1, x2): - """ - Return (x1 == x2) element-wise. - - Unlike `numpy.equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - not_equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '==', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def not_equal(x1, x2): - """ - Return (x1 != x2) element-wise. - - Unlike `numpy.not_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. 
This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '!=', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def greater_equal(x1, x2): - """ - Return (x1 >= x2) element-wise. - - Unlike `numpy.greater_equal`, this comparison is performed by - first stripping whitespace characters from the end of the string. - This behavior is provided for backward-compatibility with - numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '>=', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def less_equal(x1, x2): - """ - Return (x1 <= x2) element-wise. - - Unlike `numpy.less_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, greater, less - """ - return compare_chararrays(x1, x2, '<=', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def greater(x1, x2): - """ - Return (x1 > x2) element-wise. - - Unlike `numpy.greater`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, less_equal, less - """ - return compare_chararrays(x1, x2, '>', True) - - -@array_function_dispatch(_binary_op_dispatcher) -def less(x1, x2): - """ - Return (x1 < x2) element-wise. - - Unlike `numpy.less`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : ndarray or bool - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, less_equal, greater - """ - return compare_chararrays(x1, x2, '<', True) - - -def _unary_op_dispatcher(a): - return (a,) - - -@array_function_dispatch(_unary_op_dispatcher) -def str_len(a): - """ - Return len(a) element-wise.
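For example (an illustrative sketch):

>>> np.char.str_len(np.array(['abc', 'de']))  # sketch
array([3, 2])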
- - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of integers - - See also - -------- - __builtin__.len - """ - return _vec_string(a, integer, '__len__') - - -@array_function_dispatch(_binary_op_dispatcher) -def add(x1, x2): - """ - Return element-wise string concatenation for two arrays of str or unicode. - - Arrays `x1` and `x2` must have the same shape. - - Parameters - ---------- - x1 : array_like of str or unicode - Input array. - x2 : array_like of str or unicode - Input array. - - Returns - ------- - add : ndarray - Output array of `string_` or `unicode_`, depending on input types - of the same shape as `x1` and `x2`. - - """ - arr1 = numpy.asarray(x1) - arr2 = numpy.asarray(x2) - out_size = _get_num_chars(arr1) + _get_num_chars(arr2) - dtype = _use_unicode(arr1, arr2) - return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,)) - - -def _multiply_dispatcher(a, i): - return (a,) - - -@array_function_dispatch(_multiply_dispatcher) -def multiply(a, i): - """ - Return (a * i), that is string multiple concatenation, - element-wise. - - Values in `i` of less than 0 are treated as 0 (which yields an - empty string). - - Parameters - ---------- - a : array_like of str or unicode - - i : array_like of ints - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - """ - a_arr = numpy.asarray(a) - i_arr = numpy.asarray(i) - if not issubclass(i_arr.dtype.type, integer): - raise ValueError("Can only multiply by integers") - out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0) - return _vec_string( - a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,)) - - -def _mod_dispatcher(a, values): - return (a, values) - - -@array_function_dispatch(_mod_dispatcher) -def mod(a, values): - """ - Return (a % values), that is pre-Python 2.6 string formatting - (interpolation), element-wise for a pair of array_likes of str - or unicode. - - Parameters - ---------- - a : array_like of str or unicode - - values : array_like of values - These values will be element-wise interpolated into the string. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - See also - -------- - str.__mod__ - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, '__mod__', (values,))) - - -@array_function_dispatch(_unary_op_dispatcher) -def capitalize(a): - """ - Return a copy of `a` with only the first character of each element - capitalized. - - Calls `str.capitalize` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - Input array of strings to capitalize. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.capitalize - - Examples - -------- - >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c - array(['a1b2', '1b2a', 'b2a1', '2a1b'], - dtype='|S4') - >>> np.char.capitalize(c) - array(['A1b2', '1b2a', 'B2a1', '2a1b'], - dtype='|S4') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'capitalize') - - -def _center_dispatcher(a, width, fillchar=None): - return (a,) - - -@array_function_dispatch(_center_dispatcher) -def center(a, width, fillchar=' '): - """ - Return a copy of `a` with its elements centered in a string of - length `width`. - - Calls `str.center` element-wise.
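For example (an illustrative sketch):

>>> np.char.center(np.array(['a', 'bb']), 5, fillchar='-')  # sketch
array(['--a--', '-bb--'], dtype='<U5')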
- - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The padding character to use (default is space). - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.center - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar)) - - -def _count_dispatcher(a, sub, start=None, end=None): - return (a,) - - -@array_function_dispatch(_count_dispatcher) -def count(a, sub, start=0, end=None): - """ - Returns an array with the number of non-overlapping occurrences of - substring `sub` in the range [`start`, `end`]. - - Calls `str.count` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - The substring to search for. - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as slice - notation to specify the range in which to count. - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - str.count - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') - >>> np.char.count(c, 'A') - array([3, 1, 1]) - >>> np.char.count(c, 'aA') - array([3, 1, 0]) - >>> np.char.count(c, 'A', start=1, end=4) - array([2, 1, 1]) - >>> np.char.count(c, 'A', start=1, end=3) - array([1, 0, 0]) - - """ - return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end)) - - -def _code_dispatcher(a, encoding=None, errors=None): - return (a,) - - -@array_function_dispatch(_code_dispatcher) -def decode(a, encoding=None, errors=None): - """ - Calls `str.decode` element-wise. - - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the - :mod:`codecs` module. - - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.decode - - Notes - ----- - The type of the result will depend on the encoding specified. - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') - >>> np.char.encode(c, encoding='cp037') - array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@', - '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], - dtype='|S7') - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) - - -@array_function_dispatch(_code_dispatcher) -def encode(a, encoding=None, errors=None): - """ - Calls `str.encode` element-wise. - - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the codecs - module. - - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.encode - - Notes - ----- - The type of the result will depend on the encoding specified.
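For example (an illustrative sketch; ASCII-safe input assumed):

>>> np.char.encode(np.array(['aA']), encoding='ascii')  # sketch
array([b'aA'], dtype='|S2')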
-
-    """
-    return _to_string_or_unicode_array(
-        _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
-
-
-def _endswith_dispatcher(a, suffix, start=None, end=None):
-    return (a,)
-
-
-@array_function_dispatch(_endswith_dispatcher)
-def endswith(a, suffix, start=0, end=None):
-    """
-    Returns a boolean array which is `True` where the string element
-    in `a` ends with `suffix`, otherwise `False`.
-
-    Calls `str.endswith` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    suffix : str
-
-    start, end : int, optional
-        With optional `start`, test beginning at that position. With
-        optional `end`, stop comparing at that position.
-
-    Returns
-    -------
-    out : ndarray
-        Outputs an array of bools.
-
-    See also
-    --------
-    str.endswith
-
-    Examples
-    --------
-    >>> s = np.array(['foo', 'bar'])
-    >>> s[0] = 'foo'
-    >>> s[1] = 'bar'
-    >>> s
-    array(['foo', 'bar'], dtype='<U3')
-    >>> np.char.endswith(s, 'ar')
-    array([False,  True])
-    >>> np.char.endswith(s, 'a', start=1, end=2)
-    array([False,  True])
-
-    """
-    return _vec_string(
-        a, bool_, 'endswith', [suffix, start] + _clean_args(end))
-
-
-def _expandtabs_dispatcher(a, tabsize=None):
-    return (a,)
-
-
-@array_function_dispatch(_expandtabs_dispatcher)
-def expandtabs(a, tabsize=8):
-    """
-    Return a copy of each string element where all tab characters are
-    replaced by one or more spaces.
-
-    Calls `str.expandtabs` element-wise.
-
-    Return a copy of each string element where all tab characters are
-    replaced by one or more spaces, depending on the current column
-    and the given `tabsize`. The column number is reset to zero after
-    each newline occurring in the string. This doesn't understand other
-    non-printing characters or escape sequences.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-        Input array
-    tabsize : int, optional
-        Replace tabs with `tabsize` number of spaces. If not given defaults
-        to 8 spaces.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.expandtabs
-
-    """
-    return _to_string_or_unicode_array(
-        _vec_string(a, object_, 'expandtabs', (tabsize,)))
-
-
-@array_function_dispatch(_count_dispatcher)
-def find(a, sub, start=0, end=None):
-    """
-    For each element, return the lowest index in the string where
-    substring `sub` is found.
-
-    Calls `str.find` element-wise.
-
-    For each element, return the lowest index in the string where
-    substring `sub` is found, such that `sub` is contained in the
-    range [`start`, `end`].
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sub : str or unicode
-
-    start, end : int, optional
-        Optional arguments `start` and `end` are interpreted as in
-        slice notation.
-
-    Returns
-    -------
-    out : ndarray or int
-        Output array of ints. Returns -1 if `sub` is not found.
-
-    See also
-    --------
-    str.find
-
-    """
-    return _vec_string(
-        a, integer, 'find', [sub, start] + _clean_args(end))
-
-
-@array_function_dispatch(_count_dispatcher)
-def index(a, sub, start=0, end=None):
-    """
-    Like `find`, but raises `ValueError` when the substring is not found.
-
-    Calls `str.index` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sub : str or unicode
-
-    start, end : int, optional
-
-    Returns
-    -------
-    out : ndarray
-        Output array of ints.
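-
-    Raises
-    ------
-    ValueError
-        If `sub` is not found, matching the behaviour of the underlying
-        `str.index`.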
-
-    See also
-    --------
-    find, str.find
-
-    """
-    return _vec_string(
-        a, integer, 'index', [sub, start] + _clean_args(end))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isalnum(a):
-    """
-    Returns true for each element if all characters in the string are
-    alphanumeric and there is at least one character, false otherwise.
-
-    Calls `str.isalnum` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See also
-    --------
-    str.isalnum
-    """
-    return _vec_string(a, bool_, 'isalnum')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isalpha(a):
-    """
-    Returns true for each element if all characters in the string are
-    alphabetic and there is at least one character, false otherwise.
-
-    Calls `str.isalpha` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See also
-    --------
-    str.isalpha
-    """
-    return _vec_string(a, bool_, 'isalpha')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isdigit(a):
-    """
-    Returns true for each element if all characters in the string are
-    digits and there is at least one character, false otherwise.
-
-    Calls `str.isdigit` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See also
-    --------
-    str.isdigit
-    """
-    return _vec_string(a, bool_, 'isdigit')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def islower(a):
-    """
-    Returns true for each element if all cased characters in the
-    string are lowercase and there is at least one cased character,
-    false otherwise.
-
-    Calls `str.islower` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See also
-    --------
-    str.islower
-    """
-    return _vec_string(a, bool_, 'islower')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isspace(a):
-    """
-    Returns true for each element if there are only whitespace
-    characters in the string and there is at least one character,
-    false otherwise.
-
-    Calls `str.isspace` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See also
-    --------
-    str.isspace
-    """
-    return _vec_string(a, bool_, 'isspace')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def istitle(a):
-    """
-    Returns true for each element if the element is a titlecased
-    string and there is at least one character, false otherwise.
-
-    Calls `str.istitle` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See also
-    --------
-    str.istitle
-    """
-    return _vec_string(a, bool_, 'istitle')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isupper(a):
-    """
-    Returns true for each element if all cased characters in the
-    string are uppercase and there is at least one character, false
-    otherwise.
-
-    Calls `str.isupper` element-wise.
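-
-    For example, a minimal sketch (inputs and dtypes are assumed here):
-
-    >>> np.char.isupper(np.array(['AA', 'aA', '']))
-    array([ True, False, False])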
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of bools
-
-    See also
-    --------
-    str.isupper
-    """
-    return _vec_string(a, bool_, 'isupper')
-
-
-def _join_dispatcher(sep, seq):
-    return (sep, seq)
-
-
-@array_function_dispatch(_join_dispatcher)
-def join(sep, seq):
-    """
-    Return a string which is the concatenation of the strings in the
-    sequence `seq`.
-
-    Calls `str.join` element-wise.
-
-    Parameters
-    ----------
-    sep : array_like of str or unicode
-    seq : array_like of str or unicode
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input types
-
-    See also
-    --------
-    str.join
-    """
-    return _to_string_or_unicode_array(
-        _vec_string(sep, object_, 'join', (seq,)))
-
-
-def _just_dispatcher(a, width, fillchar=None):
-    return (a,)
-
-
-@array_function_dispatch(_just_dispatcher)
-def ljust(a, width, fillchar=' '):
-    """
-    Return an array with the elements of `a` left-justified in a
-    string of length `width`.
-
-    Calls `str.ljust` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    width : int
-        The length of the resulting strings
-    fillchar : str or unicode, optional
-        The character to use for padding
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.ljust
-
-    """
-    a_arr = numpy.asarray(a)
-    width_arr = numpy.asarray(width)
-    size = long(numpy.max(width_arr.flat))
-    if numpy.issubdtype(a_arr.dtype, numpy.string_):
-        fillchar = asbytes(fillchar)
-    return _vec_string(
-        a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def lower(a):
-    """
-    Return an array with the elements converted to lowercase.
-
-    Calls `str.lower` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.lower
-
-    Examples
-    --------
-    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
-    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
-    >>> np.char.lower(c)
-    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'lower')
-
-
-def _lstrip_dispatcher(a, chars=None):
-    return (a,)
-
-
-@array_function_dispatch(_lstrip_dispatcher)
-def lstrip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the leading characters
-    removed.
-
-    Calls `str.lstrip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    chars : str or unicode, optional
-        The `chars` argument is a string specifying the set of
-        characters to be removed. If omitted or None, the `chars`
-        argument defaults to removing whitespace. The `chars` argument
-        is not a prefix; rather, all combinations of its values are
-        stripped.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.lstrip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
-    >>> c
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-    >>> np.char.lstrip(c, 'a')
-    array(['AaAaA', '  aA  ', 'bBABba'], dtype='<U7')
-    >>> np.char.lstrip(c, 'A') # leaves c unchanged
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
-    ... # XXX: is this a regression? This used to return True
-    ... # np.char.lstrip(c,'') does not modify c at all.
-    False
-    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
-    True
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
-
-
-def _partition_dispatcher(a, sep):
-    return (a,)
-
-
-@array_function_dispatch(_partition_dispatcher)
-def partition(a, sep):
-    """
-    Partition each element in `a` around `sep`.
-
-    Calls `str.partition` element-wise.
-
-    For each element in `a`, split the element at the first
-    occurrence of `sep`, and return 3 strings containing the part
-    before the separator, the separator itself, and the part after
-    the separator. If the separator is not found, return 3 strings
-    containing the string itself, followed by two empty strings.
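-
-    For example, a minimal sketch (the output dtype is an assumption):
-
-    >>> np.char.partition(np.array(['a-b-c']), '-')
-    array([['a', '-', 'b-c']], dtype='<U3')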
- - Parameters - ---------- - a : array_like, {str, unicode} - Input array - sep : {str, unicode} - Separator to split each string element in `a`. - - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type. - The output array will have an extra dimension with 3 - elements per input element. - - See also - -------- - str.partition - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'partition', (sep,))) - - -def _replace_dispatcher(a, old, new, count=None): - return (a,) - - -@array_function_dispatch(_replace_dispatcher) -def replace(a, old, new, count=None): - """ - For each element in `a`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - Calls `str.replace` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - old, new : str or unicode - - count : int, optional - If the optional argument `count` is given, only the first - `count` occurrences are replaced. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.replace - - """ - return _to_string_or_unicode_array( - _vec_string( - a, object_, 'replace', [old, new] + _clean_args(count))) - - -@array_function_dispatch(_count_dispatcher) -def rfind(a, sub, start=0, end=None): - """ - For each element in `a`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. - - Calls `str.rfind` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as in - slice notation. - - Returns - ------- - out : ndarray - Output array of ints. Return -1 on failure. - - See also - -------- - str.rfind - - """ - return _vec_string( - a, integer, 'rfind', [sub, start] + _clean_args(end)) - - -@array_function_dispatch(_count_dispatcher) -def rindex(a, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - Calls `str.rindex` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - rfind, str.rindex - - """ - return _vec_string( - a, integer, 'rindex', [sub, start] + _clean_args(end)) - - -@array_function_dispatch(_just_dispatcher) -def rjust(a, width, fillchar=' '): - """ - Return an array with the elements of `a` right-justified in a - string of length `width`. - - Calls `str.rjust` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The character to use for padding - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.rjust - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) - - -@array_function_dispatch(_partition_dispatcher) -def rpartition(a, sep): - """ - Partition (split) each element around the right-most separator. - - Calls `str.rpartition` element-wise. 
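-
-    For example, a minimal sketch (the output dtype is an assumption):
-
-    >>> np.char.rpartition(np.array(['a-b-c']), '-')
-    array([['a-b', '-', 'c']], dtype='<U3')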
-
-    For each element in `a`, split the element at the last
-    occurrence of `sep`, and return 3 strings containing the part
-    before the separator, the separator itself, and the part after
-    the separator. If the separator is not found, return 3 strings
-    containing the string itself, followed by two empty strings.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-        Input array
-    sep : str or unicode
-        Right-most separator to split each element in array.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of string or unicode, depending on input
-        type. The output array will have an extra dimension with
-        3 elements per input element.
-
-    See also
-    --------
-    str.rpartition
-
-    """
-    return _to_string_or_unicode_array(
-        _vec_string(a, object_, 'rpartition', (sep,)))
-
-
-def _split_dispatcher(a, sep=None, maxsplit=None):
-    return (a,)
-
-
-@array_function_dispatch(_split_dispatcher)
-def rsplit(a, sep=None, maxsplit=None):
-    """
-    For each element in `a`, return a list of the words in the
-    string, using `sep` as the delimiter string.
-
-    Calls `str.rsplit` element-wise.
-
-    Except for splitting from the right, `rsplit`
-    behaves like `split`.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sep : str or unicode, optional
-        If `sep` is not specified or None, any whitespace string
-        is a separator.
-    maxsplit : int, optional
-        If `maxsplit` is given, at most `maxsplit` splits are done,
-        the rightmost ones.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See also
-    --------
-    str.rsplit, split
-
-    """
-    # This will return an array of lists of different sizes, so we
-    # leave it as an object array
-    return _vec_string(
-        a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
-
-
-def _strip_dispatcher(a, chars=None):
-    return (a,)
-
-
-@array_function_dispatch(_strip_dispatcher)
-def rstrip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the trailing
-    characters removed.
-
-    Calls `str.rstrip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    chars : str or unicode, optional
-        The `chars` argument is a string specifying the set of
-        characters to be removed. If omitted or None, the `chars`
-        argument defaults to removing whitespace. The `chars` argument
-        is not a suffix; rather, all combinations of its values are
-        stripped.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.rstrip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
-    array(['aAaAaA', 'abBABba'],
-        dtype='|S7')
-    >>> np.char.rstrip(c, b'a')
-    array(['aAaAaA', 'abBABb'],
-        dtype='|S7')
-    >>> np.char.rstrip(c, b'A')
-    array(['aAaAa', 'abBABba'],
-        dtype='|S7')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
-
-
-@array_function_dispatch(_split_dispatcher)
-def split(a, sep=None, maxsplit=None):
-    """
-    For each element in `a`, return a list of the words in the
-    string, using `sep` as the delimiter string.
-
-    Calls `str.split` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sep : str or unicode, optional
-        If `sep` is not specified or None, any whitespace string is a
-        separator.
-
-    maxsplit : int, optional
-        If `maxsplit` is given, at most `maxsplit` splits are done.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See also
-    --------
-    str.split, rsplit
-
-    """
-    # This will return an array of lists of different sizes, so we
-    # leave it as an object array
-    return _vec_string(
-        a, object_, 'split', [sep] + _clean_args(maxsplit))
-
-
-def _splitlines_dispatcher(a, keepends=None):
-    return (a,)
-
-
-@array_function_dispatch(_splitlines_dispatcher)
-def splitlines(a, keepends=None):
-    """
-    For each element in `a`, return a list of the lines in the
-    element, breaking at line boundaries.
-
-    Calls `str.splitlines` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    keepends : bool, optional
-        Line breaks are not included in the resulting list unless
-        keepends is given and true.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See also
-    --------
-    str.splitlines
-
-    """
-    return _vec_string(
-        a, object_, 'splitlines', _clean_args(keepends))
-
-
-def _startswith_dispatcher(a, prefix, start=None, end=None):
-    return (a,)
-
-
-@array_function_dispatch(_startswith_dispatcher)
-def startswith(a, prefix, start=0, end=None):
-    """
-    Returns a boolean array which is `True` where the string element
-    in `a` starts with `prefix`, otherwise `False`.
-
-    Calls `str.startswith` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    prefix : str
-
-    start, end : int, optional
-        With optional `start`, test beginning at that position. With
-        optional `end`, stop comparing at that position.
-
-    Returns
-    -------
-    out : ndarray
-        Array of booleans
-
-    See also
-    --------
-    str.startswith
-
-    """
-    return _vec_string(
-        a, bool_, 'startswith', [prefix, start] + _clean_args(end))
-
-
-@array_function_dispatch(_strip_dispatcher)
-def strip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the leading and
-    trailing characters removed.
-
-    Calls `str.strip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    chars : str or unicode, optional
-        The `chars` argument is a string specifying the set of
-        characters to be removed. If omitted or None, the `chars`
-        argument defaults to removing whitespace. The `chars` argument
-        is not a prefix or suffix; rather, all combinations of its
-        values are stripped.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.strip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
-    >>> c
-    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
-    >>> np.char.strip(c)
-    array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
-    >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
-    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
-    >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
-    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'strip', (chars,))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def swapcase(a):
-    """
-    Return element-wise a copy of the string with
-    uppercase characters converted to lowercase and vice versa.
-
-    Calls `str.swapcase` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.swapcase
-
-    Examples
-    --------
-    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
-    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
-        dtype='|S5')
-    >>> np.char.swapcase(c)
-    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
-        dtype='|S5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'swapcase')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def title(a):
-    """
-    Return element-wise title cased version of string or unicode.
-
-    Title case words start with uppercase characters, all remaining cased
-    characters are lowercase.
-
-    Calls `str.title` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.title
-
-    Examples
-    --------
-    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
-    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
-        dtype='|S5')
-    >>> np.char.title(c)
-    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
-        dtype='|S5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'title')
-
-
-def _translate_dispatcher(a, table, deletechars=None):
-    return (a,)
-
-
-@array_function_dispatch(_translate_dispatcher)
-def translate(a, table, deletechars=None):
-    """
-    For each element in `a`, return a copy of the string where all
-    characters occurring in the optional argument `deletechars` are
-    removed, and the remaining characters have been mapped through the
-    given translation table.
-
-    Calls `str.translate` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    table : str of length 256
-
-    deletechars : str
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.translate
-
-    """
-    a_arr = numpy.asarray(a)
-    if issubclass(a_arr.dtype.type, unicode_):
-        return _vec_string(
-            a_arr, a_arr.dtype, 'translate', (table,))
-    else:
-        return _vec_string(
-            a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def upper(a):
-    """
-    Return an array with the elements converted to uppercase.
-
-    Calls `str.upper` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.upper
-
-    Examples
-    --------
-    >>> c = np.array(['a1b c', '1bca', 'bca1']); c
-    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
-    >>> np.char.upper(c)
-    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'upper')
-
-
-def _zfill_dispatcher(a, width):
-    return (a,)
-
-
-@array_function_dispatch(_zfill_dispatcher)
-def zfill(a, width):
-    """
-    Return the numeric string left-filled with zeros
-
-    Calls `str.zfill` element-wise.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-    width : int
-        Width of string to left-fill elements in `a`.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.zfill
-
-    """
-    a_arr = numpy.asarray(a)
-    width_arr = numpy.asarray(width)
-    size = long(numpy.max(width_arr.flat))
-    return _vec_string(
-        a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isnumeric(a):
-    """
-    For each element, return True if there are only numeric
-    characters in the element.
-
-    Calls `unicode.isnumeric` element-wise.
-
-    Numeric characters include digit characters, and all characters
-    that have the Unicode numeric value property, e.g. ``U+2155,
-    VULGAR FRACTION ONE FIFTH``.
-
-    Parameters
-    ----------
-    a : array_like, unicode
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, bool
-        Array of booleans of same shape as `a`.
-
-    See also
-    --------
-    unicode.isnumeric
-
-    """
-    if _use_unicode(a) != unicode_:
-        raise TypeError("isnumeric is only available for Unicode strings and arrays")
-    return _vec_string(a, bool_, 'isnumeric')
-
-
-@array_function_dispatch(_unary_op_dispatcher)
-def isdecimal(a):
-    """
-    For each element, return True if there are only decimal
-    characters in the element.
-
-    Calls `unicode.isdecimal` element-wise.
-
-    Decimal characters include digit characters, and all characters
-    that can be used to form decimal-radix numbers,
-    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
-
-    Parameters
-    ----------
-    a : array_like, unicode
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, bool
-        Array of booleans identical in shape to `a`.
-
-    See also
-    --------
-    unicode.isdecimal
-
-    """
-    if _use_unicode(a) != unicode_:
-        raise TypeError("isdecimal is only available for Unicode strings and arrays")
-    return _vec_string(a, bool_, 'isdecimal')
-
-
-_globalvar = 0
-
-
-class chararray(ndarray):
-    """
-    chararray(shape, itemsize=1, unicode=False, buffer=None,
-              offset=0, strides=None, order=None)
-
-    Provides a convenient view on arrays of string and unicode values.
-
-    .. note::
-       The `chararray` class exists for backwards compatibility with
-       Numarray, it is not recommended for new development. Starting from
-       numpy 1.4, if one needs arrays of strings, it is recommended to use
-       arrays of `dtype` `object_`, `string_` or `unicode_`, and use the
-       free functions in the `numpy.char` module for fast vectorized
-       string operations.
-
-    Versus a regular NumPy array of type `str` or `unicode`, this
-    class adds the following functionality:
-
-    1) values automatically have whitespace removed from the end
-       when indexed
-
-    2) comparison operators automatically remove whitespace from the
-       end when comparing values
-
-    3) vectorized string operations are provided as methods
-       (e.g. `endswith`) and infix operators (e.g. ``"+", "*", "%"``)
-
-    chararrays should be created using `numpy.char.array` or
-    `numpy.char.asarray`, rather than this constructor directly.
-
-    This constructor creates the array, using `buffer` (with `offset`
-    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
-    constructs a new array with `strides` in "C order", unless both
-    ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
-    is in "Fortran order".
-
-    Methods
-    -------
-    astype
-    argsort
-    copy
-    count
-    decode
-    dump
-    dumps
-    encode
-    endswith
-    expandtabs
-    fill
-    find
-    flatten
-    getfield
-    index
-    isalnum
-    isalpha
-    isdecimal
-    isdigit
-    islower
-    isnumeric
-    isspace
-    istitle
-    isupper
-    item
-    join
-    ljust
-    lower
-    lstrip
-    nonzero
-    put
-    ravel
-    repeat
-    replace
-    reshape
-    resize
-    rfind
-    rindex
-    rjust
-    rsplit
-    rstrip
-    searchsorted
-    setfield
-    setflags
-    sort
-    split
-    splitlines
-    squeeze
-    startswith
-    strip
-    swapaxes
-    swapcase
-    take
-    title
-    tofile
-    tolist
-    tostring
-    translate
-    transpose
-    upper
-    view
-    zfill
-
-    Parameters
-    ----------
-    shape : tuple
-        Shape of the array.
-    itemsize : int, optional
-        Length of each array element, in number of characters. Default is 1.
-    unicode : bool, optional
-        Are the array elements of type unicode (True) or string (False).
-        Default is False.
-    buffer : int, optional
-        Memory address of the start of the array data. Default is None,
-        in which case a new array is created.
-    offset : int, optional
-        Fixed stride displacement from the beginning of an axis.
-        Default is 0. Needs to be >=0.
-    strides : array_like of ints, optional
-        Strides for the array (see `ndarray.strides` for full description).
-        Default is None.
- order : {'C', 'F'}, optional - The order in which the array data is stored in memory: 'C' -> - "row major" order (the default), 'F' -> "column major" - (Fortran) order. - - Examples - -------- - >>> charar = np.chararray((3, 3)) - >>> charar[:] = 'a' - >>> charar - chararray([[b'a', b'a', b'a'], - [b'a', b'a', b'a'], - [b'a', b'a', b'a']], dtype='|S1') - - >>> charar = np.chararray(charar.shape, itemsize=5) - >>> charar[:] = 'abc' - >>> charar - chararray([[b'abc', b'abc', b'abc'], - [b'abc', b'abc', b'abc'], - [b'abc', b'abc', b'abc']], dtype='|S5') - - """ - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, - offset=0, strides=None, order='C'): - global _globalvar - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - # force itemsize to be a Python long, since using NumPy integer - # types results in itemsize.itemsize being used as the size of - # strings in the new array. - itemsize = long(itemsize) - - if sys.version_info[0] >= 3 and isinstance(buffer, _unicode): - # On Py3, unicode objects do not have the buffer interface - filler = buffer - buffer = None - else: - filler = None - - _globalvar = 1 - if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - order=order) - else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - buffer=buffer, - offset=offset, strides=strides, - order=order) - if filler is not None: - self[...] = filler - _globalvar = 0 - return self - - def __array_finalize__(self, obj): - # The b is a special case because it is used for reconstructing. - if not _globalvar and self.dtype.char not in 'SUbc': - raise ValueError("Can only create a chararray from string data.") - - def __getitem__(self, obj): - val = ndarray.__getitem__(self, obj) - - if isinstance(val, character): - temp = val.rstrip() - if _len(temp) == 0: - val = '' - else: - val = temp - - return val - - # IMPLEMENTATION NOTE: Most of the methods of this class are - # direct delegations to the free functions in this module. - # However, those that return an array of strings should instead - # return a chararray, so some extra wrapping is required. - - def __eq__(self, other): - """ - Return (self == other) element-wise. - - See also - -------- - equal - """ - return equal(self, other) - - def __ne__(self, other): - """ - Return (self != other) element-wise. - - See also - -------- - not_equal - """ - return not_equal(self, other) - - def __ge__(self, other): - """ - Return (self >= other) element-wise. - - See also - -------- - greater_equal - """ - return greater_equal(self, other) - - def __le__(self, other): - """ - Return (self <= other) element-wise. - - See also - -------- - less_equal - """ - return less_equal(self, other) - - def __gt__(self, other): - """ - Return (self > other) element-wise. - - See also - -------- - greater - """ - return greater(self, other) - - def __lt__(self, other): - """ - Return (self < other) element-wise. - - See also - -------- - less - """ - return less(self, other) - - def __add__(self, other): - """ - Return (self + other), that is string concatenation, - element-wise for a pair of array_likes of str or unicode. - - See also - -------- - add - """ - return asarray(add(self, other)) - - def __radd__(self, other): - """ - Return (other + self), that is string concatenation, - element-wise for a pair of array_likes of `string_` or `unicode_`. 
-
-        See also
-        --------
-        add
-        """
-        return asarray(add(numpy.asarray(other), self))
-
-    def __mul__(self, i):
-        """
-        Return (self * i), that is string multiple concatenation,
-        element-wise.
-
-        See also
-        --------
-        multiply
-        """
-        return asarray(multiply(self, i))
-
-    def __rmul__(self, i):
-        """
-        Return (self * i), that is string multiple concatenation,
-        element-wise.
-
-        See also
-        --------
-        multiply
-        """
-        return asarray(multiply(self, i))
-
-    def __mod__(self, i):
-        """
-        Return (self % i), that is pre-Python 2.6 string formatting
-        (interpolation), element-wise for a pair of array_likes of `string_`
-        or `unicode_`.
-
-        See also
-        --------
-        mod
-        """
-        return asarray(mod(self, i))
-
-    def __rmod__(self, other):
-        return NotImplemented
-
-    def argsort(self, axis=-1, kind=None, order=None):
-        """
-        Return the indices that sort the array lexicographically.
-
-        For full documentation see `numpy.argsort`, for which this method is
-        in fact merely a "thin wrapper."
-
-        Examples
-        --------
-        >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
-        >>> c = c.view(np.chararray); c
-        chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
-              dtype='|S5')
-        >>> c[c.argsort()]
-        chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
-              dtype='|S5')
-
-        """
-        return self.__array__().argsort(axis, kind, order)
-    argsort.__doc__ = ndarray.argsort.__doc__
-
-    def capitalize(self):
-        """
-        Return a copy of `self` with only the first character of each element
-        capitalized.
-
-        See also
-        --------
-        char.capitalize
-
-        """
-        return asarray(capitalize(self))
-
-    def center(self, width, fillchar=' '):
-        """
-        Return a copy of `self` with its elements centered in a
-        string of length `width`.
-
-        See also
-        --------
-        center
-        """
-        return asarray(center(self, width, fillchar))
-
-    def count(self, sub, start=0, end=None):
-        """
-        Returns an array with the number of non-overlapping occurrences of
-        substring `sub` in the range [`start`, `end`].
-
-        See also
-        --------
-        char.count
-
-        """
-        return count(self, sub, start, end)
-
-    def decode(self, encoding=None, errors=None):
-        """
-        Calls `str.decode` element-wise.
-
-        See also
-        --------
-        char.decode
-
-        """
-        return decode(self, encoding, errors)
-
-    def encode(self, encoding=None, errors=None):
-        """
-        Calls `str.encode` element-wise.
-
-        See also
-        --------
-        char.encode
-
-        """
-        return encode(self, encoding, errors)
-
-    def endswith(self, suffix, start=0, end=None):
-        """
-        Returns a boolean array which is `True` where the string element
-        in `self` ends with `suffix`, otherwise `False`.
-
-        See also
-        --------
-        char.endswith
-
-        """
-        return endswith(self, suffix, start, end)
-
-    def expandtabs(self, tabsize=8):
-        """
-        Return a copy of each string element where all tab characters are
-        replaced by one or more spaces.
-
-        See also
-        --------
-        char.expandtabs
-
-        """
-        return asarray(expandtabs(self, tabsize))
-
-    def find(self, sub, start=0, end=None):
-        """
-        For each element, return the lowest index in the string where
-        substring `sub` is found.
-
-        See also
-        --------
-        char.find
-
-        """
-        return find(self, sub, start, end)
-
-    def index(self, sub, start=0, end=None):
-        """
-        Like `find`, but raises `ValueError` when the substring is not found.
-
-        See also
-        --------
-        char.index
-
-        """
-        return index(self, sub, start, end)
-
-    def isalnum(self):
-        """
-        Returns true for each element if all characters in the string
-        are alphanumeric and there is at least one character, false
-        otherwise.
- - See also - -------- - char.isalnum - - """ - return isalnum(self) - - def isalpha(self): - """ - Returns true for each element if all characters in the string - are alphabetic and there is at least one character, false - otherwise. - - See also - -------- - char.isalpha - - """ - return isalpha(self) - - def isdigit(self): - """ - Returns true for each element if all characters in the string are - digits and there is at least one character, false otherwise. - - See also - -------- - char.isdigit - - """ - return isdigit(self) - - def islower(self): - """ - Returns true for each element if all cased characters in the - string are lowercase and there is at least one cased character, - false otherwise. - - See also - -------- - char.islower - - """ - return islower(self) - - def isspace(self): - """ - Returns true for each element if there are only whitespace - characters in the string and there is at least one character, - false otherwise. - - See also - -------- - char.isspace - - """ - return isspace(self) - - def istitle(self): - """ - Returns true for each element if the element is a titlecased - string and there is at least one character, false otherwise. - - See also - -------- - char.istitle - - """ - return istitle(self) - - def isupper(self): - """ - Returns true for each element if all cased characters in the - string are uppercase and there is at least one character, false - otherwise. - - See also - -------- - char.isupper - - """ - return isupper(self) - - def join(self, seq): - """ - Return a string which is the concatenation of the strings in the - sequence `seq`. - - See also - -------- - char.join - - """ - return join(self, seq) - - def ljust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` left-justified in a - string of length `width`. - - See also - -------- - char.ljust - - """ - return asarray(ljust(self, width, fillchar)) - - def lower(self): - """ - Return an array with the elements of `self` converted to - lowercase. - - See also - -------- - char.lower - - """ - return asarray(lower(self)) - - def lstrip(self, chars=None): - """ - For each element in `self`, return a copy with the leading characters - removed. - - See also - -------- - char.lstrip - - """ - return asarray(lstrip(self, chars)) - - def partition(self, sep): - """ - Partition each element in `self` around `sep`. - - See also - -------- - partition - """ - return asarray(partition(self, sep)) - - def replace(self, old, new, count=None): - """ - For each element in `self`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - See also - -------- - char.replace - - """ - return asarray(replace(self, old, new, count)) - - def rfind(self, sub, start=0, end=None): - """ - For each element in `self`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. - - See also - -------- - char.rfind - - """ - return rfind(self, sub, start, end) - - def rindex(self, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - See also - -------- - char.rindex - - """ - return rindex(self, sub, start, end) - - def rjust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` - right-justified in a string of length `width`. 
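-
-        For example, a minimal sketch (dtype is an assumption):
-
-        >>> np.char.asarray(['5', '27']).rjust(3, '0')
-        chararray(['005', '027'], dtype='<U3')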
- - See also - -------- - char.rjust - - """ - return asarray(rjust(self, width, fillchar)) - - def rpartition(self, sep): - """ - Partition each element in `self` around `sep`. - - See also - -------- - rpartition - """ - return asarray(rpartition(self, sep)) - - def rsplit(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in - the string, using `sep` as the delimiter string. - - See also - -------- - char.rsplit - - """ - return rsplit(self, sep, maxsplit) - - def rstrip(self, chars=None): - """ - For each element in `self`, return a copy with the trailing - characters removed. - - See also - -------- - char.rstrip - - """ - return asarray(rstrip(self, chars)) - - def split(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in the - string, using `sep` as the delimiter string. - - See also - -------- - char.split - - """ - return split(self, sep, maxsplit) - - def splitlines(self, keepends=None): - """ - For each element in `self`, return a list of the lines in the - element, breaking at line boundaries. - - See also - -------- - char.splitlines - - """ - return splitlines(self, keepends) - - def startswith(self, prefix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `self` starts with `prefix`, otherwise `False`. - - See also - -------- - char.startswith - - """ - return startswith(self, prefix, start, end) - - def strip(self, chars=None): - """ - For each element in `self`, return a copy with the leading and - trailing characters removed. - - See also - -------- - char.strip - - """ - return asarray(strip(self, chars)) - - def swapcase(self): - """ - For each element in `self`, return a copy of the string with - uppercase characters converted to lowercase and vice versa. - - See also - -------- - char.swapcase - - """ - return asarray(swapcase(self)) - - def title(self): - """ - For each element in `self`, return a titlecased version of the - string: words start with uppercase characters, all remaining cased - characters are lowercase. - - See also - -------- - char.title - - """ - return asarray(title(self)) - - def translate(self, table, deletechars=None): - """ - For each element in `self`, return a copy of the string where - all characters occurring in the optional argument - `deletechars` are removed, and the remaining characters have - been mapped through the given translation table. - - See also - -------- - char.translate - - """ - return asarray(translate(self, table, deletechars)) - - def upper(self): - """ - Return an array with the elements of `self` converted to - uppercase. - - See also - -------- - char.upper - - """ - return asarray(upper(self)) - - def zfill(self, width): - """ - Return the numeric string left-filled with zeros in a string of - length `width`. - - See also - -------- - char.zfill - - """ - return asarray(zfill(self, width)) - - def isnumeric(self): - """ - For each element in `self`, return True if there are only - numeric characters in the element. - - See also - -------- - char.isnumeric - - """ - return isnumeric(self) - - def isdecimal(self): - """ - For each element in `self`, return True if there are only - decimal characters in the element. - - See also - -------- - char.isdecimal - - """ - return isdecimal(self) - - -def array(obj, itemsize=None, copy=True, unicode=None, order=None): - """ - Create a `chararray`. - - .. note:: - This class is provided for numarray backward-compatibility. 
-       New code (not concerned with numarray compatibility) should use
-       arrays of type `string_` or `unicode_` and use the free functions
-       in :mod:`numpy.char <numpy.core.defchararray>` for fast
-       vectorized string operations instead.
-
-    Versus a regular NumPy array of type `str` or `unicode`, this
-    class adds the following functionality:
-
-    1) values automatically have whitespace removed from the end
-       when indexed
-
-    2) comparison operators automatically remove whitespace from the
-       end when comparing values
-
-    3) vectorized string operations are provided as methods
-       (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
-
-    Parameters
-    ----------
-    obj : array of str or unicode-like
-
-    itemsize : int, optional
-        `itemsize` is the number of characters per scalar in the
-        resulting array. If `itemsize` is None, and `obj` is an
-        object array or a Python list, the `itemsize` will be
-        automatically determined. If `itemsize` is provided and `obj`
-        is of type str or unicode, then the `obj` string will be
-        chunked into `itemsize` pieces.
-
-    copy : bool, optional
-        If true (default), then the object is copied. Otherwise, a copy
-        will only be made if __array__ returns a copy, if obj is a
-        nested sequence, or if a copy is needed to satisfy any of the other
-        requirements (`itemsize`, unicode, `order`, etc.).
-
-    unicode : bool, optional
-        When true, the resulting `chararray` can contain Unicode
-        characters, when false only 8-bit characters. If unicode is
-        None and `obj` is one of the following:
-
-        - a `chararray`,
-        - an ndarray of type `str` or `unicode`
-        - a Python str or unicode object,
-
-        then the unicode setting of the output array will be
-        automatically determined.
-
-    order : {'C', 'F', 'A'}, optional
-        Specify the order of the array. If order is 'C' (default), then the
-        array will be in C-contiguous order (last-index varies the
-        fastest). If order is 'F', then the returned array
-        will be in Fortran-contiguous order (first-index varies the
-        fastest). If order is 'A', then the returned array may
-        be in any order (either C-, Fortran-contiguous, or even
-        discontiguous).
-    """
-    if isinstance(obj, (_bytes, _unicode)):
-        if unicode is None:
-            if isinstance(obj, _unicode):
-                unicode = True
-            else:
-                unicode = False
-
-        if itemsize is None:
-            itemsize = _len(obj)
-        shape = _len(obj) // itemsize
-
-        if unicode:
-            if sys.maxunicode == 0xffff:
-                # On a narrow Python build, the buffer for Unicode
-                # strings is UCS2, which doesn't match the buffer for
-                # NumPy Unicode types, which is ALWAYS UCS4.
-                # Therefore, we need to convert the buffer. On Python
-                # 2.6 and later, we can use the utf_32 codec. Earlier
-                # versions don't have that codec, so we convert to a
-                # numerical array that matches the input buffer, and
-                # then use NumPy to convert it to UCS4. All of this
-                # should happen in native endianness.
-                obj = obj.encode('utf_32')
-            else:
-                obj = _unicode(obj)
-        else:
-            # Let the default Unicode -> string encoding (if any) take
-            # precedence.
-            obj = _bytes(obj)
-
-        return chararray(shape, itemsize=itemsize, unicode=unicode,
-                         buffer=obj, order=order)
-
-    if isinstance(obj, (list, tuple)):
-        obj = numpy.asarray(obj)
-
-    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
-        # If we just have a vanilla chararray, create a chararray
-        # view around it.
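-        # (Illustrative, assumed example: np.char.array(np.array(['ab'], dtype='S2'))
-        # takes this branch; the view below avoids a copy unless the itemsize,
-        # unicode, or order checks further down force an astype.)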
-        if not isinstance(obj, chararray):
-            obj = obj.view(chararray)
-
-        if itemsize is None:
-            itemsize = obj.itemsize
-            # itemsize is in 8-bit chars, so for Unicode, we need
-            # to divide by the size of a single Unicode character,
-            # which for NumPy is always 4
-            if issubclass(obj.dtype.type, unicode_):
-                itemsize //= 4
-
-        if unicode is None:
-            if issubclass(obj.dtype.type, unicode_):
-                unicode = True
-            else:
-                unicode = False
-
-        if unicode:
-            dtype = unicode_
-        else:
-            dtype = string_
-
-        if order is not None:
-            obj = numpy.asarray(obj, order=order)
-        if (copy or
-                (itemsize != obj.itemsize) or
-                (not unicode and isinstance(obj, unicode_)) or
-                (unicode and isinstance(obj, string_))):
-            obj = obj.astype((dtype, long(itemsize)))
-        return obj
-
-    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
-        if itemsize is None:
-            # Since no itemsize was specified, convert the input array to
-            # a list so the ndarray constructor will automatically
-            # determine the itemsize for us.
-            obj = obj.tolist()
-            # Fall through to the default case
-
-    if unicode:
-        dtype = unicode_
-    else:
-        dtype = string_
-
-    if itemsize is None:
-        val = narray(obj, dtype=dtype, order=order, subok=True)
-    else:
-        val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
-    return val.view(chararray)
-
-
-def asarray(obj, itemsize=None, unicode=None, order=None):
-    """
-    Convert the input to a `chararray`, copying the data only if
-    necessary.
-
-    Versus a regular NumPy array of type `str` or `unicode`, this
-    class adds the following functionality:
-
-    1) values automatically have whitespace removed from the end
-       when indexed
-
-    2) comparison operators automatically remove whitespace from the
-       end when comparing values
-
-    3) vectorized string operations are provided as methods
-       (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
-
-    Parameters
-    ----------
-    obj : array of str or unicode-like
-
-    itemsize : int, optional
-        `itemsize` is the number of characters per scalar in the
-        resulting array. If `itemsize` is None, and `obj` is an
-        object array or a Python list, the `itemsize` will be
-        automatically determined. If `itemsize` is provided and `obj`
-        is of type str or unicode, then the `obj` string will be
-        chunked into `itemsize` pieces.
-
-    unicode : bool, optional
-        When true, the resulting `chararray` can contain Unicode
-        characters, when false only 8-bit characters. If unicode is
-        None and `obj` is one of the following:
-
-        - a `chararray`,
-        - an ndarray of type `str` or `unicode`
-        - a Python str or unicode object,
-
-        then the unicode setting of the output array will be
-        automatically determined.
-
-    order : {'C', 'F'}, optional
-        Specify the order of the array. If order is 'C' (default), then the
-        array will be in C-contiguous order (last-index varies the
-        fastest). If order is 'F', then the returned array
-        will be in Fortran-contiguous order (first-index varies the
-        fastest).
-    """
-    return array(obj, itemsize, copy=False,
-                 unicode=unicode, order=order)
diff --git a/venv/lib/python3.7/site-packages/numpy/core/einsumfunc.py b/venv/lib/python3.7/site-packages/numpy/core/einsumfunc.py
deleted file mode 100644
index 3412c3f..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/einsumfunc.py
+++ /dev/null
@@ -1,1432 +0,0 @@
-"""
-Implementation of optimized einsum.
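-
-A minimal usage sketch (illustrative only; ``optimize=True`` engages the
-path search implemented below):
-
-    >>> import numpy as np
-    >>> a = np.ones((2, 3)); b = np.ones((3, 4))
-    >>> np.einsum('ij,jk->ik', a, b, optimize=True).shape
-    (2, 4)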
- -""" -from __future__ import division, absolute_import, print_function - -import itertools - -from numpy.compat import basestring -from numpy.core.multiarray import c_einsum -from numpy.core.numeric import asanyarray, tensordot -from numpy.core.overrides import array_function_dispatch - -__all__ = ['einsum', 'einsum_path'] - -einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' -einsum_symbols_set = set(einsum_symbols) - - -def _flop_count(idx_contraction, inner, num_terms, size_dictionary): - """ - Computes the number of FLOPS in the contraction. - - Parameters - ---------- - idx_contraction : iterable - The indices involved in the contraction - inner : bool - Does this contraction require an inner product? - num_terms : int - The number of terms in a contraction - size_dictionary : dict - The size of each of the indices in idx_contraction - - Returns - ------- - flop_count : int - The total number of FLOPS required for the contraction. - - Examples - -------- - - >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) - 30 - - >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) - 60 - - """ - - overall_size = _compute_size_by_dict(idx_contraction, size_dictionary) - op_factor = max(1, num_terms - 1) - if inner: - op_factor += 1 - - return overall_size * op_factor - -def _compute_size_by_dict(indices, idx_dict): - """ - Computes the product of the elements in indices based on the dictionary - idx_dict. - - Parameters - ---------- - indices : iterable - Indices to base the product on. - idx_dict : dictionary - Dictionary of index sizes - - Returns - ------- - ret : int - The resulting product. - - Examples - -------- - >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) - 90 - - """ - ret = 1 - for i in indices: - ret *= idx_dict[i] - return ret - - -def _find_contraction(positions, input_sets, output_set): - """ - Finds the contraction for a given set of input and output sets. - - Parameters - ---------- - positions : iterable - Integer positions of terms used in the contraction. 
- input_sets : list - List of sets that represent the lhs side of the einsum subscript - output_set : set - Set that represents the rhs side of the overall einsum subscript - - Returns - ------- - new_result : set - The indices of the resulting contraction - remaining : list - List of sets that have not been contracted, the new set is appended to - the end of this list - idx_removed : set - Indices removed from the entire contraction - idx_contraction : set - The indices used in the current contraction - - Examples - -------- - - # A simple dot product test case - >>> pos = (0, 1) - >>> isets = [set('ab'), set('bc')] - >>> oset = set('ac') - >>> _find_contraction(pos, isets, oset) - ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) - - # A more complex case with additional terms in the contraction - >>> pos = (0, 2) - >>> isets = [set('abd'), set('ac'), set('bdc')] - >>> oset = set('ac') - >>> _find_contraction(pos, isets, oset) - ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) - """ - - idx_contract = set() - idx_remain = output_set.copy() - remaining = [] - for ind, value in enumerate(input_sets): - if ind in positions: - idx_contract |= value - else: - remaining.append(value) - idx_remain |= value - - new_result = idx_remain & idx_contract - idx_removed = (idx_contract - new_result) - remaining.append(new_result) - - return (new_result, remaining, idx_removed, idx_contract) - - -def _optimal_path(input_sets, output_set, idx_dict, memory_limit): - """ - Computes all possible pair contractions, sieves the results based - on ``memory_limit`` and returns the lowest cost path. This algorithm - scales factorial with respect to the elements in the list ``input_sets``. - - Parameters - ---------- - input_sets : list - List of sets that represent the lhs side of the einsum subscript - output_set : set - Set that represents the rhs side of the overall einsum subscript - idx_dict : dictionary - Dictionary of index sizes - memory_limit : int - The maximum number of elements in a temporary array - - Returns - ------- - path : list - The optimal contraction order within the memory limit constraint. 
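-
-    Notes
-    -----
-    At step ``k`` every surviving partial path is extended by all
-    ``C(n - k, 2)`` pairs of the remaining operands (the
-    ``itertools.combinations`` call below), which is what makes this
-    search factorial in the number of operands.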
-
-    Examples
-    --------
-    >>> isets = [set('abd'), set('ac'), set('bdc')]
-    >>> oset = set()
-    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
-    >>> _optimal_path(isets, oset, idx_sizes, 5000)
-    [(0, 2), (0, 1)]
-    """
-
-    full_results = [(0, [], input_sets)]
-    for iteration in range(len(input_sets) - 1):
-        iter_results = []
-
-        # Compute all unique pairs
-        for curr in full_results:
-            cost, positions, remaining = curr
-            for con in itertools.combinations(range(len(input_sets) - iteration), 2):
-
-                # Find the contraction
-                cont = _find_contraction(con, remaining, output_set)
-                new_result, new_input_sets, idx_removed, idx_contract = cont
-
-                # Sieve the results based on memory_limit
-                new_size = _compute_size_by_dict(new_result, idx_dict)
-                if new_size > memory_limit:
-                    continue
-
-                # Build (total_cost, positions, indices_remaining)
-                total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
-                new_pos = positions + [con]
-                iter_results.append((total_cost, new_pos, new_input_sets))
-
-        # Update combinatorial list, if we did not find anything return best
-        # path + remaining contractions
-        if iter_results:
-            full_results = iter_results
-        else:
-            path = min(full_results, key=lambda x: x[0])[1]
-            path += [tuple(range(len(input_sets) - iteration))]
-            return path
-
-    # If we have not found anything return single einsum contraction
-    if len(full_results) == 0:
-        return [tuple(range(len(input_sets)))]
-
-    path = min(full_results, key=lambda x: x[0])[1]
-    return path
-
-
-def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
-    """Compute the cost (removed size + flops) and resultant indices for
-    performing the contraction specified by ``positions``.
-
-    Parameters
-    ----------
-    positions : tuple of int
-        The locations of the proposed tensors to contract.
-    input_sets : list of sets
-        The indices found on each tensor.
-    output_set : set
-        The output indices of the expression.
-    idx_dict : dict
-        Mapping of each index to its size.
-    memory_limit : int
-        The total allowed size for an intermediary tensor.
-    path_cost : int
-        The contraction cost so far.
-    naive_cost : int
-        The cost of the unoptimized expression.
-
-    Returns
-    -------
-    cost : (int, int)
-        A tuple containing the size of any indices removed, and the flop cost.
-    positions : tuple of int
-        The locations of the proposed tensors to contract.
-    new_input_sets : list of sets
-        The resulting new list of indices if this proposed contraction is performed.
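-
-    Examples
-    --------
-    A minimal sketch (set ordering in the printed output may vary):
-
-    >>> _parse_possible_contraction((0, 1), [set('ab'), set('bc')], set('ac'),
-    ...                             {'a': 2, 'b': 3, 'c': 4}, 10**6, 0, 10**6)
-    [(-10, 48), (0, 1), [{'a', 'c'}]]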
-
-    """
-
-    # Find the contraction
-    contract = _find_contraction(positions, input_sets, output_set)
-    idx_result, new_input_sets, idx_removed, idx_contract = contract
-
-    # Sieve the results based on memory_limit
-    new_size = _compute_size_by_dict(idx_result, idx_dict)
-    if new_size > memory_limit:
-        return None
-
-    # Build sort tuple
-    old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
-    removed_size = sum(old_sizes) - new_size
-
-    # NB: removed_size used to be just the size of any removed indices i.e.:
-    #     helpers.compute_size_by_dict(idx_removed, idx_dict)
-    cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
-    sort = (-removed_size, cost)
-
-    # Sieve based on total cost as well
-    if (path_cost + cost) > naive_cost:
-        return None
-
-    # Add contraction to possible choices
-    return [sort, positions, new_input_sets]
-
-
-def _update_other_results(results, best):
-    """Update the positions and provisional input_sets of ``results`` based on
-    performing the contraction result ``best``. Remove any involving the tensors
-    contracted.
-
-    Parameters
-    ----------
-    results : list
-        List of contraction results produced by ``_parse_possible_contraction``.
-    best : list
-        The best contraction of ``results`` i.e. the one that will be performed.
-
-    Returns
-    -------
-    mod_results : list
-        The list of modified results, updated with outcome of ``best`` contraction.
-    """
-
-    best_con = best[1]
-    bx, by = best_con
-    mod_results = []
-
-    for cost, (x, y), con_sets in results:
-
-        # Ignore results involving tensors just contracted
-        if x in best_con or y in best_con:
-            continue
-
-        # Update the input_sets
-        del con_sets[by - int(by > x) - int(by > y)]
-        del con_sets[bx - int(bx > x) - int(bx > y)]
-        con_sets.insert(-1, best[2][-1])
-
-        # Update the position indices
-        mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
-        mod_results.append((cost, mod_con, con_sets))
-
-    return mod_results
-
-
-def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
-    """
-    Finds the path by contracting the best pair until the input list is
-    exhausted. The best pair is found by minimizing the tuple
-    ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
-    matrix multiplication or inner product operations, then Hadamard-like
-    operations, and finally outer operations. Outer products are limited by
-    ``memory_limit``. This algorithm scales cubically with respect to the
-    number of elements in the list ``input_sets``.
-
-    Parameters
-    ----------
-    input_sets : list
-        List of sets that represent the lhs side of the einsum subscript
-    output_set : set
-        Set that represents the rhs side of the overall einsum subscript
-    idx_dict : dictionary
-        Dictionary of index sizes
-    memory_limit : int
-        The maximum number of elements in a temporary array
-
-    Returns
-    -------
-    path : list
-        The greedy contraction order within the memory limit constraint.
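-
-    Notes
-    -----
-    Each candidate produced by ``_parse_possible_contraction`` is keyed by
-    ``(-removed_size, cost)``, so the ``min`` taken below prefers the pair
-    that frees the most intermediate memory, using flop cost as the
-    tie-breaker.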
-
-    Examples
-    --------
-    >>> isets = [set('abd'), set('ac'), set('bdc')]
-    >>> oset = set()
-    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
-    >>> _greedy_path(isets, oset, idx_sizes, 5000)
-    [(0, 2), (0, 1)]
-    """
-
-    # Handle trivial cases that leaked through
-    if len(input_sets) == 1:
-        return [(0,)]
-    elif len(input_sets) == 2:
-        return [(0, 1)]
-
-    # Build up a naive cost
-    contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
-    idx_result, new_input_sets, idx_removed, idx_contract = contract
-    naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
-
-    # Initially iterate over all pairs
-    comb_iter = itertools.combinations(range(len(input_sets)), 2)
-    known_contractions = []
-
-    path_cost = 0
-    path = []
-
-    for iteration in range(len(input_sets) - 1):
-
-        # Iterate over all pairs on first step, only previously found pairs on subsequent steps
-        for positions in comb_iter:
-
-            # Always initially ignore outer products
-            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
-                continue
-
-            result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
-                                                 naive_cost)
-            if result is not None:
-                known_contractions.append(result)
-
-        # If we do not have an inner contraction, rescan pairs including outer products
-        if len(known_contractions) == 0:
-
-            # Then check the outer products
-            for positions in itertools.combinations(range(len(input_sets)), 2):
-                result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
-                                                     path_cost, naive_cost)
-                if result is not None:
-                    known_contractions.append(result)
-
-            # If we still did not find any remaining contractions, default back to einsum like behavior
-            if len(known_contractions) == 0:
-                path.append(tuple(range(len(input_sets))))
-                break
-
-        # Sort based on first index
-        best = min(known_contractions, key=lambda x: x[0])
-
-        # Now propagate as many unused contractions as possible to next iteration
-        known_contractions = _update_other_results(known_contractions, best)
-
-        # Next iteration only compute contractions with the new tensor
-        # All other contractions have been accounted for
-        input_sets = best[2]
-        new_tensor_pos = len(input_sets) - 1
-        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
-
-        # Update path and total cost
-        path.append(best[1])
-        path_cost += best[0][1]
-
-    return path
-
-
-def _can_dot(inputs, result, idx_removed):
-    """
-    Checks whether we can use a BLAS (np.tensordot) call and whether it is
-    beneficial to do so.
-
-    Parameters
-    ----------
-    inputs : list of str
-        Specifies the subscripts for summation.
-    result : str
-        Resulting summation.
-    idx_removed : set
-        Indices that are removed in the summation
-
-
-    Returns
-    -------
-    type : bool
-        Returns true if BLAS should and can be used, else False
-
-    Notes
-    -----
-    If the operation is BLAS level 1 or 2 and the data is not already
-    aligned, we default back to einsum, as the memory movement needed to
-    copy is more costly than the operation itself.
- - - Examples - -------- - - # Standard GEMM operation - >>> _can_dot(['ij', 'jk'], 'ik', set('j')) - True - - # Can use the standard BLAS, but requires odd data movement - >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) - False - - # DDOT where the memory is not aligned - >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) - False - - """ - - # All `dot` calls remove indices - if len(idx_removed) == 0: - return False - - # BLAS can only handle two operands - if len(inputs) != 2: - return False - - input_left, input_right = inputs - - for c in set(input_left + input_right): - # can't deal with repeated indices on same input or more than 2 total - nl, nr = input_left.count(c), input_right.count(c) - if (nl > 1) or (nr > 1) or (nl + nr > 2): - return False - - # can't do implicit summation or dimension collapse e.g. - # "ab,bc->c" (implicitly sum over 'a') - # "ab,ca->ca" (take diagonal of 'a') - if nl + nr - 1 == int(c in result): - return False - - # Build a few temporaries - set_left = set(input_left) - set_right = set(input_right) - keep_left = set_left - idx_removed - keep_right = set_right - idx_removed - rs = len(idx_removed) - - # At this point we are a DOT, GEMV, or GEMM operation - - # Handle inner products - - # DDOT with aligned data - if input_left == input_right: - return True - - # DDOT without aligned data (better to use einsum) - if set_left == set_right: - return False - - # Handle the 4 possible (aligned) GEMV or GEMM cases - - # GEMM or GEMV no transpose - if input_left[-rs:] == input_right[:rs]: - return True - - # GEMM or GEMV transpose both - if input_left[:rs] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose right - if input_left[-rs:] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose left - if input_left[:rs] == input_right[:rs]: - return True - - # Einsum is faster than GEMV if we have to copy data - if not keep_left or not keep_right: - return False - - # We are a matrix-matrix product, but we need to copy data - return True - - -def _parse_einsum_input(operands): - """ - A reproduction of einsum c side einsum parsing in python. - - Returns - ------- - input_strings : str - Parsed input strings - output_string : str - Parsed output string - operands : list of array_like - The operands to use in the numpy contraction - - Examples - -------- - The operand list is simplified to reduce printing: - - >>> np.random.seed(123) - >>> a = np.random.rand(4, 4) - >>> b = np.random.rand(4, 4, 4) - >>> _parse_einsum_input(('...a,...a->...', a, b)) - ('za,xza', 'xz', [a, b]) # may vary - - >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) - ('za,xza', 'xz', [a, b]) # may vary - """ - - if len(operands) == 0: - raise ValueError("No input operands") - - if isinstance(operands[0], basestring): - subscripts = operands[0].replace(" ", "") - operands = [asanyarray(v) for v in operands[1:]] - - # Ensure all characters are valid - for s in subscripts: - if s in '.,->': - continue - if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) - - else: - tmp_operands = list(operands) - operand_list = [] - subscript_list = [] - for p in range(len(operands) // 2): - operand_list.append(tmp_operands.pop(0)) - subscript_list.append(tmp_operands.pop(0)) - - output_list = tmp_operands[-1] if len(tmp_operands) else None - operands = [asanyarray(v) for v in operand_list] - subscripts = "" - last = len(subscript_list) - 1 - for num, sub in enumerate(subscript_list): - for s in sub: - if s is Ellipsis: - subscripts += "..." 
- elif isinstance(s, int): - subscripts += einsum_symbols[s] - else: - raise TypeError("For this input type lists must contain " - "either int or Ellipsis") - if num != last: - subscripts += "," - - if output_list is not None: - subscripts += "->" - for s in output_list: - if s is Ellipsis: - subscripts += "..." - elif isinstance(s, int): - subscripts += einsum_symbols[s] - else: - raise TypeError("For this input type lists must contain " - "either int or Ellipsis") - # Check for proper "->" - if ("-" in subscripts) or (">" in subscripts): - invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) - if invalid or (subscripts.count("->") != 1): - raise ValueError("Subscripts can only contain one '->'.") - - # Parse ellipses - if "." in subscripts: - used = subscripts.replace(".", "").replace(",", "").replace("->", "") - unused = list(einsum_symbols_set - set(used)) - ellipse_inds = "".join(unused) - longest = 0 - - if "->" in subscripts: - input_tmp, output_sub = subscripts.split("->") - split_subscripts = input_tmp.split(",") - out_sub = True - else: - split_subscripts = subscripts.split(',') - out_sub = False - - for num, sub in enumerate(split_subscripts): - if "." in sub: - if (sub.count(".") != 3) or (sub.count("...") != 1): - raise ValueError("Invalid Ellipses.") - - # Take into account numerical values - if operands[num].shape == (): - ellipse_count = 0 - else: - ellipse_count = max(operands[num].ndim, 1) - ellipse_count -= (len(sub) - 3) - - if ellipse_count > longest: - longest = ellipse_count - - if ellipse_count < 0: - raise ValueError("Ellipses lengths do not match.") - elif ellipse_count == 0: - split_subscripts[num] = sub.replace('...', '') - else: - rep_inds = ellipse_inds[-ellipse_count:] - split_subscripts[num] = sub.replace('...', rep_inds) - - subscripts = ",".join(split_subscripts) - if longest == 0: - out_ellipse = "" - else: - out_ellipse = ellipse_inds[-longest:] - - if out_sub: - subscripts += "->" + output_sub.replace("...", out_ellipse) - else: - # Special care for outputless ellipses - output_subscript = "" - tmp_subscripts = subscripts.replace(",", "") - for s in sorted(set(tmp_subscripts)): - if s not in (einsum_symbols): - raise ValueError("Character %s is not a valid symbol." % s) - if tmp_subscripts.count(s) == 1: - output_subscript += s - normal_inds = ''.join(sorted(set(output_subscript) - - set(out_ellipse))) - - subscripts += "->" + out_ellipse + normal_inds - - # Build output string if does not exist - if "->" in subscripts: - input_subscripts, output_subscript = subscripts.split("->") - else: - input_subscripts = subscripts - # Build output subscripts - tmp_subscripts = subscripts.replace(",", "") - output_subscript = "" - for s in sorted(set(tmp_subscripts)): - if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) - if tmp_subscripts.count(s) == 1: - output_subscript += s - - # Make sure output subscripts are in the input - for char in output_subscript: - if char not in input_subscripts: - raise ValueError("Output character %s did not appear in the input" - % char) - - # Make sure number operands is equivalent to the number of terms - if len(input_subscripts.split(',')) != len(operands): - raise ValueError("Number of einsum subscripts must be equal to the " - "number of operands.") - - return (input_subscripts, output_subscript, operands) - - -def _einsum_path_dispatcher(*operands, **kwargs): - # NOTE: technically, we should only dispatch on array-like arguments, not - # subscripts (given as strings). 
-    # But separating operands into arrays/subscripts is a little tricky/slow
-    # (given einsum's two supported signatures), so as a practical shortcut we
-    # dispatch on everything. Strings will be ignored for dispatching since
-    # they don't define __array_function__.
-    return operands
-
-
-@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
-def einsum_path(*operands, **kwargs):
-    """
-    einsum_path(subscripts, *operands, optimize='greedy')
-
-    Evaluates the lowest cost contraction order for an einsum expression by
-    considering the creation of intermediate arrays.
-
-    Parameters
-    ----------
-    subscripts : str
-        Specifies the subscripts for summation.
-    *operands : list of array_like
-        These are the arrays for the operation.
-    optimize : {bool, list, tuple, 'greedy', 'optimal'}
-        Choose the type of path. If a tuple is provided, the second argument is
-        assumed to be the maximum intermediate size created. If only a single
-        argument is provided, the largest input or output array size is used
-        as a maximum intermediate size.
-
-        * if a list is given that starts with ``einsum_path``, uses this as the
-          contraction path
-        * if False, no optimization is taken
-        * if True, defaults to the 'greedy' algorithm
-        * 'optimal' An algorithm that combinatorially explores all possible
-          ways of contracting the listed tensors and chooses the least costly
-          path. Scales exponentially with the number of terms in the
-          contraction.
-        * 'greedy' An algorithm that chooses the best pair contraction
-          at each step. Effectively, this algorithm searches the largest inner,
-          Hadamard, and then outer products at each step. Scales cubically with
-          the number of terms in the contraction. Equivalent to the 'optimal'
-          path for most contractions.
-
-        Default is 'greedy'.
-
-    Returns
-    -------
-    path : list of tuples
-        A list representation of the einsum path.
-    string_repr : str
-        A printable representation of the einsum path.
-
-    Notes
-    -----
-    The resulting path indicates which terms of the input contraction should be
-    contracted first; the result of this contraction is then appended to the
-    end of the contraction list. This list can then be iterated over until all
-    intermediate contractions are complete.
-
-    See Also
-    --------
-    einsum, linalg.multi_dot
-
-    Examples
-    --------
-
-    We can begin with a chain dot example. In this case, it is optimal to
-    contract the ``b`` and ``c`` tensors first as represented by the first
-    element of the path ``(1, 2)``. The resulting tensor is added to the end
-    of the contraction and the remaining contraction ``(0, 1)`` is then
-    completed.
-
-    >>> np.random.seed(123)
-    >>> a = np.random.rand(2, 2)
-    >>> b = np.random.rand(2, 5)
-    >>> c = np.random.rand(5, 2)
-    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
-    >>> print(path_info[0])
-    ['einsum_path', (1, 2), (0, 1)]
-    >>> print(path_info[1])
-      Complete contraction:  ij,jk,kl->il # may vary
-             Naive scaling:  4
-         Optimized scaling:  3
-          Naive FLOP count:  1.600e+02
-      Optimized FLOP count:  5.600e+01
-       Theoretical speedup:  2.857
-      Largest intermediate:  4.000e+00 elements
-    -------------------------------------------------------------------------
-    scaling                  current                                remaining
-    -------------------------------------------------------------------------
-       3                   kl,jk->jl                                ij,jl->il
-       3                   jl,ij->il                                   il->il
-
-
-    A more complex index transformation example.
-
-    >>> I = np.random.rand(10, 10, 10, 10)
-    >>> C = np.random.rand(10, 10)
-    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
-    ...                            optimize='greedy')
-
-    >>> print(path_info[0])
-    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
-    >>> print(path_info[1])
-      Complete contraction:  ea,fb,abcd,gc,hd->efgh # may vary
-             Naive scaling:  8
-         Optimized scaling:  5
-          Naive FLOP count:  8.000e+08
-      Optimized FLOP count:  8.000e+05
-       Theoretical speedup:  1000.000
-      Largest intermediate:  1.000e+04 elements
-    --------------------------------------------------------------------------
-    scaling        current                                remaining
-    --------------------------------------------------------------------------
-       5           abcd,ea->bcde                          fb,gc,hd,bcde->efgh
-       5           bcde,fb->cdef                          gc,hd,cdef->efgh
-       5           cdef,gc->defg                          hd,defg->efgh
-       5           defg,hd->efgh                          efgh->efgh
-    """
-
-    # Make sure all keywords are valid
-    valid_contract_kwargs = ['optimize', 'einsum_call']
-    unknown_kwargs = [k for (k, v) in kwargs.items() if k
-                      not in valid_contract_kwargs]
-    if len(unknown_kwargs):
-        raise TypeError("Did not understand the following kwargs:"
-                        " %s" % unknown_kwargs)
-
-    # Figure out what the path really is
-    path_type = kwargs.pop('optimize', True)
-    if path_type is True:
-        path_type = 'greedy'
-    if path_type is None:
-        path_type = False
-
-    memory_limit = None
-
-    # No optimization or a named path algorithm
-    if (path_type is False) or isinstance(path_type, basestring):
-        pass
-
-    # Given an explicit path
-    elif len(path_type) and (path_type[0] == 'einsum_path'):
-        pass
-
-    # Path tuple with memory limit
-    elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and
-            isinstance(path_type[1], (int, float))):
-        memory_limit = int(path_type[1])
-        path_type = path_type[0]
-
-    else:
-        raise TypeError("Did not understand the path: %s" % str(path_type))
-
-    # Hidden option, only einsum should call this
-    einsum_call_arg = kwargs.pop("einsum_call", False)
-
-    # Python side parsing
-    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
-
-    # Build a few useful lists and sets
-    input_list = input_subscripts.split(',')
-    input_sets = [set(x) for x in input_list]
-    output_set = set(output_subscript)
-    indices = set(input_subscripts.replace(',', ''))
-
-    # Get length of each unique dimension and ensure all dimensions are correct
-    dimension_dict = {}
-    broadcast_indices = [[] for x in range(len(input_list))]
-    for tnum, term in enumerate(input_list):
-        sh = operands[tnum].shape
-        if len(sh) != len(term):
-            raise ValueError("Einstein sum subscript %s does not contain the "
-                             "correct number of indices for operand %d."
-                             % (input_list[tnum], tnum))
-        for cnum, char in enumerate(term):
-            dim = sh[cnum]
-
-            # Build out broadcast indices
-            if dim == 1:
-                broadcast_indices[tnum].append(char)
-
-            if char in dimension_dict.keys():
-                # For broadcasting cases we always want the largest dim size
-                if dimension_dict[char] == 1:
-                    dimension_dict[char] = dim
-                elif dim not in (1, dimension_dict[char]):
-                    raise ValueError("Size of label '%s' for operand %d (%d) "
-                                     "does not match previous terms (%d)."
-                                     % (char, tnum, dimension_dict[char], dim))
-            else:
-                dimension_dict[char] = dim
-
-    # Convert broadcast inds to sets
-    broadcast_indices = [set(x) for x in broadcast_indices]
-
-    # Compute size of each input array plus the output array
-    size_list = [_compute_size_by_dict(term, dimension_dict)
-                 for term in input_list + [output_subscript]]
-    max_size = max(size_list)
-
-    if memory_limit is None:
-        memory_arg = max_size
-    else:
-        memory_arg = memory_limit
-
-    # Compute naive cost
-    # This isn't quite right, need to look into exactly how einsum does this
-    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
-    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
-
-    # Compute the path
-    if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
-        # Nothing to be optimized, leave it to einsum
-        path = [tuple(range(len(input_list)))]
-    elif path_type == "greedy":
-        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
-    elif path_type == "optimal":
-        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
-    elif path_type[0] == 'einsum_path':
-        path = path_type[1:]
-    else:
-        raise KeyError("Path name %s not found" % path_type)
-
-    cost_list, scale_list, size_list, contraction_list = [], [], [], []
-
-    # Build contraction tuple (positions, gemm, einsum_str, remaining)
-    for cnum, contract_inds in enumerate(path):
-        # Make sure we remove inds from right to left
-        contract_inds = tuple(sorted(list(contract_inds), reverse=True))
-
-        contract = _find_contraction(contract_inds, input_sets, output_set)
-        out_inds, input_sets, idx_removed, idx_contract = contract
-
-        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
-        cost_list.append(cost)
-        scale_list.append(len(idx_contract))
-        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
-
-        bcast = set()
-        tmp_inputs = []
-        for x in contract_inds:
-            tmp_inputs.append(input_list.pop(x))
-            bcast |= broadcast_indices.pop(x)
-
-        new_bcast_inds = bcast - idx_removed
-
-        # If we're broadcasting, nix blas
-        if not len(idx_removed & bcast):
-            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
-        else:
-            do_blas = False
-
-        # Last contraction
-        if (cnum - len(path)) == -1:
-            idx_result = output_subscript
-        else:
-            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
-            idx_result = "".join([x[1] for x in sorted(sort_result)])
-
-        input_list.append(idx_result)
-        broadcast_indices.append(new_bcast_inds)
-        einsum_str = ",".join(tmp_inputs) + "->" + idx_result
-
-        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
-        contraction_list.append(contraction)
-
-    opt_cost = sum(cost_list) + 1
-
-    if einsum_call_arg:
-        return (operands, contraction_list)
-
-    # Return the path along with a nice string representation
-    overall_contraction = input_subscripts + "->" + output_subscript
-    header = ("scaling", "current", "remaining")
-
-    speedup = naive_cost / opt_cost
-    max_i = max(size_list)
-
-    path_print  = "  Complete contraction:  %s\n" % overall_contraction
-    path_print += "         Naive scaling:  %d\n" % len(indices)
-    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
-    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
-    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
-    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
-    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
-    path_print += "-" * 74 + "\n"
-    path_print += "%6s %24s %40s\n" % header
-    path_print += "-" * 74
-
-    for n, contraction in enumerate(contraction_list):
-        inds, idx_rm, einsum_str, remaining, blas = contraction
-        remaining_str = ",".join(remaining) + "->" + output_subscript
-        path_run = (scale_list[n], einsum_str, remaining_str)
-        path_print += "\n%4d %24s %40s" % path_run
-
-    path = ['einsum_path'] + path
-    return (path, path_print)
-
-
-def _einsum_dispatcher(*operands, **kwargs):
-    # Arguably we dispatch on more arguments than we really should; see note in
-    # _einsum_path_dispatcher for why.
-    for op in operands:
-        yield op
-    yield kwargs.get('out')
-
-
-# Rewrite einsum to handle different cases
-@array_function_dispatch(_einsum_dispatcher, module='numpy')
-def einsum(*operands, **kwargs):
-    """
-    einsum(subscripts, *operands, out=None, dtype=None, order='K',
-           casting='safe', optimize=False)
-
-    Evaluates the Einstein summation convention on the operands.
-
-    Using the Einstein summation convention, many common multi-dimensional,
-    linear algebraic array operations can be represented in a simple fashion.
-    In *implicit* mode `einsum` computes these values.
-
-    In *explicit* mode, `einsum` provides further flexibility to compute
-    other array operations that might not be considered classical Einstein
-    summation operations, by disabling, or forcing summation over specified
-    subscript labels.
-
-    See the notes and examples for clarification.
-
-    Parameters
-    ----------
-    subscripts : str
-        Specifies the subscripts for summation as a comma-separated list of
-        subscript labels. An implicit (classical Einstein summation)
-        calculation is performed unless the explicit indicator '->' is
-        included as well as subscript labels of the precise output form.
-    operands : list of array_like
-        These are the arrays for the operation.
-    out : ndarray, optional
-        If provided, the calculation is done into this array.
-    dtype : {data-type, None}, optional
-        If provided, forces the calculation to use the data type specified.
-        Note that you may have to also give a more liberal `casting`
-        parameter to allow the conversions. Default is None.
-    order : {'C', 'F', 'A', 'K'}, optional
-        Controls the memory layout of the output. 'C' means it should
-        be C contiguous. 'F' means it should be Fortran contiguous,
-        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
-        'K' means it should be as close to the layout of the inputs as
-        is possible, including arbitrarily permuted axes.
-        Default is 'K'.
-    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
-        Controls what kind of data casting may occur. Setting this to
-        'unsafe' is not recommended, as it can adversely affect accumulations.
-
-        * 'no' means the data types should not be cast at all.
-        * 'equiv' means only byte-order changes are allowed.
-        * 'safe' means only casts which can preserve values are allowed.
-        * 'same_kind' means only safe casts or casts within a kind,
-          like float64 to float32, are allowed.
-        * 'unsafe' means any data conversions may be done.
-
-        Default is 'safe'.
-    optimize : {False, True, 'greedy', 'optimal'}, optional
-        Controls whether intermediate optimization should occur. No
-        optimization will occur if False, and True will default to the
-        'greedy' algorithm. Also accepts an explicit contraction list from
-        the ``np.einsum_path`` function. See ``np.einsum_path`` for more
-        details. Defaults to False.
-
-    Returns
-    -------
-    output : ndarray
-        The calculation based on the Einstein summation convention.
-
-    See Also
-    --------
-    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
-
-    Notes
-    -----
-    .. versionadded:: 1.6.0
-
-    The Einstein summation convention can be used to compute
-    many multi-dimensional, linear algebraic array operations. `einsum`
-    provides a succinct way of representing these.
-
-    A non-exhaustive list of these operations,
-    which can be computed by `einsum`, is shown below along with examples:
-
-    * Trace of an array, :py:func:`numpy.trace`.
-    * Return a diagonal, :py:func:`numpy.diag`.
-    * Array axis summations, :py:func:`numpy.sum`.
-    * Transpositions and permutations, :py:func:`numpy.transpose`.
-    * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
-    * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
-    * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
-    * Tensor contractions, :py:func:`numpy.tensordot`.
-    * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
-
-    The subscripts string is a comma-separated list of subscript labels,
-    where each label refers to a dimension of the corresponding operand.
-    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
-    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
-    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
-    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
-    describes traditional matrix multiplication and is equivalent to
-    :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
-    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
-    to :py:func:`np.trace(a) <numpy.trace>`.
-
-    In *implicit mode*, the chosen subscripts are important
-    since the axes of the output are reordered alphabetically. This
-    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
-    ``np.einsum('ji', a)`` takes its transpose. Additionally,
-    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while
-    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
-    multiplication since subscript 'h' precedes subscript 'i'.
-
-    In *explicit mode* the output can be directly controlled by
-    specifying output subscript labels. This requires the
-    identifier '->' as well as the list of output subscript labels.
-    This feature increases the flexibility of the function since
-    summing can be disabled or forced when required. The call
-    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
-    and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
-    The difference is that `einsum` does not allow broadcasting by default.
-    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
-    order of the output subscript labels and therefore returns matrix
-    multiplication, unlike the example above in implicit mode.
-
-    To enable and control broadcasting, use an ellipsis. Default
-    NumPy-style broadcasting is done by adding an ellipsis
-    to the left of each term, like ``np.einsum('...ii->...i', a)``.
-    To take the trace along the first and last axes,
-    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
-    product with the left-most indices instead of rightmost, one can do
-    ``np.einsum('ij...,jk...->ik...', a, b)``.
-
-    When there is only one operand, no axes are summed, and no output
-    parameter is provided, a view into the operand is returned instead
-    of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
-    produces a view (changed in version 1.10.0).
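-
-    A quick illustration of that view behaviour (the shapes here are
-    arbitrary, and checking ``base`` is just one way to see it)::
-
-        a = np.arange(9).reshape(3, 3)
-        d = np.einsum('ii->i', a)   # diagonal returned as a view, no copy
-        d.base is a                 # True on a typical recent NumPy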
-
-    `einsum` also provides an alternative way to provide the subscripts
-    and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
-    If the output shape is not provided in this format `einsum` will be
-    calculated in implicit mode, otherwise it will be performed explicitly.
-    The examples below have corresponding `einsum` calls with the two
-    parameter methods.
-
-    .. versionadded:: 1.10.0
-
-    Views returned from einsum are now writeable whenever the input array
-    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
-    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
-    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
-    of a 2D array.
-
-    .. versionadded:: 1.12.0
-
-    Added the ``optimize`` argument which will optimize the contraction order
-    of an einsum expression. For a contraction with three or more operands this
-    can greatly increase the computational efficiency at the cost of a larger
-    memory footprint during computation.
-
-    Typically a 'greedy' algorithm is applied which empirical tests have shown
-    returns the optimal path in the majority of cases. In some cases 'optimal'
-    will return the superlative path through a more expensive, exhaustive search.
-    For iterative calculations it may be advisable to calculate the optimal path
-    once and reuse that path by supplying it as an argument. An example is given
-    below.
-
-    See :py:func:`numpy.einsum_path` for more details.
-
-    Examples
-    --------
-    >>> a = np.arange(25).reshape(5,5)
-    >>> b = np.arange(5)
-    >>> c = np.arange(6).reshape(2,3)
-
-    Trace of a matrix:
-
-    >>> np.einsum('ii', a)
-    60
-    >>> np.einsum(a, [0,0])
-    60
-    >>> np.trace(a)
-    60
-
-    Extract the diagonal (requires explicit form):
-
-    >>> np.einsum('ii->i', a)
-    array([ 0,  6, 12, 18, 24])
-    >>> np.einsum(a, [0,0], [0])
-    array([ 0,  6, 12, 18, 24])
-    >>> np.diag(a)
-    array([ 0,  6, 12, 18, 24])
-
-    Sum over an axis (requires explicit form):
-
-    >>> np.einsum('ij->i', a)
-    array([ 10,  35,  60,  85, 110])
-    >>> np.einsum(a, [0,1], [0])
-    array([ 10,  35,  60,  85, 110])
-    >>> np.sum(a, axis=1)
-    array([ 10,  35,  60,  85, 110])
-
-    For higher dimensional arrays summing a single axis can be done with ellipsis:
-
-    >>> np.einsum('...j->...', a)
-    array([ 10,  35,  60,  85, 110])
-    >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
-    array([ 10,  35,  60,  85, 110])
-
-    Compute a matrix transpose, or reorder any number of axes:
-
-    >>> np.einsum('ji', c)
-    array([[0, 3],
-           [1, 4],
-           [2, 5]])
-    >>> np.einsum('ij->ji', c)
-    array([[0, 3],
-           [1, 4],
-           [2, 5]])
-    >>> np.einsum(c, [1,0])
-    array([[0, 3],
-           [1, 4],
-           [2, 5]])
-    >>> np.transpose(c)
-    array([[0, 3],
-           [1, 4],
-           [2, 5]])
-
-    Vector inner products:
-
-    >>> np.einsum('i,i', b, b)
-    30
-    >>> np.einsum(b, [0], b, [0])
-    30
-    >>> np.inner(b,b)
-    30
-
-    Matrix vector multiplication:
-
-    >>> np.einsum('ij,j', a, b)
-    array([ 30,  80, 130, 180, 230])
-    >>> np.einsum(a, [0,1], b, [1])
-    array([ 30,  80, 130, 180, 230])
-    >>> np.dot(a, b)
-    array([ 30,  80, 130, 180, 230])
-    >>> np.einsum('...j,j', a, b)
-    array([ 30,  80, 130, 180, 230])
-
-    Broadcasting and scalar multiplication:
-
-    >>> np.einsum('..., ...', 3, c)
-    array([[ 0,  3,  6],
-           [ 9, 12, 15]])
-    >>> np.einsum(',ij', 3, c)
-    array([[ 0,  3,  6],
-           [ 9, 12, 15]])
-    >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
-    array([[ 0,  3,  6],
-           [ 9, 12, 15]])
-    >>> np.multiply(3, c)
-    array([[ 0,  3,  6],
-           [ 9, 12, 15]])
-
-    Vector outer product:
-
-    >>> np.einsum('i,j', np.arange(2)+1, b)
-    array([[0, 1, 2, 3, 4],
-           [0, 2, 4, 6, 8]])
-    >>> 
np.einsum(np.arange(2)+1, [0], b, [1]) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.outer(np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - - Tensor contraction: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> np.einsum('ijk,jil->kl', a, b) - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - >>> np.tensordot(a,b, axes=([1,0],[0,1])) - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - - Writeable returned arrays (since version 1.10.0): - - >>> a = np.zeros((3, 3)) - >>> np.einsum('ii->i', a)[:] = 1 - >>> a - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - Example of ellipsis use: - - >>> a = np.arange(6).reshape((3,2)) - >>> b = np.arange(12).reshape((4,3)) - >>> np.einsum('ki,jk->ij', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('ki,...k->i...', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('k...,jk', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - - Chained array operations. For more complicated contractions, speed ups - might be achieved by repeatedly computing a 'greedy' path or pre-computing the - 'optimal' path and repeatedly applying it, using an - `einsum_path` insertion (since version 1.12.0). Performance improvements can be - particularly significant with larger arrays: - - >>> a = np.ones(64).reshape(2,4,8) - - Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) - - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) - - Sub-optimal `einsum` (due to repeated path calculation time): ~330ms - - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') - - Greedy `einsum` (faster optimal path approximation): ~160ms - - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') - - Optimal `einsum` (best usage pattern in some use cases): ~110ms - - >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] - >>> for iteration in range(500): - ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) - - """ - - # Grab non-einsum kwargs; do not optimize by default. 
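-    # (The pop below strips `optimize` before anything is forwarded: the C
-    # implementation, c_einsum, does not accept it. The remaining kwargs are
-    # validated against the einsum signature further down.)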
-    optimize_arg = kwargs.pop('optimize', False)
-
-    # If no optimization, run pure einsum
-    if optimize_arg is False:
-        return c_einsum(*operands, **kwargs)
-
-    valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
-    einsum_kwargs = {k: v for (k, v) in kwargs.items() if
-                     k in valid_einsum_kwargs}
-
-    # Make sure all keywords are valid
-    valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
-    unknown_kwargs = [k for (k, v) in kwargs.items() if
-                      k not in valid_contract_kwargs]
-
-    if len(unknown_kwargs):
-        raise TypeError("Did not understand the following kwargs: %s"
-                        % unknown_kwargs)
-
-    # Special handling if out is specified
-    specified_out = False
-    out_array = einsum_kwargs.pop('out', None)
-    if out_array is not None:
-        specified_out = True
-
-    # Build the contraction list and operands
-    operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
-                                             einsum_call=True)
-
-    handle_out = False
-
-    # Start contraction loop
-    for num, contraction in enumerate(contraction_list):
-        inds, idx_rm, einsum_str, remaining, blas = contraction
-        tmp_operands = [operands.pop(x) for x in inds]
-
-        # Do we need to deal with the output?
-        handle_out = specified_out and ((num + 1) == len(contraction_list))
-
-        # Call tensordot if still possible
-        if blas:
-            # Checks have already been handled
-            input_str, results_index = einsum_str.split('->')
-            input_left, input_right = input_str.split(',')
-
-            tensor_result = input_left + input_right
-            for s in idx_rm:
-                tensor_result = tensor_result.replace(s, "")
-
-            # Find indices to contract over
-            left_pos, right_pos = [], []
-            for s in sorted(idx_rm):
-                left_pos.append(input_left.find(s))
-                right_pos.append(input_right.find(s))
-
-            # Contract!
-            new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
-
-            # Build a new view if needed
-            if (tensor_result != results_index) or handle_out:
-                if handle_out:
-                    einsum_kwargs["out"] = out_array
-                new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
-
-        # Call einsum
-        else:
-            # If out was specified
-            if handle_out:
-                einsum_kwargs["out"] = out_array
-
-            # Do the contraction
-            new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
-
-        # Append new items and dereference what we can
-        operands.append(new_view)
-        del tmp_operands, new_view
-
-    if specified_out:
-        return out_array
-    else:
-        return operands[0]
diff --git a/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py b/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py
deleted file mode 100644
index d454480..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/fromnumeric.py
+++ /dev/null
@@ -1,3649 +0,0 @@
-"""Module containing non-deprecated functions borrowed from Numeric.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import functools
-import types
-import warnings
-
-import numpy as np
-from .. import VisibleDeprecationWarning
-from . import multiarray as mu
-from . import overrides
-from . import umath as um
-from . import numerictypes as nt
-from ._asarray import asarray, array, asanyarray
-from .multiarray import concatenate
-from .
import _methods - -_dt_ = nt.sctype2char - -# functions that are methods -__all__ = [ - 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', - 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', - 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', - 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', - 'ravel', 'repeat', 'reshape', 'resize', 'round_', - 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', - 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', -] - -_gentype = types.GeneratorType -# save away Python sum -_sum_ = sum - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -# functions that are now methods -def _wrapit(obj, method, *args, **kwds): - try: - wrap = obj.__array_wrap__ - except AttributeError: - wrap = None - result = getattr(asarray(obj), method)(*args, **kwds) - if wrap: - if not isinstance(result, mu.ndarray): - result = asarray(result) - result = wrap(result) - return result - - -def _wrapfunc(obj, method, *args, **kwds): - bound = getattr(obj, method, None) - if bound is None: - return _wrapit(obj, method, *args, **kwds) - - try: - return bound(*args, **kwds) - except TypeError: - # A TypeError occurs if the object does have such a method in its - # class, but its signature is not identical to that of NumPy's. This - # situation has occurred in the case of a downstream library like - # 'pandas'. - # - # Call _wrapit from within the except clause to ensure a potential - # exception has a traceback chain. - return _wrapit(obj, method, *args, **kwds) - - -def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): - passkwargs = {k: v for k, v in kwargs.items() - if v is not np._NoValue} - - if type(obj) is not mu.ndarray: - try: - reduction = getattr(obj, method) - except AttributeError: - pass - else: - # This branch is needed for reductions like any which don't - # support a dtype. - if dtype is not None: - return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) - else: - return reduction(axis=axis, out=out, **passkwargs) - - return ufunc.reduce(obj, axis, dtype, out, **passkwargs) - - -def _take_dispatcher(a, indices, axis=None, out=None, mode=None): - return (a, out) - - -@array_function_dispatch(_take_dispatcher) -def take(a, indices, axis=None, out=None, mode='raise'): - """ - Take elements from an array along an axis. - - When axis is not None, this function does the same thing as "fancy" - indexing (indexing arrays using arrays); however, it can be easier to use - if you need elements along a given axis. A call such as - ``np.take(arr, indices, axis=3)`` is equivalent to - ``arr[:,:,:,indices,...]``. - - Explained without fancy indexing, this is equivalent to the following use - of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of - indices:: - - Ni, Nk = a.shape[:axis], a.shape[axis+1:] - Nj = indices.shape - for ii in ndindex(Ni): - for jj in ndindex(Nj): - for kk in ndindex(Nk): - out[ii + jj + kk] = a[ii + (indices[jj],) + kk] - - Parameters - ---------- - a : array_like (Ni..., M, Nk...) - The source array. - indices : array_like (Nj...) - The indices of the values to extract. - - .. versionadded:: 1.8.0 - - Also allow scalars for indices. - axis : int, optional - The axis over which to select values. By default, the flattened - input array is used. - out : ndarray, optional (Ni..., Nj..., Nk...) - If provided, the result will be placed in this array. 
It should be of the appropriate shape and dtype. Note that `out` is always
-        buffered if `mode='raise'`; use other modes for better performance.
-    mode : {'raise', 'wrap', 'clip'}, optional
-        Specifies how out-of-bounds indices will behave.
-
-        * 'raise' -- raise an error (default)
-        * 'wrap' -- wrap around
-        * 'clip' -- clip to the range
-
-        'clip' mode means that all indices that are too large are replaced
-        by the index that addresses the last element along that axis. Note
-        that this disables indexing with negative numbers.
-
-    Returns
-    -------
-    out : ndarray (Ni..., Nj..., Nk...)
-        The returned array has the same type as `a`.
-
-    See Also
-    --------
-    compress : Take elements using a boolean mask
-    ndarray.take : equivalent method
-    take_along_axis : Take elements by matching the array and the index arrays
-
-    Notes
-    -----
-
-    By eliminating the inner loop in the description above, and using `s_` to
-    build simple slice objects, `take` can be expressed in terms of applying
-    fancy indexing to each 1-d slice::
-
-        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
-        for ii in ndindex(Ni):
-            for kk in ndindex(Nk):
-                out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
-
-    For this reason, it is equivalent to (but faster than) the following use
-    of `apply_along_axis`::
-
-        out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
-
-    Examples
-    --------
-    >>> a = [4, 3, 5, 7, 6, 8]
-    >>> indices = [0, 1, 4]
-    >>> np.take(a, indices)
-    array([4, 3, 6])
-
-    In this example if `a` is an ndarray, "fancy" indexing can be used.
-
-    >>> a = np.array(a)
-    >>> a[indices]
-    array([4, 3, 6])
-
-    If `indices` is not one dimensional, the output also has these dimensions.
-
-    >>> np.take(a, [[0, 1], [2, 3]])
-    array([[4, 3],
-           [5, 7]])
-    """
-    return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
-
-
-def _reshape_dispatcher(a, newshape, order=None):
-    return (a,)
-
-
-# not deprecated --- copy if necessary, view otherwise
-@array_function_dispatch(_reshape_dispatcher)
-def reshape(a, newshape, order='C'):
-    """
-    Gives a new shape to an array without changing its data.
-
-    Parameters
-    ----------
-    a : array_like
-        Array to be reshaped.
-    newshape : int or tuple of ints
-        The new shape should be compatible with the original shape. If
-        an integer, then the result will be a 1-D array of that length.
-        One shape dimension can be -1. In this case, the value is
-        inferred from the length of the array and remaining dimensions.
-    order : {'C', 'F', 'A'}, optional
-        Read the elements of `a` using this index order, and place the
-        elements into the reshaped array using this index order. 'C'
-        means to read / write the elements using C-like index order,
-        with the last axis index changing fastest, back to the first
-        axis index changing slowest. 'F' means to read / write the
-        elements using Fortran-like index order, with the first index
-        changing fastest, and the last index changing slowest. Note that
-        the 'C' and 'F' options take no account of the memory layout of
-        the underlying array, and only refer to the order of indexing.
-        'A' means to read / write the elements in Fortran-like index
-        order if `a` is Fortran *contiguous* in memory, C-like order
-        otherwise.
-
-    Returns
-    -------
-    reshaped_array : ndarray
-        This will be a new view object if possible; otherwise, it will
-        be a copy. Note there is no guarantee of the *memory layout* (C- or
-        Fortran- contiguous) of the returned array.
-
-    See Also
-    --------
-    ndarray.reshape : Equivalent method.
- - Notes - ----- - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raised when the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - - # A transpose makes the array non-contiguous - >>> b = a.T - - # Taking a view makes it possible to modify the shape without modifying - # the initial object. - >>> c = b.view() - >>> c.shape = (20) - Traceback (most recent call last): - ... - AttributeError: incompatible shape for a non-contiguous array - - The `order` keyword gives the index ordering both for *fetching* the values - from `a`, and then *placing* the values into the output array. - For example, let's say you have an array: - - >>> a = np.arange(6).reshape((3, 2)) - >>> a - array([[0, 1], - [2, 3], - [4, 5]]) - - You can think of reshaping as first raveling the array (using the given - index order), then inserting the elements from the raveled array into the - new array using the same kind of index ordering as was used for the - raveling. - - >>> np.reshape(a, (2, 3)) # C-like index ordering - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering - array([[0, 4, 3], - [2, 1, 5]]) - >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') - array([[0, 4, 3], - [2, 1, 5]]) - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> np.reshape(a, 6) - array([1, 2, 3, 4, 5, 6]) - >>> np.reshape(a, 6, order='F') - array([1, 4, 2, 5, 3, 6]) - - >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 - array([[1, 2], - [3, 4], - [5, 6]]) - """ - return _wrapfunc(a, 'reshape', newshape, order=order) - - -def _choose_dispatcher(a, choices, out=None, mode=None): - yield a - for c in choices: - yield c - yield out - - -@array_function_dispatch(_choose_dispatcher) -def choose(a, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. - - First of all, if confused or uncertain, definitely look at the Examples - - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): - - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. - - But this omits some subtleties. Here is a fully general summary: - - Given an "index" array (`a`) of integers and a sequence of `n` arrays - (`choices`), `a` and each choice array are first broadcast, as necessary, - to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = - 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. 
Then, a new array with shape ``Ba.shape`` is created as - follows: - - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; - - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) - integer; modular arithmetic is used to map integers outside the range - `[0, n-1]` back into that range; and then the new array is constructed - as above; - - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. - - Parameters - ---------- - a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. - choices : sequence of arrays - Choice arrays. `a` and all of the choices must be broadcastable to the - same shape. If `choices` is itself an array (not recommended), then - its outermost dimension (i.e., the one corresponding to - ``choices.shape[0]``) is taken as defining the "sequence". - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. Note that `out` is always - buffered if `mode='raise'`; use other modes for better performance. - mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: - - * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` - * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 - - Returns - ------- - merged_array : array - The merged result. - - Raises - ------ - ValueError: shape mismatch - If `a` and each choice array are not all broadcastable to the same - shape. - - See Also - -------- - ndarray.choose : equivalent method - numpy.take_along_axis : Preferable if `choices` is an array - - Notes - ----- - To reduce the chance of misinterpretation, even though the following - "abuse" is nominally supported, `choices` should neither be, nor be - thought of as, a single array, i.e., the outermost sequence-like container - should be either a list or a tuple. - - Examples - -------- - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> np.choose([2, 3, 1, 0], choices - ... # the first element of the result will be the first element of the - ... # third (2+1) "array" in choices, namely, 20; the second element - ... # will be the second element of the fourth (3+1) choice array, i.e., - ... # 31, etc. - ... 
) - array([20, 31, 12, 3]) - >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) - array([20, 31, 12, 3]) - >>> # because there are 4 choice arrays - >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) - array([20, 1, 12, 3]) - >>> # i.e., 0 - - A couple examples illustrating how choose broadcasts: - - >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] - >>> choices = [-10, 10] - >>> np.choose(a, choices) - array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]]) - - >>> # With thanks to Anne Archibald - >>> a = np.array([0, 1]).reshape((2,1,1)) - >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) - >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) - >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 - array([[[ 1, 1, 1, 1, 1], - [ 2, 2, 2, 2, 2], - [ 3, 3, 3, 3, 3]], - [[-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5]]]) - - """ - return _wrapfunc(a, 'choose', choices, out=out, mode=mode) - - -def _repeat_dispatcher(a, repeats, axis=None): - return (a,) - - -@array_function_dispatch(_repeat_dispatcher) -def repeat(a, repeats, axis=None): - """ - Repeat elements of an array. - - Parameters - ---------- - a : array_like - Input array. - repeats : int or array of ints - The number of repetitions for each element. `repeats` is broadcasted - to fit the shape of the given axis. - axis : int, optional - The axis along which to repeat values. By default, use the - flattened input array, and return a flat output array. - - Returns - ------- - repeated_array : ndarray - Output array which has the same shape as `a`, except along - the given axis. - - See Also - -------- - tile : Tile an array. - - Examples - -------- - >>> np.repeat(3, 4) - array([3, 3, 3, 3]) - >>> x = np.array([[1,2],[3,4]]) - >>> np.repeat(x, 2) - array([1, 1, 2, 2, 3, 3, 4, 4]) - >>> np.repeat(x, 3, axis=1) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - >>> np.repeat(x, [1, 2], axis=0) - array([[1, 2], - [3, 4], - [3, 4]]) - - """ - return _wrapfunc(a, 'repeat', repeats, axis=axis) - - -def _put_dispatcher(a, ind, v, mode=None): - return (a, ind, v) - - -@array_function_dispatch(_put_dispatcher) -def put(a, ind, v, mode='raise'): - """ - Replaces specified elements of an array with given values. - - The indexing works on the flattened target array. `put` is roughly - equivalent to: - - :: - - a.flat[ind] = v - - Parameters - ---------- - a : ndarray - Target array. - ind : array_like - Target indices, interpreted as integers. - v : array_like - Values to place in `a` at target indices. If `v` is shorter than - `ind` it will be repeated as necessary. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. In 'raise' mode, - if an exception occurs the target array may still be modified. 
- - See Also - -------- - putmask, place - put_along_axis : Put elements by matching the array and the index arrays - - Examples - -------- - >>> a = np.arange(5) - >>> np.put(a, [0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) - - >>> a = np.arange(5) - >>> np.put(a, 22, -5, mode='clip') - >>> a - array([ 0, 1, 2, 3, -5]) - - """ - try: - put = a.put - except AttributeError: - raise TypeError("argument 1 must be numpy.ndarray, " - "not {name}".format(name=type(a).__name__)) - - return put(ind, v, mode=mode) - - -def _swapaxes_dispatcher(a, axis1, axis2): - return (a,) - - -@array_function_dispatch(_swapaxes_dispatcher) -def swapaxes(a, axis1, axis2): - """ - Interchange two axes of an array. - - Parameters - ---------- - a : array_like - Input array. - axis1 : int - First axis. - axis2 : int - Second axis. - - Returns - ------- - a_swapped : ndarray - For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is - returned; otherwise a new array is created. For earlier NumPy - versions a view of `a` is returned only if the order of the - axes is changed, otherwise the input array is returned. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> np.swapaxes(x,0,1) - array([[1], - [2], - [3]]) - - >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) - >>> x - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - - >>> np.swapaxes(x,0,2) - array([[[0, 4], - [2, 6]], - [[1, 5], - [3, 7]]]) - - """ - return _wrapfunc(a, 'swapaxes', axis1, axis2) - - -def _transpose_dispatcher(a, axes=None): - return (a,) - - -@array_function_dispatch(_transpose_dispatcher) -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. - axes : list of ints, optional - By default, reverse the dimensions, otherwise permute the axes - according to the values given. - - Returns - ------- - p : ndarray - `a` with its axes permuted. A view is returned whenever - possible. - - See Also - -------- - moveaxis - argsort - - Notes - ----- - Use `transpose(a, argsort(axes))` to invert the transposition of tensors - when using the `axes` keyword argument. - - Transposing a 1-D array returns an unchanged view of the original array. - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.transpose(x) - array([[0, 2], - [1, 3]]) - - >>> x = np.ones((1, 2, 3)) - >>> np.transpose(x, (1, 0, 2)).shape - (2, 1, 3) - - """ - return _wrapfunc(a, 'transpose', axes) - - -def _partition_dispatcher(a, kth, axis=None, kind=None, order=None): - return (a,) - - -@array_function_dispatch(_partition_dispatcher) -def partition(a, kth, axis=-1, kind='introselect', order=None): - """ - Return a partitioned copy of an array. - - Creates a copy of the array with its elements rearranged in such a - way that the value of the element in k-th position is in the - position it would be in a sorted array. All elements smaller than - the k-th element are moved before this element and all equal or - greater are moved behind it. The ordering of the elements in the two - partitions is undefined. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to be sorted. - kth : int or sequence of ints - Element index to partition by. The k-th value of the element - will be in its final sorted position and all smaller elements - will be moved before it and all equal or greater elements behind - it. The order of all elements in the partitions is undefined. 
If - provided with a sequence of k-th it will partition all elements - indexed by k-th of them into their sorted position at once. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. - order : str or list of str, optional - When `a` is an array with fields defined, this argument - specifies which fields to compare first, second, etc. A single - field can be specified as a string. Not all fields need be - specified, but unspecified fields will still be used, in the - order in which they come up in the dtype, to break ties. - - Returns - ------- - partitioned_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.partition : Method to sort an array in-place. - argpartition : Indirect partition. - sort : Full sorting - - Notes - ----- - The various selection algorithms are characterized by their average - speed, worst case performance, work space size, and whether they are - stable. A stable sort keeps items with the same key in the same - relative order. The available algorithms have the following - properties: - - ================= ======= ============= ============ ======= - kind speed worst case work space stable - ================= ======= ============= ============ ======= - 'introselect' 1 O(n) 0 no - ================= ======= ============= ============ ======= - - All the partition algorithms make temporary copies of the data when - partitioning along any but the last axis. Consequently, - partitioning along the last axis is faster and uses less space than - partitioning along any other axis. - - The sort order for complex numbers is lexicographic. If both the - real and imaginary parts are non-nan then the order is determined by - the real parts except when they are equal, in which case the order - is determined by the imaginary parts. - - Examples - -------- - >>> a = np.array([3, 4, 2, 1]) - >>> np.partition(a, 3) - array([2, 1, 3, 4]) - - >>> np.partition(a, (1, 3)) - array([1, 2, 3, 4]) - - """ - if axis is None: - # flatten returns (1, N) for np.matrix, so always use the last axis - a = asanyarray(a).flatten() - axis = -1 - else: - a = asanyarray(a).copy(order="K") - a.partition(kth, axis=axis, kind=kind, order=order) - return a - - -def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None): - return (a,) - - -@array_function_dispatch(_argpartition_dispatcher) -def argpartition(a, kth, axis=-1, kind='introselect', order=None): - """ - Perform an indirect partition along the given axis using the - algorithm specified by the `kind` keyword. It returns an array of - indices of the same shape as `a` that index data along the given - axis in partitioned order. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to sort. - kth : int or sequence of ints - Element index to partition by. The k-th element will be in its - final sorted position and all smaller elements will be moved - before it and all larger elements behind it. The order all - elements in the partitions is undefined. If provided with a - sequence of k-th it will partition all of them into their sorted - position at once. - axis : int or None, optional - Axis along which to sort. The default is -1 (the last axis). If - None, the flattened array is used. - kind : {'introselect'}, optional - Selection algorithm. 
Default is 'introselect' - order : str or list of str, optional - When `a` is an array with fields defined, this argument - specifies which fields to compare first, second, etc. A single - field can be specified as a string, and not all fields need be - specified, but unspecified fields will still be used, in the - order in which they come up in the dtype, to break ties. - - Returns - ------- - index_array : ndarray, int - Array of indices that partition `a` along the specified axis. - If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`. - More generally, ``np.take_along_axis(a, index_array, axis=a)`` always - yields the partitioned `a`, irrespective of dimensionality. - - See Also - -------- - partition : Describes partition algorithms used. - ndarray.partition : Inplace partition. - argsort : Full indirect sort. - take_along_axis : Apply ``index_array`` from argpartition - to an array as if by calling partition. - - Notes - ----- - See `partition` for notes on the different selection algorithms. - - Examples - -------- - One dimensional array: - - >>> x = np.array([3, 4, 2, 1]) - >>> x[np.argpartition(x, 3)] - array([2, 1, 3, 4]) - >>> x[np.argpartition(x, (1, 3))] - array([1, 2, 3, 4]) - - >>> x = [3, 4, 2, 1] - >>> np.array(x)[np.argpartition(x, 3)] - array([2, 1, 3, 4]) - - Multi-dimensional array: - - >>> x = np.array([[3, 4, 2], [1, 3, 1]]) - >>> index_array = np.argpartition(x, kth=1, axis=-1) - >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1) - array([[2, 3, 4], - [1, 1, 3]]) - - """ - return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order) - - -def _sort_dispatcher(a, axis=None, kind=None, order=None): - return (a,) - - -@array_function_dispatch(_sort_dispatcher) -def sort(a, axis=-1, kind=None, order=None): - """ - Return a sorted copy of an array. - - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional - Sorting algorithm. The default is 'quicksort'. Note that both 'stable' - and 'mergesort' use timsort or radix sort under the covers and, in general, - the actual implementation will vary with data type. The 'mergesort' option - is retained for backwards compatibility. - - .. versionchanged:: 1.15.0. - The 'stable' option was added. - - order : str or list of str, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. A single field can - be specified as a string, and not all fields need be specified, - but unspecified fields will still be used, in the order in which - they come up in the dtype, to break ties. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - partition : Partial sort. - - Notes - ----- - The various sorting algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. 
The four algorithms implemented in NumPy have the following
- properties:
-
- =========== ======= ============= ============ ========
- kind speed worst case work space stable
- =========== ======= ============= ============ ========
- 'quicksort' 1 O(n^2) 0 no
- 'heapsort' 3 O(n*log(n)) 0 no
- 'mergesort' 2 O(n*log(n)) ~n/2 yes
- 'timsort' 2 O(n*log(n)) ~n/2 yes
- =========== ======= ============= ============ ========
-
- .. note:: The datatype determines which of 'mergesort' or 'timsort'
- is actually used, even if 'mergesort' is specified. User selection
- at a finer scale is not currently available.
-
- All the sort algorithms make temporary copies of the data when
- sorting along any but the last axis. Consequently, sorting along
- the last axis is faster and uses less space than sorting along
- any other axis.
-
- The sort order for complex numbers is lexicographic. If both the real
- and imaginary parts are non-nan then the order is determined by the
- real parts except when they are equal, in which case the order is
- determined by the imaginary parts.
-
- Previous to numpy 1.4.0 sorting real and complex arrays containing nan
- values led to undefined behaviour. In numpy versions >= 1.4.0 nan
- values are sorted to the end. The extended sort order is:
-
- * Real: [R, nan]
- * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
-
- where R is a non-nan real value. Complex values with the same nan
- placements are sorted according to the non-nan part if it exists.
- Non-nan values are sorted as before.
-
- .. versionadded:: 1.12.0
-
- quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
- When sorting does not make enough progress it switches to
- `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
- This implementation makes quicksort O(n*log(n)) in the worst case.
-
- 'stable' automatically chooses the best stable sorting algorithm
- for the data type being sorted.
- It, along with 'mergesort' is currently mapped to
- `timsort <https://en.wikipedia.org/wiki/Timsort>`_
- or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
- depending on the data type.
- API forward compatibility currently limits the
- ability to select the implementation and it is hardwired for the different
- data types.
-
- .. versionadded:: 1.17.0
-
- Timsort is added for better performance on already or nearly
- sorted data. On random data timsort is almost identical to
- mergesort. It is now used for stable sort while quicksort is still the
- default sort if none is chosen. For timsort details, refer to
- `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
- 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
- O(n) sort instead of O(n log n).
-
- .. versionchanged:: 1.17.0
-
- NaT now sorts to the end of arrays for consistency with NaN.
-
- Examples
- --------
- >>> a = np.array([[1,4],[3,1]])
- >>> np.sort(a) # sort along the last axis
- array([[1, 4],
- [1, 3]])
- >>> np.sort(a, axis=None) # sort the flattened array
- array([1, 1, 3, 4])
- >>> np.sort(a, axis=0) # sort along the first axis
- array([[1, 1],
- [3, 4]])
-
- Use the `order` keyword to specify a field to use when sorting a
- structured array:
-
- >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
- >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
- ...
('Galahad', 1.7, 38)]
- >>> a = np.array(values, dtype=dtype) # create a structured array
- >>> np.sort(a, order='height') # doctest: +SKIP
- array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
- ('Lancelot', 1.8999999999999999, 38)],
- dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
-
- Sort by age, then height if ages are equal:
-
- >>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
- array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
- ('Arthur', 1.8, 41)],
- dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
-
- """
- if axis is None:
- # flatten returns (1, N) for np.matrix, so always use the last axis
- a = asanyarray(a).flatten()
- axis = -1
- else:
- a = asanyarray(a).copy(order="K")
- a.sort(axis=axis, kind=kind, order=order)
- return a
-
-
- def _argsort_dispatcher(a, axis=None, kind=None, order=None):
- return (a,)
-
-
- @array_function_dispatch(_argsort_dispatcher)
- def argsort(a, axis=-1, kind=None, order=None):
- """
- Returns the indices that would sort an array.
-
- Perform an indirect sort along the given axis using the algorithm
- specified by the `kind` keyword. It returns an array of indices of the
- same shape as `a` that index data along the given axis in sorted order.
-
- Parameters
- ----------
- a : array_like
- Array to sort.
- axis : int or None, optional
- Axis along which to sort. The default is -1 (the last axis). If
- None, the flattened array is used.
- kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
- and 'mergesort' use timsort or radix sort under the covers and, in general,
- the actual implementation will vary with data type. The 'mergesort' option
- is retained for backwards compatibility.
-
- .. versionchanged:: 1.15.0
- The 'stable' option was added.
- order : str or list of str, optional
- When `a` is an array with fields defined, this argument specifies
- which fields to compare first, second, etc. A single field can
- be specified as a string, and not all fields need be specified,
- but unspecified fields will still be used, in the order in which
- they come up in the dtype, to break ties.
-
- Returns
- -------
- index_array : ndarray, int
- Array of indices that sort `a` along the specified `axis`.
- If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
- More generally, ``np.take_along_axis(a, index_array, axis=axis)``
- always yields the sorted `a`, irrespective of dimensionality.
-
- See Also
- --------
- sort : Describes sorting algorithms used.
- lexsort : Indirect stable sort with multiple keys.
- ndarray.sort : Inplace sort.
- argpartition : Indirect partial sort.
- take_along_axis : Apply ``index_array`` from argsort
- to an array as if by calling sort.
-
- Notes
- -----
- See `sort` for notes on the different sorting algorithms.
-
- As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
- nan values. The enhanced sort order is documented in `sort`.
-
- Examples
- --------
- One dimensional array:
-
- >>> x = np.array([3, 1, 2])
- >>> np.argsort(x)
- array([1, 2, 0])
-
- Two-dimensional array:
-
- >>> x = np.array([[0, 3], [2, 2]])
- >>> x
- array([[0, 3],
- [2, 2]])
-
- >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
- >>> ind
- array([[0, 1],
- [1, 0]])
- >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
- array([[0, 2],
- [2, 3]])
-
- >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
- >>> ind
- array([[0, 1],
- [0, 1]])
- >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
- array([[0, 3],
- [2, 2]])
-
- Indices of the sorted elements of an N-dimensional array:
-
- >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
- >>> ind
- (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
- >>> x[ind] # same as np.sort(x, axis=None)
- array([0, 2, 2, 3])
-
- Sorting with keys:
-
- >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
- >>> x
- array([(1, 0), (0, 1)],
- dtype=[('x', '<i4'), ('y', '<i4')])
-
- >>> np.argsort(x, order=('x','y'))
- array([1, 0])
-
- >>> np.argsort(x, order=('y','x'))
- array([0, 1])
-
- """
- return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
-
-
- def _argmax_dispatcher(a, axis=None, out=None):
- return (a, out)
-
-
- @array_function_dispatch(_argmax_dispatcher)
- def argmax(a, axis=None, out=None):
- """
- Returns the indices of the maximum values along an axis.
-
- Parameters
- ----------
- a : array_like
- Input array.
- axis : int, optional
- By default, the index is into the flattened array, otherwise
- along the specified axis.
- out : array, optional
- If provided, the result will be inserted into this array. It should
- be of the appropriate shape and dtype.
-
- Returns
- -------
- index_array : ndarray of ints
- Array of indices into the array. It has the same shape as `a.shape`
- with the dimension along `axis` removed.
-
- See Also
- --------
- ndarray.argmax, argmin
- amax : The maximum value along a given axis.
- unravel_index : Convert a flat index into an index tuple.
- take_along_axis : Apply ``np.expand_dims(index_array, axis)``
- from argmax to an array as if by calling max.
-
- Notes
- -----
- In case of multiple occurrences of the maximum values, the indices
- corresponding to the first occurrence are returned.
-
- Examples
- --------
- >>> a = np.arange(6).reshape(2,3) + 10
- >>> a
- array([[10, 11, 12],
- [13, 14, 15]])
- >>> np.argmax(a)
- 5
- >>> np.argmax(a, axis=0)
- array([1, 1, 1])
- >>> np.argmax(a, axis=1)
- array([2, 2])
-
- Indexes of the maximal elements of an N-dimensional array:
-
- >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
- >>> ind
- (1, 2)
- >>> a[ind]
- 15
-
- >>> b = np.arange(6)
- >>> b[1] = 5
- >>> b
- array([0, 5, 2, 3, 4, 5])
- >>> np.argmax(b) # Only the first occurrence is returned.
- 1 - - >>> x = np.array([[4,2,3], [1,0,3]]) - >>> index_array = np.argmax(x, axis=-1) - >>> # Same as np.max(x, axis=-1, keepdims=True) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) - array([[4], - [3]]) - >>> # Same as np.max(x, axis=-1) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) - array([4, 3]) - - """ - return _wrapfunc(a, 'argmax', axis=axis, out=out) - - -def _argmin_dispatcher(a, axis=None, out=None): - return (a, out) - - -@array_function_dispatch(_argmin_dispatcher) -def argmin(a, axis=None, out=None): - """ - Returns the indices of the minimum values along an axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - - Returns - ------- - index_array : ndarray of ints - Array of indices into the array. It has the same shape as `a.shape` - with the dimension along `axis` removed. - - See Also - -------- - ndarray.argmin, argmax - amin : The minimum value along a given axis. - unravel_index : Convert a flat index into an index tuple. - take_along_axis : Apply ``np.expand_dims(index_array, axis)`` - from argmin to an array as if by calling min. - - Notes - ----- - In case of multiple occurrences of the minimum values, the indices - corresponding to the first occurrence are returned. - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) + 10 - >>> a - array([[10, 11, 12], - [13, 14, 15]]) - >>> np.argmin(a) - 0 - >>> np.argmin(a, axis=0) - array([0, 0, 0]) - >>> np.argmin(a, axis=1) - array([0, 0]) - - Indices of the minimum elements of a N-dimensional array: - - >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) - >>> ind - (0, 0) - >>> a[ind] - 10 - - >>> b = np.arange(6) + 10 - >>> b[4] = 10 - >>> b - array([10, 11, 12, 13, 10, 15]) - >>> np.argmin(b) # Only the first occurrence is returned. - 0 - - >>> x = np.array([[4,2,3], [1,0,3]]) - >>> index_array = np.argmin(x, axis=-1) - >>> # Same as np.min(x, axis=-1, keepdims=True) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) - array([[2], - [0]]) - >>> # Same as np.max(x, axis=-1) - >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) - array([2, 0]) - - """ - return _wrapfunc(a, 'argmin', axis=axis, out=out) - - -def _searchsorted_dispatcher(a, v, side=None, sorter=None): - return (a, v, sorter) - - -@array_function_dispatch(_searchsorted_dispatcher) -def searchsorted(a, v, side='left', sorter=None): - """ - Find indices where elements should be inserted to maintain order. - - Find the indices into a sorted array `a` such that, if the - corresponding elements in `v` were inserted before the indices, the - order of `a` would be preserved. - - Assuming that `a` is sorted: - - ====== ============================ - `side` returned index `i` satisfies - ====== ============================ - left ``a[i-1] < v <= a[i]`` - right ``a[i-1] <= v < a[i]`` - ====== ============================ - - Parameters - ---------- - a : 1-D array_like - Input array. If `sorter` is None, then it must be sorted in - ascending order, otherwise `sorter` must be an array of indices - that sort it. - v : array_like - Values to insert into `a`. - side : {'left', 'right'}, optional - If 'left', the index of the first suitable location found is given. 
- If 'right', return the last such index. If there is no suitable - index, return either 0 or N (where N is the length of `a`). - sorter : 1-D array_like, optional - Optional array of integer indices that sort array a into ascending - order. They are typically the result of argsort. - - .. versionadded:: 1.7.0 - - Returns - ------- - indices : array of ints - Array of insertion points with the same shape as `v`. - - See Also - -------- - sort : Return a sorted copy of an array. - histogram : Produce histogram from 1-D data. - - Notes - ----- - Binary search is used to find the required insertion points. - - As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing - `nan` values. The enhanced sort order is documented in `sort`. - - This function uses the same algorithm as the builtin python `bisect.bisect_left` - (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, - which is also vectorized in the `v` argument. - - Examples - -------- - >>> np.searchsorted([1,2,3,4,5], 3) - 2 - >>> np.searchsorted([1,2,3,4,5], 3, side='right') - 3 - >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) - array([0, 5, 1, 2]) - - """ - return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) - - -def _resize_dispatcher(a, new_shape): - return (a,) - - -@array_function_dispatch(_resize_dispatcher) -def resize(a, new_shape): - """ - Return a new array with the specified shape. - - If the new array is larger than the original array, then the new - array is filled with repeated copies of `a`. Note that this behavior - is different from a.resize(new_shape) which fills with zeros instead - of repeated copies of `a`. - - Parameters - ---------- - a : array_like - Array to be resized. - - new_shape : int or tuple of int - Shape of resized array. - - Returns - ------- - reshaped_array : ndarray - The new array is formed from the data in the old array, repeated - if necessary to fill out the required number of elements. The - data are repeated in the order that they are stored in memory. - - See Also - -------- - ndarray.resize : resize an array in-place. - - Notes - ----- - Warning: This functionality does **not** consider axes separately, - i.e. it does not apply interpolation/extrapolation. - It fills the return array with the required number of elements, taken - from `a` as they are laid out in memory, disregarding strides and axes. - (This is in case the new shape is smaller. For larger, see above.) - This functionality is therefore not suitable to resize images, - or data where each axis represents a separate and distinct entity. - - Examples - -------- - >>> a=np.array([[0,1],[2,3]]) - >>> np.resize(a,(2,3)) - array([[0, 1, 2], - [3, 0, 1]]) - >>> np.resize(a,(1,4)) - array([[0, 1, 2, 3]]) - >>> np.resize(a,(2,4)) - array([[0, 1, 2, 3], - [0, 1, 2, 3]]) - - """ - if isinstance(new_shape, (int, nt.integer)): - new_shape = (new_shape,) - a = ravel(a) - Na = len(a) - total_size = um.multiply.reduce(new_shape) - if Na == 0 or total_size == 0: - return mu.zeros(new_shape, a.dtype) - - n_copies = int(total_size / Na) - extra = total_size % Na - - if extra != 0: - n_copies = n_copies + 1 - extra = Na - extra - - a = concatenate((a,) * n_copies) - if extra > 0: - a = a[:-extra] - - return reshape(a, new_shape) - - -def _squeeze_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_squeeze_dispatcher) -def squeeze(a, axis=None): - """ - Remove single-dimensional entries from the shape of an array. - - Parameters - ---------- - a : array_like - Input data. 
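# A rough pure-Python mirror (an illustrative sketch, not the shipped
# implementation) of the repeat-and-trim logic in np.resize above;
# it assumes a non-empty input array and a non-empty target shape.
import numpy as np

def resize_sketch(a, new_shape):
    flat = np.ravel(a)
    total = int(np.prod(new_shape))
    reps = -(-total // flat.size)                # ceiling division
    return np.concatenate([flat] * reps)[:total].reshape(new_shape)

a = np.array([[0, 1], [2, 3]])
assert np.array_equal(resize_sketch(a, (2, 3)), np.resize(a, (2, 3)))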
- axis : None or int or tuple of ints, optional - .. versionadded:: 1.7.0 - - Selects a subset of the single-dimensional entries in the - shape. If an axis is selected with shape entry greater than - one, an error is raised. - - Returns - ------- - squeezed : ndarray - The input array, but with all or a subset of the - dimensions of length 1 removed. This is always `a` itself - or a view into `a`. - - Raises - ------ - ValueError - If `axis` is not None, and an axis being squeezed is not of length 1 - - See Also - -------- - expand_dims : The inverse operation, adding singleton dimensions - reshape : Insert, remove, and combine dimensions, and resize existing ones - - Examples - -------- - >>> x = np.array([[[0], [1], [2]]]) - >>> x.shape - (1, 3, 1) - >>> np.squeeze(x).shape - (3,) - >>> np.squeeze(x, axis=0).shape - (3, 1) - >>> np.squeeze(x, axis=1).shape - Traceback (most recent call last): - ... - ValueError: cannot select an axis to squeeze out which has size not equal to one - >>> np.squeeze(x, axis=2).shape - (1, 3) - - """ - try: - squeeze = a.squeeze - except AttributeError: - return _wrapit(a, 'squeeze', axis=axis) - if axis is None: - return squeeze() - else: - return squeeze(axis=axis) - - -def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): - return (a,) - - -@array_function_dispatch(_diagonal_dispatcher) -def diagonal(a, offset=0, axis1=0, axis2=1): - """ - Return specified diagonals. - - If `a` is 2-D, returns the diagonal of `a` with the given offset, - i.e., the collection of elements of the form ``a[i, i+offset]``. If - `a` has more than two dimensions, then the axes specified by `axis1` - and `axis2` are used to determine the 2-D sub-array whose diagonal is - returned. The shape of the resulting array can be determined by - removing `axis1` and `axis2` and appending an index to the right equal - to the size of the resulting diagonals. - - In versions of NumPy prior to 1.7, this function always returned a new, - independent array containing a copy of the values in the diagonal. - - In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, - but depending on this fact is deprecated. Writing to the resulting - array continues to work as it used to, but a FutureWarning is issued. - - Starting in NumPy 1.9 it returns a read-only view on the original array. - Attempting to write to the resulting array will produce an error. - - In some future release, it will return a read/write view and writing to - the returned array will alter your original array. The returned array - will have the same type as the input array. - - If you don't write to the array returned by this function, then you can - just ignore all of the above. - - If you depend on the current behavior, then we suggest copying the - returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead - of just ``np.diagonal(a)``. This will work with both past and future - versions of NumPy. - - Parameters - ---------- - a : array_like - Array from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be positive or - negative. Defaults to main diagonal (0). - axis1 : int, optional - Axis to be used as the first axis of the 2-D sub-arrays from which - the diagonals should be taken. Defaults to first axis (0). - axis2 : int, optional - Axis to be used as the second axis of the 2-D sub-arrays from - which the diagonals should be taken. Defaults to second axis (1). 
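# A small sketch (not part of the deleted file above) of the view guarantee
# in squeeze's Returns section: the result shares memory with the input.
import numpy as np

x = np.zeros((1, 3, 1))
v = np.squeeze(x)
assert np.shares_memory(x, v)
v[0] = 7.0                          # writes through to the original array
assert x[0, 0, 0] == 7.0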
- - Returns - ------- - array_of_diagonals : ndarray - If `a` is 2-D, then a 1-D array containing the diagonal and of the - same type as `a` is returned unless `a` is a `matrix`, in which case - a 1-D array rather than a (2-D) `matrix` is returned in order to - maintain backward compatibility. - - If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` - are removed, and a new axis inserted at the end corresponding to the - diagonal. - - Raises - ------ - ValueError - If the dimension of `a` is less than 2. - - See Also - -------- - diag : MATLAB work-a-like for 1-D and 2-D arrays. - diagflat : Create diagonal arrays. - trace : Sum along diagonals. - - Examples - -------- - >>> a = np.arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> a.diagonal() - array([0, 3]) - >>> a.diagonal(1) - array([1]) - - A 3-D example: - - >>> a = np.arange(8).reshape(2,2,2); a - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> a.diagonal(0, # Main diagonals of two arrays created by skipping - ... 0, # across the outer(left)-most axis last and - ... 1) # the "middle" (row) axis first. - array([[0, 6], - [1, 7]]) - - The sub-arrays whose main diagonals we just obtained; note that each - corresponds to fixing the right-most (column) axis, and that the - diagonals are "packed" in rows. - - >>> a[:,:,0] # main diagonal is [0 6] - array([[0, 2], - [4, 6]]) - >>> a[:,:,1] # main diagonal is [1 7] - array([[1, 3], - [5, 7]]) - - The anti-diagonal can be obtained by reversing the order of elements - using either `numpy.flipud` or `numpy.fliplr`. - - >>> a = np.arange(9).reshape(3, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - >>> np.fliplr(a).diagonal() # Horizontal flip - array([2, 4, 6]) - >>> np.flipud(a).diagonal() # Vertical flip - array([6, 4, 2]) - - Note that the order in which the diagonal is retrieved varies depending - on the flip function. - """ - if isinstance(a, np.matrix): - # Make diagonal of matrix 1-D to preserve backward compatibility. - return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) - else: - return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) - - -def _trace_dispatcher( - a, offset=None, axis1=None, axis2=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_trace_dispatcher) -def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """ - Return the sum along diagonals of the array. - - If `a` is 2-D, the sum along its diagonal with the given offset - is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. - - If `a` has more than two dimensions, then the axes specified by axis1 and - axis2 are used to determine the 2-D sub-arrays whose traces are returned. - The shape of the resulting array is the same as that of `a` with `axis1` - and `axis2` removed. - - Parameters - ---------- - a : array_like - Input array, from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be both positive - and negative. Defaults to 0. - axis1, axis2 : int, optional - Axes to be used as the first and second axis of the 2-D sub-arrays - from which the diagonals should be taken. Defaults are the first two - axes of `a`. - dtype : dtype, optional - Determines the data-type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and `a` is - of integer type of precision less than the default integer - precision, then the default integer precision is used. 
Otherwise, - the precision is the same as that of `a`. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and - it must be of the right shape to hold the output. - - Returns - ------- - sum_along_diagonals : ndarray - If `a` is 2-D, the sum along the diagonal is returned. If `a` has - larger dimensions, then an array of sums along diagonals is returned. - - See Also - -------- - diag, diagonal, diagflat - - Examples - -------- - >>> np.trace(np.eye(3)) - 3.0 - >>> a = np.arange(8).reshape((2,2,2)) - >>> np.trace(a) - array([6, 8]) - - >>> a = np.arange(24).reshape((2,2,2,3)) - >>> np.trace(a).shape - (2, 3) - - """ - if isinstance(a, np.matrix): - # Get trace of matrix via an array to preserve backward compatibility. - return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) - else: - return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) - - -def _ravel_dispatcher(a, order=None): - return (a,) - - -@array_function_dispatch(_ravel_dispatcher) -def ravel(a, order='C'): - """Return a contiguous flattened array. - - A 1-D array, containing the elements of the input, is returned. A copy is - made only if needed. - - As of NumPy 1.10, the returned array will have the same type as the input - array. (for example, a masked array will be returned for a masked array - input) - - Parameters - ---------- - a : array_like - Input array. The elements in `a` are read in the order specified by - `order`, and packed as a 1-D array. - order : {'C','F', 'A', 'K'}, optional - - The elements of `a` are read using this index order. 'C' means - to index the elements in row-major, C-style order, - with the last axis index changing fastest, back to the first - axis index changing slowest. 'F' means to index the elements - in column-major, Fortran-style order, with the - first index changing fastest, and the last index changing - slowest. Note that the 'C' and 'F' options take no account of - the memory layout of the underlying array, and only refer to - the order of axis indexing. 'A' means to read the elements in - Fortran-like index order if `a` is Fortran *contiguous* in - memory, C-like order otherwise. 'K' means to read the - elements in the order they occur in memory, except for - reversing the data when strides are negative. By default, 'C' - index order is used. - - Returns - ------- - y : array_like - y is an array of the same subtype as `a`, with shape ``(a.size,)``. - Note that matrices are special cased for backward compatibility, if `a` - is a matrix, then y is a 1-D ndarray. - - See Also - -------- - ndarray.flat : 1-D iterator over an array. - ndarray.flatten : 1-D array copy of the elements of an array - in row-major order. - ndarray.reshape : Change the shape of an array without changing its data. - - Notes - ----- - In row-major, C-style order, in two dimensions, the row index - varies the slowest, and the column index the quickest. This can - be generalized to multiple dimensions, where row-major order - implies that the index along the first axis varies slowest, and - the index along the last quickest. The opposite holds for - column-major, Fortran-style index ordering. - - When a view is desired in as many cases as possible, ``arr.reshape(-1)`` - may be preferable. - - Examples - -------- - It is equivalent to ``reshape(-1, order=order)``. 
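# A minimal sketch (not part of the deleted file above) relating np.trace to
# np.diagonal as described in the trace docstring: the trace is the sum over
# the diagonal that diagonal() appends as the last axis.
import numpy as np

a = np.arange(24).reshape(2, 2, 2, 3)
assert np.array_equal(np.trace(a, offset=1),
                      np.diagonal(a, offset=1).sum(axis=-1))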
- - >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.ravel(x) - array([1, 2, 3, 4, 5, 6]) - - >>> x.reshape(-1) - array([1, 2, 3, 4, 5, 6]) - - >>> np.ravel(x, order='F') - array([1, 4, 2, 5, 3, 6]) - - When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: - - >>> np.ravel(x.T) - array([1, 4, 2, 5, 3, 6]) - >>> np.ravel(x.T, order='A') - array([1, 2, 3, 4, 5, 6]) - - When ``order`` is 'K', it will preserve orderings that are neither 'C' - nor 'F', but won't reverse axes: - - >>> a = np.arange(3)[::-1]; a - array([2, 1, 0]) - >>> a.ravel(order='C') - array([2, 1, 0]) - >>> a.ravel(order='K') - array([2, 1, 0]) - - >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a - array([[[ 0, 2, 4], - [ 1, 3, 5]], - [[ 6, 8, 10], - [ 7, 9, 11]]]) - >>> a.ravel(order='C') - array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) - >>> a.ravel(order='K') - array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - - """ - if isinstance(a, np.matrix): - return asarray(a).ravel(order=order) - else: - return asanyarray(a).ravel(order=order) - - -def _nonzero_dispatcher(a): - return (a,) - - -@array_function_dispatch(_nonzero_dispatcher) -def nonzero(a): - """ - Return the indices of the elements that are non-zero. - - Returns a tuple of arrays, one for each dimension of `a`, - containing the indices of the non-zero elements in that - dimension. The values in `a` are always tested and returned in - row-major, C-style order. - - To group the indices by element, rather than dimension, use `argwhere`, - which returns a row for each non-zero element. - - .. note:: - - When called on a zero-d array or scalar, ``nonzero(a)`` is treated - as ``nonzero(atleast1d(a))``. - - .. deprecated:: 1.17.0 - - Use `atleast1d` explicitly if this behavior is deliberate. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - tuple_of_arrays : tuple - Indices of elements that are non-zero. - - See Also - -------- - flatnonzero : - Return indices that are non-zero in the flattened version of the input - array. - ndarray.nonzero : - Equivalent ndarray method. - count_nonzero : - Counts the number of non-zero elements in the input array. - - Notes - ----- - While the nonzero values can be obtained with ``a[nonzero(a)]``, it is - recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which - will correctly handle 0-d arrays. - - Examples - -------- - >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) - >>> x - array([[3, 0, 0], - [0, 4, 0], - [5, 6, 0]]) - >>> np.nonzero(x) - (array([0, 1, 2, 2]), array([0, 1, 0, 1])) - - >>> x[np.nonzero(x)] - array([3, 4, 5, 6]) - >>> np.transpose(np.nonzero(x)) - array([[0, 0], - [1, 1], - [2, 0], - [2, 1]]) - - A common use for ``nonzero`` is to find the indices of an array, where - a condition is True. Given an array `a`, the condition `a` > 3 is a - boolean array and since False is interpreted as 0, np.nonzero(a > 3) - yields the indices of the `a` where the condition is true. - - >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - >>> a > 3 - array([[False, False, False], - [ True, True, True], - [ True, True, True]]) - >>> np.nonzero(a > 3) - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - Using this result to index `a` is equivalent to using the mask directly: - - >>> a[np.nonzero(a > 3)] - array([4, 5, 6, 7, 8, 9]) - >>> a[a > 3] # prefer this spelling - array([4, 5, 6, 7, 8, 9]) - - ``nonzero`` can also be called as a method of the array. 
- - >>> (a > 3).nonzero() - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - """ - return _wrapfunc(a, 'nonzero') - - -def _shape_dispatcher(a): - return (a,) - - -@array_function_dispatch(_shape_dispatcher) -def shape(a): - """ - Return the shape of an array. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - shape : tuple of ints - The elements of the shape tuple give the lengths of the - corresponding array dimensions. - - See Also - -------- - alen - ndarray.shape : Equivalent array method. - - Examples - -------- - >>> np.shape(np.eye(3)) - (3, 3) - >>> np.shape([[1, 2]]) - (1, 2) - >>> np.shape([0]) - (1,) - >>> np.shape(0) - () - - >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - >>> np.shape(a) - (2,) - >>> a.shape - (2,) - - """ - try: - result = a.shape - except AttributeError: - result = asarray(a).shape - return result - - -def _compress_dispatcher(condition, a, axis=None, out=None): - return (condition, a, out) - - -@array_function_dispatch(_compress_dispatcher) -def compress(condition, a, axis=None, out=None): - """ - Return selected slices of an array along given axis. - - When working along a given axis, a slice along that axis is returned in - `output` for each index where `condition` evaluates to True. When - working on a 1-D array, `compress` is equivalent to `extract`. - - Parameters - ---------- - condition : 1-D array of bools - Array that selects which entries to return. If len(condition) - is less than the size of `a` along the given axis, then output is - truncated to the length of the condition array. - a : array_like - Array from which to extract a part. - axis : int, optional - Axis along which to take slices. If None (default), work on the - flattened array. - out : ndarray, optional - Output array. Its type is preserved and it must be of the right - shape to hold the output. - - Returns - ------- - compressed_array : ndarray - A copy of `a` without the slices along axis for which `condition` - is false. - - See Also - -------- - take, choose, diag, diagonal, select - ndarray.compress : Equivalent method in ndarray - np.extract: Equivalent method when working on 1-D arrays - ufuncs-output-type - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4], [5, 6]]) - >>> a - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.compress([0, 1], a, axis=0) - array([[3, 4]]) - >>> np.compress([False, True, True], a, axis=0) - array([[3, 4], - [5, 6]]) - >>> np.compress([False, True], a, axis=1) - array([[2], - [4], - [6]]) - - Working on the flattened array does not return slices along an axis but - selects elements. - - >>> np.compress([False, True], a) - array([2]) - - """ - return _wrapfunc(a, 'compress', condition, axis=axis, out=out) - - -def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs): - return (a, a_min, a_max) - - -@array_function_dispatch(_clip_dispatcher) -def clip(a, a_min, a_max, out=None, **kwargs): - """ - Clip (limit) the values in an array. - - Given an interval, values outside the interval are clipped to - the interval edges. For example, if an interval of ``[0, 1]`` - is specified, values smaller than 0 become 0, and values larger - than 1 become 1. - - Equivalent to but faster than ``np.maximum(a_min, np.minimum(a, a_max))``. - No check is performed to ensure ``a_min < a_max``. - - Parameters - ---------- - a : array_like - Array containing elements to clip. - a_min : scalar or array_like or None - Minimum value. If None, clipping is not performed on lower - interval edge. 
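# A minimal sketch (not part of the deleted file above) of the equivalences
# noted for compress: axis slicing matches boolean indexing, and the 1-D
# case matches np.extract.
import numpy as np

a = np.array([[1, 2], [3, 4], [5, 6]])
cond = np.array([False, True, True])
assert np.array_equal(np.compress(cond, a, axis=0), a[cond])

flat = np.ravel(a)
assert np.array_equal(np.compress(flat > 4, flat), np.extract(flat > 4, flat))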
Not more than one of `a_min` and `a_max` may be
- None.
- a_max : scalar or array_like or None
- Maximum value. If None, clipping is not performed on upper
- interval edge. Not more than one of `a_min` and `a_max` may be
- None. If `a_min` or `a_max` are array_like, then the three
- arrays will be broadcasted to match their shapes.
- out : ndarray, optional
- The results will be placed in this array. It may be the input
- array for in-place clipping. `out` must be of the right shape
- to hold the output. Its type is preserved.
- **kwargs
- For other keyword-only arguments, see the
- :ref:`ufunc docs <ufuncs.kwargs>`.
-
- .. versionadded:: 1.17.0
-
- Returns
- -------
- clipped_array : ndarray
- An array with the elements of `a`, but where values
- < `a_min` are replaced with `a_min`, and those > `a_max`
- with `a_max`.
-
- See Also
- --------
- ufuncs-output-type
-
- Examples
- --------
- >>> a = np.arange(10)
- >>> np.clip(a, 1, 8)
- array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
- >>> a
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> np.clip(a, 3, 6, out=a)
- array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
- >>> a = np.arange(10)
- >>> a
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
- array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
-
- """
- return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
-
-
- def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
- initial=None, where=None):
- return (a, out)
-
-
- @array_function_dispatch(_sum_dispatcher)
- def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
- initial=np._NoValue, where=np._NoValue):
- """
- Sum of array elements over a given axis.
-
- Parameters
- ----------
- a : array_like
- Elements to sum.
- axis : None or int or tuple of ints, optional
- Axis or axes along which a sum is performed. The default,
- axis=None, will sum all of the elements of the input array. If
- axis is negative it counts from the last to the first axis.
-
- .. versionadded:: 1.7.0
-
- If axis is a tuple of ints, a sum is performed on all of the axes
- specified in the tuple instead of a single axis or all the axes as
- before.
- dtype : dtype, optional
- The type of the returned array and of the accumulator in which the
- elements are summed. The dtype of `a` is used by default unless `a`
- has an integer dtype of less precision than the default platform
- integer. In that case, if `a` is signed then the platform integer
- is used while if `a` is unsigned then an unsigned integer of the
- same precision as the platform integer is used.
- out : ndarray, optional
- Alternative output array in which to place the result. It must have
- the same shape as the expected output, but the type of the output
- values will be cast if necessary.
- keepdims : bool, optional
- If this is set to True, the axes which are reduced are left
- in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the input array.
-
- If the default value is passed, then `keepdims` will not be
- passed through to the `sum` method of sub-classes of
- `ndarray`, however any non-default value will be. If the
- sub-class' method does not implement `keepdims` any
- exceptions will be raised.
- initial : scalar, optional
- Starting value for the sum. See `~numpy.ufunc.reduce` for details.
-
- .. versionadded:: 1.15.0
-
- where : array_like of bool, optional
- Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
-
- ..
versionadded:: 1.17.0 - - Returns - ------- - sum_along_axis : ndarray - An array with the same shape as `a`, with the specified - axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar - is returned. If an output array is specified, a reference to - `out` is returned. - - See Also - -------- - ndarray.sum : Equivalent method. - - add.reduce : Equivalent functionality of `add`. - - cumsum : Cumulative sum of array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - mean, average - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - The sum of an empty array is the neutral element 0: - - >>> np.sum([]) - 0.0 - - For floating point numbers the numerical precision of sum (and - ``np.add.reduce``) is in general limited by directly adding each number - individually to the result causing rounding errors in every step. - However, often numpy will use a numerically better approach (partial - pairwise summation) leading to improved precision in many use-cases. - This improved precision is always provided when no ``axis`` is given. - When ``axis`` is given, it will depend on which axis is summed. - Technically, to provide the best speed possible, the improved precision - is only used when the summation is along the fast axis in memory. - Note that the exact precision may vary depending on other parameters. - In contrast to NumPy, Python's ``math.fsum`` function uses a slower but - more precise approach to summation. - Especially when summing a large number of lower precision floating point - numbers, such as ``float32``, numerical errors can become significant. - In such cases it can be advisable to use `dtype="float64"` to use a higher - precision for the output. - - Examples - -------- - >>> np.sum([0.5, 1.5]) - 2.0 - >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) - 1 - >>> np.sum([[0, 1], [0, 5]]) - 6 - >>> np.sum([[0, 1], [0, 5]], axis=0) - array([0, 6]) - >>> np.sum([[0, 1], [0, 5]], axis=1) - array([1, 5]) - >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) - array([1., 5.]) - - If the accumulator is too small, overflow occurs: - - >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) - -128 - - You can also start the sum with a value other than zero: - - >>> np.sum([10], initial=5) - 15 - """ - if isinstance(a, _gentype): - # 2018-02-25, 1.15.0 - warnings.warn( - "Calling np.sum(generator) is deprecated, and in the future will give a different result. " - "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.", - DeprecationWarning, stacklevel=3) - - res = _sum_(a) - if out is not None: - out[...] = res - return out - return res - - return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, - initial=initial, where=where) - - -def _any_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_any_dispatcher) -def any(a, axis=None, out=None, keepdims=np._NoValue): - """ - Test whether any array element along a given axis evaluates to True. - - Returns single boolean unless `axis` is not ``None`` - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : None or int or tuple of ints, optional - Axis or axes along which a logical OR reduction is performed. - The default (``axis=None``) is to perform a logical OR over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. 
versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output and its type is preserved - (e.g., if it is of type float, then it will remain so, returning - 1.0 for True and 0.0 for False, regardless of the type of `a`). - See `ufuncs-output-type` for more details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `any` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - any : bool or ndarray - A new boolean or `ndarray` is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.any : equivalent method - - all : Test whether all elements along a given axis evaluate to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity evaluate - to `True` because these are not equal to zero. - - Examples - -------- - >>> np.any([[True, False], [True, True]]) - True - - >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False]) - - >>> np.any([-1, 0, 5]) - True - - >>> np.any(np.nan) - True - - >>> o=np.array(False) - >>> z=np.any([-1, 4, 5], out=o) - >>> z, o - (array(True), array(True)) - >>> # Check now that z is a reference to o - >>> z is o - True - >>> id(z), id(o) # identity of z and o # doctest: +SKIP - (191614240, 191614240) - - """ - return _wrapreduction(a, np.logical_or, 'any', axis, None, out, keepdims=keepdims) - - -def _all_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_all_dispatcher) -def all(a, axis=None, out=None, keepdims=np._NoValue): - """ - Test whether all array elements along a given axis evaluate to True. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : None or int or tuple of ints, optional - Axis or axes along which a logical AND reduction is performed. - The default (``axis=None``) is to perform a logical AND over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : ndarray, optional - Alternate output array in which to place the result. - It must have the same shape as the expected output and its - type is preserved (e.g., if ``dtype(out)`` is float, the result - will consist of 0.0's and 1.0's). See `ufuncs-output-type` for more - details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `all` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. 
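# A minimal sketch (not part of the deleted file above) of the tuple-of-ints
# axis reduction described for all(): it ANDs over several axes at once.
import numpy as np

a = np.ones((2, 3, 4), dtype=bool)
a[1, 0, 3] = False
assert np.array_equal(np.all(a, axis=(0, 2)), np.array([False, True, True]))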
- - Returns - ------- - all : ndarray, bool - A new boolean or array is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.all : equivalent method - - any : Test whether any element along a given axis evaluates to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity - evaluate to `True` because these are not equal to zero. - - Examples - -------- - >>> np.all([[True,False],[True,True]]) - False - - >>> np.all([[True,False],[True,True]], axis=0) - array([ True, False]) - - >>> np.all([-1, 4, 5]) - True - - >>> np.all([1.0, np.nan]) - True - - >>> o=np.array(False) - >>> z=np.all([-1, 4, 5], out=o) - >>> id(z), id(o), z - (28293632, 28293632, array(True)) # may vary - - """ - return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims) - - -def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_cumsum_dispatcher) -def cumsum(a, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of the elements along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative sum is computed. The default - (None) is to compute the cumsum over the flattened array. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `ufuncs-output-type` for - more details. - - Returns - ------- - cumsum_along_axis : ndarray. - A new array holding the result is returned unless `out` is - specified, in which case a reference to `out` is returned. The - result has the same size as `a`, and the same shape as `a` if - `axis` is not None or `a` is a 1-d array. - - - See Also - -------- - sum : Sum array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - diff : Calculate the n-th discrete difference along given axis. - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.cumsum(a) - array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) - array([ 1., 3., 6., 10., 15., 21.]) - - >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns - array([[1, 2, 3], - [5, 7, 9]]) - >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows - array([[ 1, 3, 6], - [ 4, 9, 15]]) - - """ - return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) - - -def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_ptp_dispatcher) -def ptp(a, axis=None, out=None, keepdims=np._NoValue): - """ - Range of values (maximum - minimum) along an axis. - - The name of the function comes from the acronym for 'peak to peak'. - - Parameters - ---------- - a : array_like - Input values. - axis : None or int or tuple of ints, optional - Axis along which to find the peaks. By default, flatten the - array. 
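# A minimal sketch (not part of the deleted file above) of the cumsum/sum
# relationship implied by the cumsum docstring: the last cumulative entry
# equals the full reduction.
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
assert np.cumsum(a)[-1] == np.sum(a)
assert np.array_equal(np.cumsum(a, axis=0)[-1], np.sum(a, axis=0))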
`axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.15.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : array_like - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type of the output values will be cast if necessary. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `ptp` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - ptp : ndarray - A new array holding the result, unless `out` was - specified, in which case a reference to `out` is returned. - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.ptp(x, axis=0) - array([2, 2]) - - >>> np.ptp(x, axis=1) - array([1, 1]) - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is not mu.ndarray: - try: - ptp = a.ptp - except AttributeError: - pass - else: - return ptp(axis=axis, out=out, **kwargs) - return _methods._ptp(a, axis=axis, out=out, **kwargs) - - -def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, - where=None): - return (a, out) - - -@array_function_dispatch(_amax_dispatcher) -def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, - where=np._NoValue): - """ - Return the maximum of an array or maximum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - Axis or axes along which to operate. By default, flattened input is - used. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, the maximum is selected over multiple axes, - instead of a single axis or all the axes as before. - out : ndarray, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - See `ufuncs-output-type` for more details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `amax` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - initial : scalar, optional - The minimum value of an output element. Must be present to allow - computation on empty slice. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - Elements to compare for the maximum. See `~numpy.ufunc.reduce` - for details. - - .. versionadded:: 1.17.0 - - Returns - ------- - amax : ndarray or scalar - Maximum of `a`. If `axis` is None, the result is a scalar value. - If `axis` is given, the result is an array of dimension - ``a.ndim - 1``. - - See Also - -------- - amin : - The minimum value of an array along a given axis, propagating any NaNs. 
- nanmax : - The maximum value of an array along a given axis, ignoring any NaNs. - maximum : - Element-wise maximum of two arrays, propagating any NaNs. - fmax : - Element-wise maximum of two arrays, ignoring any NaNs. - argmax : - Return the indices of the maximum values. - - nanmin, minimum, fmin - - Notes - ----- - NaN values are propagated, that is if at least one item is NaN, the - corresponding max value will be NaN as well. To ignore NaN values - (MATLAB behavior), please use nanmax. - - Don't use `amax` for element-wise comparison of 2 arrays; when - ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than - ``amax(a, axis=0)``. - - Examples - -------- - >>> a = np.arange(4).reshape((2,2)) - >>> a - array([[0, 1], - [2, 3]]) - >>> np.amax(a) # Maximum of the flattened array - 3 - >>> np.amax(a, axis=0) # Maxima along the first axis - array([2, 3]) - >>> np.amax(a, axis=1) # Maxima along the second axis - array([1, 3]) - >>> np.amax(a, where=[False, True], initial=-1, axis=0) - array([-1, 3]) - >>> b = np.arange(5, dtype=float) - >>> b[2] = np.NaN - >>> np.amax(b) - nan - >>> np.amax(b, where=~np.isnan(b), initial=-1) - 4.0 - >>> np.nanmax(b) - 4.0 - - You can use an initial value to compute the maximum of an empty slice, or - to initialize it to a different value: - - >>> np.max([[-50], [10]], axis=-1, initial=0) - array([ 0, 10]) - - Notice that the initial value is used as one of the elements for which the - maximum is determined, unlike for the default argument Python's max - function, which is only used for empty iterables. - - >>> np.max([5], initial=6) - 6 - >>> max([5], default=6) - 5 - """ - return _wrapreduction(a, np.maximum, 'max', axis, None, out, - keepdims=keepdims, initial=initial, where=where) - - -def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, - where=None): - return (a, out) - - -@array_function_dispatch(_amin_dispatcher) -def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, - where=np._NoValue): - """ - Return the minimum of an array or minimum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - Axis or axes along which to operate. By default, flattened input is - used. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, the minimum is selected over multiple axes, - instead of a single axis or all the axes as before. - out : ndarray, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - See `ufuncs-output-type` for more details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `amin` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - initial : scalar, optional - The maximum value of an output element. Must be present to allow - computation on empty slice. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - Elements to compare for the minimum. See `~numpy.ufunc.reduce` - for details. - - .. versionadded:: 1.17.0 - - Returns - ------- - amin : ndarray or scalar - Minimum of `a`. 
If `axis` is None, the result is a scalar value.
- If `axis` is given, the result is an array of dimension
- ``a.ndim - 1``.
-
- See Also
- --------
- amax :
- The maximum value of an array along a given axis, propagating any NaNs.
- nanmin :
- The minimum value of an array along a given axis, ignoring any NaNs.
- minimum :
- Element-wise minimum of two arrays, propagating any NaNs.
- fmin :
- Element-wise minimum of two arrays, ignoring any NaNs.
- argmin :
- Return the indices of the minimum values.
-
- nanmax, maximum, fmax
-
- Notes
- -----
- NaN values are propagated, that is if at least one item is NaN, the
- corresponding min value will be NaN as well. To ignore NaN values
- (MATLAB behavior), please use nanmin.
-
- Don't use `amin` for element-wise comparison of 2 arrays; when
- ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
- ``amin(a, axis=0)``.
-
- Examples
- --------
- >>> a = np.arange(4).reshape((2,2))
- >>> a
- array([[0, 1],
- [2, 3]])
- >>> np.amin(a) # Minimum of the flattened array
- 0
- >>> np.amin(a, axis=0) # Minima along the first axis
- array([0, 1])
- >>> np.amin(a, axis=1) # Minima along the second axis
- array([0, 2])
- >>> np.amin(a, where=[False, True], initial=10, axis=0)
- array([10, 1])
-
- >>> b = np.arange(5, dtype=float)
- >>> b[2] = np.NaN
- >>> np.amin(b)
- nan
- >>> np.amin(b, where=~np.isnan(b), initial=10)
- 0.0
- >>> np.nanmin(b)
- 0.0
-
- >>> np.min([[-50], [10]], axis=-1, initial=0)
- array([-50, 0])
-
- Notice that the initial value is used as one of the elements for which the
- minimum is determined, unlike for the default argument of Python's ``min``
- function, which is only used for empty iterables.
-
- Notice that this isn't the same as Python's ``default`` argument.
-
- >>> np.min([6], initial=5)
- 5
- >>> min([6], default=5)
- 6
- """
- return _wrapreduction(a, np.minimum, 'min', axis, None, out,
- keepdims=keepdims, initial=initial, where=where)
-
-
- def _alen_dispatcher(a):
- return (a,)
-
-
- @array_function_dispatch(_alen_dispatcher)
- def alen(a):
- """
- Return the length of the first dimension of the input array.
-
- Parameters
- ----------
- a : array_like
- Input array.
-
- Returns
- -------
- alen : int
- Length of the first dimension of `a`.
-
- See Also
- --------
- shape, size
-
- Examples
- --------
- >>> a = np.zeros((7,4,5))
- >>> a.shape[0]
- 7
- >>> np.alen(a)
- 7
-
- """
- # NumPy 1.18.0, 2019-08-02
- warnings.warn(
- "`np.alen` is deprecated, use `len` instead",
- DeprecationWarning, stacklevel=2)
- try:
- return len(a)
- except TypeError:
- return len(array(a, ndmin=1))
-
-
- def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
- initial=None, where=None):
- return (a, out)
-
-
- @array_function_dispatch(_prod_dispatcher)
- def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
- initial=np._NoValue, where=np._NoValue):
- """
- Return the product of array elements over a given axis.
-
- Parameters
- ----------
- a : array_like
- Input data.
- axis : None or int or tuple of ints, optional
- Axis or axes along which a product is performed. The default,
- axis=None, will calculate the product of all the elements in the
- input array. If axis is negative it counts from the last to the
- first axis.
-
- .. versionadded:: 1.7.0
-
- If axis is a tuple of ints, a product is performed on all of the
- axes specified in the tuple instead of a single axis or all the
- axes as before.
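# A minimal sketch (not part of the deleted file above) of the
# NaN-propagation and where/initial behaviour documented for amin.
import numpy as np

b = np.array([np.nan, 2.0, 0.5])
assert np.isnan(np.amin(b))                                   # NaN propagates
assert np.amin(b, where=~np.isnan(b), initial=np.inf) == 0.5  # NaN masked out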
- dtype : dtype, optional - The type of the returned array, as well as of the accumulator in - which the elements are multiplied. The dtype of `a` is used by - default unless `a` has an integer dtype of less precision than the - default platform integer. In that case, if `a` is signed then the - platform integer is used while if `a` is unsigned then an unsigned - integer of the same precision as the platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the output - values will be cast if necessary. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result - will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `prod` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - initial : scalar, optional - The starting value for this product. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - - where : array_like of bool, optional - Elements to include in the product. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.17.0 - - Returns - ------- - product_along_axis : ndarray, see `dtype` parameter above. - An array shaped as `a` but with the specified axis removed. - Returns a reference to `out` if specified. - - See Also - -------- - ndarray.prod : equivalent method - ufuncs-output-type - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. That means that, on a 32-bit platform: - - >>> x = np.array([536870910, 536870910, 536870910, 536870910]) - >>> np.prod(x) - 16 # may vary - - The product of an empty array is the neutral element 1: - - >>> np.prod([]) - 1.0 - - Examples - -------- - By default, calculate the product of all elements: - - >>> np.prod([1.,2.]) - 2.0 - - Even when the input array is two-dimensional: - - >>> np.prod([[1.,2.],[3.,4.]]) - 24.0 - - But we can also specify the axis over which to multiply: - - >>> np.prod([[1.,2.],[3.,4.]], axis=1) - array([ 2., 12.]) - - Or select specific elements to include: - - >>> np.prod([1., np.nan, 3.], where=[True, False, True]) - 3.0 - - If the type of `x` is unsigned, then the output type is - the unsigned platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.uint8) - >>> np.prod(x).dtype == np.uint - True - - If `x` is of a signed integer type, then the output type - is the default platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.int8) - >>> np.prod(x).dtype == int - True - - You can also start the product with a value other than one: - - >>> np.prod([1, 2], initial=5) - 10 - """ - return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, - keepdims=keepdims, initial=initial, where=where) - - -def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_cumprod_dispatcher) -def cumprod(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product of elements along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative product is computed. By default - the input is flattened. 
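# A minimal sketch (not part of the deleted file above) of the modular
# integer arithmetic warned about in the prod notes: a 32-bit accumulator
# wraps silently instead of raising.
import numpy as np

x = np.full(4, 536870910, dtype=np.int32)
assert 536870910 ** 4 % 2 ** 32 == 16     # exact value, reduced mod 2**32
assert np.prod(x, dtype=np.int32) == 16   # the wrapped, inexact result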
- dtype : dtype, optional - Type of the returned array, as well as of the accumulator in which - the elements are multiplied. If *dtype* is not specified, it - defaults to the dtype of `a`, unless `a` has an integer dtype with - a precision less than that of the default platform integer. In - that case, the default platform integer is used instead. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type of the resulting values will be cast if necessary. - - Returns - ------- - cumprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case a reference to out is returned. - - See Also - -------- - ufuncs-output-type - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> a = np.array([1,2,3]) - >>> np.cumprod(a) # intermediate results 1, 1*2 - ... # total product 1*2*3 = 6 - array([1, 2, 6]) - >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output - array([ 1., 2., 6., 24., 120., 720.]) - - The cumulative product for each column (i.e., over the rows) of `a`: - - >>> np.cumprod(a, axis=0) - array([[ 1, 2, 3], - [ 4, 10, 18]]) - - The cumulative product for each row (i.e. over the columns) of `a`: - - >>> np.cumprod(a,axis=1) - array([[ 1, 2, 6], - [ 4, 20, 120]]) - - """ - return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) - - -def _ndim_dispatcher(a): - return (a,) - - -@array_function_dispatch(_ndim_dispatcher) -def ndim(a): - """ - Return the number of dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. If it is not already an ndarray, a conversion is - attempted. - - Returns - ------- - number_of_dimensions : int - The number of dimensions in `a`. Scalars are zero-dimensional. - - See Also - -------- - ndarray.ndim : equivalent method - shape : dimensions of array - ndarray.shape : dimensions of array - - Examples - -------- - >>> np.ndim([[1,2,3],[4,5,6]]) - 2 - >>> np.ndim(np.array([[1,2,3],[4,5,6]])) - 2 - >>> np.ndim(1) - 0 - - """ - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def _size_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_size_dispatcher) -def size(a, axis=None): - """ - Return the number of elements along a given axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which the elements are counted. By default, give - the total number of elements. - - Returns - ------- - element_count : int - Number of elements along the specified axis. - - See Also - -------- - shape : dimensions of array - ndarray.shape : dimensions of array - ndarray.size : number of elements in array - - Examples - -------- - >>> a = np.array([[1,2,3],[4,5,6]]) - >>> np.size(a) - 6 - >>> np.size(a,1) - 3 - >>> np.size(a,0) - 2 - - """ - if axis is None: - try: - return a.size - except AttributeError: - return asarray(a).size - else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] - - -def _around_dispatcher(a, decimals=None, out=None): - return (a, out) - - -@array_function_dispatch(_around_dispatcher) -def around(a, decimals=0, out=None): - """ - Evenly round to the given number of decimals. - - Parameters - ---------- - a : array_like - Input data. - decimals : int, optional - Number of decimal places to round to (default: 0). 
If - decimals is negative, it specifies the number of positions to - the left of the decimal point. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the output - values will be cast if necessary. See `ufuncs-output-type` for more - details. - - Returns - ------- - rounded_array : ndarray - An array of the same type as `a`, containing the rounded values. - Unless `out` was specified, a new array is created. A reference to - the result is returned. - - The real and imaginary parts of complex numbers are rounded - separately. The result of rounding a float is a float. - - See Also - -------- - ndarray.round : equivalent method - - ceil, fix, floor, rint, trunc - - - Notes - ----- - For values exactly halfway between rounded decimal values, NumPy - rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, - -0.5 and 0.5 round to 0.0, etc. - - ``np.around`` uses a fast but sometimes inexact algorithm to round - floating-point datatypes. For positive `decimals` it is equivalent to - ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has - error due to the inexact representation of decimal fractions in the IEEE - floating point standard [1]_ and errors introduced when scaling by powers - of ten. For instance, note the extra "1" in the following: - - >>> np.round(56294995342131.5, 3) - 56294995342131.51 - - If your goal is to print such values with a fixed number of decimals, it is - preferable to use numpy's float printing routines to limit the number of - printed decimals: - - >>> np.format_float_positional(56294995342131.5, precision=3) - '56294995342131.5' - - The float printing routines use an accurate but much more computationally - demanding algorithm to compute the number of digits after the decimal - point. - - Alternatively, Python's builtin `round` function uses a more accurate - but slower algorithm for 64-bit floating point values: - - >>> round(56294995342131.5, 3) - 56294995342131.5 - >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 - (16.06, 16.05) - - - References - ---------- - .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, - https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF - .. [2] "How Futile are Mindless Assessments of - Roundoff in Floating-Point Computation?", William Kahan, - https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf - - Examples - -------- - >>> np.around([0.37, 1.64]) - array([0., 2.]) - >>> np.around([0.37, 1.64], decimals=1) - array([0.4, 1.6]) - >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value - array([0., 2., 2., 4., 4.]) - >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned - array([ 1, 2, 3, 11]) - >>> np.around([1,2,3,11], decimals=-1) - array([ 0, 0, 0, 10]) - - """ - return _wrapfunc(a, 'round', decimals=decimals, out=out) - - -def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_mean_dispatcher) -def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Compute the arithmetic mean along the specified axis. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - `float64` intermediate and return values are used for integer inputs. - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. 
If `a` is not an - array, a conversion is attempted. - axis : None or int or tuple of ints, optional - Axis or axes along which the means are computed. The default is to - compute the mean of the flattened array. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a mean is performed over multiple axes, - instead of a single axis or all the axes as before. - dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default - is `float64`; for floating point inputs, it is the same as the - input dtype. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. - See `ufuncs-output-type` for more details. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `mean` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - m : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. - - See Also - -------- - average : Weighted average - std, var, nanmean, nanstd, nanvar - - Notes - ----- - The arithmetic mean is the sum of the elements along the axis divided - by the number of elements. - - Note that for floating-point input, the mean is computed using the - same precision the input has. Depending on the input data, this can - cause the results to be inaccurate, especially for `float32` (see - example below). Specifying a higher-precision accumulator using the - `dtype` keyword can alleviate this issue. - - By default, `float16` results are computed using `float32` intermediates - for extra precision. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.mean(a) - 2.5 - >>> np.mean(a, axis=0) - array([2., 3.]) - >>> np.mean(a, axis=1) - array([1.5, 3.5]) - - In single precision, `mean` can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.mean(a) - 0.54999924 - - Computing the mean in float64 is more accurate: - - >>> np.mean(a, dtype=np.float64) - 0.55000000074505806 # may vary - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is not mu.ndarray: - try: - mean = a.mean - except AttributeError: - pass - else: - return mean(axis=axis, dtype=dtype, out=out, **kwargs) - - return _methods._mean(a, axis=axis, dtype=dtype, - out=out, **kwargs) - - -def _std_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_std_dispatcher) -def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the standard deviation along the specified axis. - - Returns the standard deviation, a measure of the spread of a distribution, - of the array elements. The standard deviation is computed for the - flattened array by default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Calculate the standard deviation of these values. 
- axis : None or int or tuple of ints, optional - Axis or axes along which the standard deviation is computed. The - default is to compute the standard deviation of the flattened array. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a standard deviation is performed over - multiple axes, instead of a single axis or all the axes as before. - dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default is float64, for arrays of float types it is - the same as the array type. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type (of the calculated - values) will be cast if necessary. - ddof : int, optional - Means Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. - By default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `std` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - standard_deviation : ndarray, see dtype parameter above. - If `out` is None, return a new array containing the standard deviation, - otherwise return a reference to the output array. - - See Also - -------- - var, mean, nanmean, nanstd, nanvar - ufuncs-output-type - - Notes - ----- - The standard deviation is the square root of the average of the squared - deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as - ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, - the divisor ``N - ddof`` is used instead. In standard statistical - practice, ``ddof=1`` provides an unbiased estimator of the variance - of the infinite population. ``ddof=0`` provides a maximum likelihood - estimate of the variance for normally distributed variables. The - standard deviation computed in this function is the square root of - the estimated variance, so even with ``ddof=1``, it will not be an - unbiased estimate of the standard deviation per se. - - Note that, for complex numbers, `std` takes the absolute - value before squaring, so that the result is always real and nonnegative. - - For floating-point input, the *std* is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for float32 (see example below). - Specifying a higher-accuracy accumulator using the `dtype` keyword can - alleviate this issue. 
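As a minimal sketch of the ``ddof`` correction just described (assuming IEEE-754 doubles; trailing digits may vary by platform):

>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> np.std(x, ddof=0)  # divisor N: maximum likelihood estimate
1.1180339887498949 # may vary
>>> np.std(x, ddof=1)  # divisor N - 1: Bessel-corrected sample estimate
1.2909944487358056 # may vary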
- - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.std(a) - 1.1180339887498949 # may vary - >>> np.std(a, axis=0) - array([1., 1.]) - >>> np.std(a, axis=1) - array([0.5, 0.5]) - - In single precision, std() can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.std(a) - 0.45000005 - - Computing the standard deviation in float64 is more accurate: - - >>> np.std(a, dtype=np.float64) - 0.44999999925494177 # may vary - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - - if type(a) is not mu.ndarray: - try: - std = a.std - except AttributeError: - pass - else: - return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) - - return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - **kwargs) - - -def _var_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_var_dispatcher) -def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the variance along the specified axis. - - Returns the variance of the array elements, a measure of the spread of a - distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Array containing numbers whose variance is desired. If `a` is not an - array, a conversion is attempted. - axis : None or int or tuple of ints, optional - Axis or axes along which the variance is computed. The default is to - compute the variance of the flattened array. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a variance is performed over multiple axes, - instead of a single axis or all the axes as before. - dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float64`; for arrays of float types it is the same as - the array type. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output, but the type is cast if - necessary. - ddof : int, optional - "Delta Degrees of Freedom": the divisor used in the calculation is - ``N - ddof``, where ``N`` represents the number of elements. By - default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `var` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-class' method does not implement `keepdims` any - exceptions will be raised. - - Returns - ------- - variance : ndarray, see dtype parameter above - If ``out=None``, returns a new array containing the variance; - otherwise, a reference to the output array is returned. - - See Also - -------- - std, mean, nanmean, nanstd, nanvar - ufuncs-output-type - - Notes - ----- - The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. - - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. - If, however, `ddof` is specified, the divisor ``N - ddof`` is used - instead. In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of a hypothetical infinite population. 
- ``ddof=0`` provides a maximum likelihood estimate of the variance for - normally distributed variables. - - Note that for complex numbers, the absolute value is taken before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the variance is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32` (see example - below). Specifying a higher-accuracy accumulator using the ``dtype`` - keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.var(a) - 1.25 - >>> np.var(a, axis=0) - array([1., 1.]) - >>> np.var(a, axis=1) - array([0.25, 0.25]) - - In single precision, var() can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.var(a) - 0.20250003 - - Computing the variance in float64 is more accurate: - - >>> np.var(a, dtype=np.float64) - 0.20249999932944759 # may vary - >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 - 0.2025 - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - - if type(a) is not mu.ndarray: - try: - var = a.var - - except AttributeError: - pass - else: - return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) - - return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - **kwargs) - - -# Aliases of other functions. These have their own definitions only so that -# they can have unique docstrings. - -@array_function_dispatch(_around_dispatcher) -def round_(a, decimals=0, out=None): - """ - Round an array to the given number of decimals. - - See Also - -------- - around : equivalent function; see for details. - """ - return around(a, decimals=decimals, out=out) - - -@array_function_dispatch(_prod_dispatcher, verify=False) -def product(*args, **kwargs): - """ - Return the product of array elements over a given axis. - - See Also - -------- - prod : equivalent function; see for details. - """ - return prod(*args, **kwargs) - - -@array_function_dispatch(_cumprod_dispatcher, verify=False) -def cumproduct(*args, **kwargs): - """ - Return the cumulative product over the given axis. - - See Also - -------- - cumprod : equivalent function; see for details. - """ - return cumprod(*args, **kwargs) - - -@array_function_dispatch(_any_dispatcher, verify=False) -def sometrue(*args, **kwargs): - """ - Check whether some values are true. - - Refer to `any` for full documentation. - - See Also - -------- - any : equivalent function; see for details. - """ - return any(*args, **kwargs) - - -@array_function_dispatch(_all_dispatcher, verify=False) -def alltrue(*args, **kwargs): - """ - Check if all elements of input array are true. - - See Also - -------- - numpy.all : Equivalent function; see for details. - """ - return all(*args, **kwargs) diff --git a/venv/lib/python3.7/site-packages/numpy/core/function_base.py b/venv/lib/python3.7/site-packages/numpy/core/function_base.py deleted file mode 100644 index 538ac8b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/function_base.py +++ /dev/null @@ -1,514 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import functools -import warnings -import operator -import types - -from . 
import numeric as _nx -from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, - TooHardError, asanyarray, ndim) -from numpy.core.multiarray import add_docstring -from numpy.core import overrides - -__all__ = ['logspace', 'linspace', 'geomspace'] - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, - dtype=None, axis=None): - return (start, stop) - - -@array_function_dispatch(_linspace_dispatcher) -def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, - axis=0): - """ - Return evenly spaced numbers over a specified interval. - - Returns `num` evenly spaced samples, calculated over the - interval [`start`, `stop`]. - - The endpoint of the interval can optionally be excluded. - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - Parameters - ---------- - start : array_like - The starting value of the sequence. - stop : array_like - The end value of the sequence, unless `endpoint` is set to False. - In that case, the sequence consists of all but the last of ``num + 1`` - evenly spaced samples, so that `stop` is excluded. Note that the step - size changes when `endpoint` is False. - num : int, optional - Number of samples to generate. Default is 50. Must be non-negative. - endpoint : bool, optional - If True, `stop` is the last sample. Otherwise, it is not included. - Default is True. - retstep : bool, optional - If True, return (`samples`, `step`), where `step` is the spacing - between samples. - dtype : dtype, optional - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - .. versionadded:: 1.9.0 - - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. By default (0), the samples will be along a - new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - Returns - ------- - samples : ndarray - There are `num` equally spaced samples in the closed interval - ``[start, stop]`` or the half-open interval ``[start, stop)`` - (depending on whether `endpoint` is True or False). - step : float, optional - Only returned if `retstep` is True - - Size of spacing between samples. - - - See Also - -------- - arange : Similar to `linspace`, but uses a step size (instead of the - number of samples). - geomspace : Similar to `linspace`, but with numbers spaced evenly on a log - scale (a geometric progression). - logspace : Similar to `geomspace`, but with the end points specified as - logarithms. - - Examples - -------- - >>> np.linspace(2.0, 3.0, num=5) - array([2. , 2.25, 2.5 , 2.75, 3. ]) - >>> np.linspace(2.0, 3.0, num=5, endpoint=False) - array([2. , 2.2, 2.4, 2.6, 2.8]) - >>> np.linspace(2.0, 3.0, num=5, retstep=True) - (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 8 - >>> y = np.zeros(N) - >>> x1 = np.linspace(0, 10, N, endpoint=True) - >>> x2 = np.linspace(0, 10, N, endpoint=False) - >>> plt.plot(x1, y, 'o') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.plot(x2, y + 0.5, 'o') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - try: - num = operator.index(num) - except TypeError: - raise TypeError( - "object of type {} cannot be safely interpreted as an integer." - .format(type(num))) - - if num < 0: - raise ValueError("Number of samples, %s, must be non-negative."
% num) - div = (num - 1) if endpoint else num - - # Convert float/complex array scalars to float, gh-3504 - # and make sure one can use variables that have an __array_interface__, gh-6634 - start = asanyarray(start) * 1.0 - stop = asanyarray(stop) * 1.0 - - dt = result_type(start, stop, float(num)) - if dtype is None: - dtype = dt - - delta = stop - start - y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta)) - # In-place multiplication y *= delta/div is faster, but prevents the multiplicand - # from overriding what class is produced, and thus prevents, e.g. use of Quantities, - # see gh-7142. Hence, we multiply in place only for standard scalar types. - _mult_inplace = _nx.isscalar(delta) - if div > 0: - step = delta / div - if _nx.any(step == 0): - # Special handling for denormal numbers, gh-5437 - y /= div - if _mult_inplace: - y *= delta - else: - y = y * delta - else: - if _mult_inplace: - y *= step - else: - y = y * step - else: - # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) - # have an undefined step - step = NaN - # Multiply with delta to allow possible override of output class. - y = y * delta - - y += start - - if endpoint and num > 1: - y[-1] = stop - - if axis != 0: - y = _nx.moveaxis(y, 0, axis) - - if retstep: - return y.astype(dtype, copy=False), step - else: - return y.astype(dtype, copy=False) - - -def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, - dtype=None, axis=None): - return (start, stop) - - -@array_function_dispatch(_logspace_dispatcher) -def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, - axis=0): - """ - Return numbers spaced evenly on a log scale. - - In linear space, the sequence starts at ``base ** start`` - (`base` to the power of `start`) and ends with ``base ** stop`` - (see `endpoint` below). - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - Parameters - ---------- - start : array_like - ``base ** start`` is the starting value of the sequence. - stop : array_like - ``base ** stop`` is the final value of the sequence, unless `endpoint` - is False. In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length `num`) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If True, `stop` is the last sample. Otherwise, it is not included. - Default is True. - base : float, optional - The base of the log space. The step size between the elements in - ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. - Default is 10.0. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. By default (0), the samples will be along a - new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - arange : Similar to linspace, with the step size specified instead of the - number of samples. Note that, when used with a float endpoint, the - endpoint may or may not be included. - linspace : Similar to logspace, but with the samples uniformly distributed - in linear space, instead of log space. - geomspace : Similar to logspace, but with endpoints specified directly.
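A small hedged check of the uniform multiplicative step noted under `base` above (exact reprs may differ with printing options):

>>> s = np.logspace(0, 3, num=4)
>>> s[1:] / s[:-1]  # consecutive samples differ by the constant factor `base`
array([10., 10., 10.])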
- - Notes - ----- - Logspace is equivalent to the code - - >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) - ... # doctest: +SKIP - >>> power(base, y).astype(dtype) - ... # doctest: +SKIP - - Examples - -------- - >>> np.logspace(2.0, 3.0, num=4) - array([ 100. , 215.443469 , 464.15888336, 1000. ]) - >>> np.logspace(2.0, 3.0, num=4, endpoint=False) - array([100. , 177.827941 , 316.22776602, 562.34132519]) - >>> np.logspace(2.0, 3.0, num=4, base=2.0) - array([4. , 5.0396842 , 6.34960421, 8. ]) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> x1 = np.logspace(0.1, 1, N, endpoint=True) - >>> x2 = np.logspace(0.1, 1, N, endpoint=False) - >>> y = np.zeros(N) - >>> plt.plot(x1, y, 'o') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.plot(x2, y + 0.5, 'o') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis) - if dtype is None: - return _nx.power(base, y) - return _nx.power(base, y).astype(dtype, copy=False) - - -def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None, - axis=None): - return (start, stop) - - -@array_function_dispatch(_geomspace_dispatcher) -def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): - """ - Return numbers spaced evenly on a log scale (a geometric progression). - - This is similar to `logspace`, but with endpoints specified directly. - Each output sample is a constant multiple of the previous. - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - Parameters - ---------- - start : array_like - The starting value of the sequence. - stop : array_like - The final value of the sequence, unless `endpoint` is False. - In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length `num`) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If True, `stop` is the last sample. Otherwise, it is not included. - Default is True. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. By default (0), the samples will be along a - new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - logspace : Similar to geomspace, but with endpoints specified using log - and base. - linspace : Similar to geomspace, but with arithmetic instead of geometric - progression. - arange : Similar to linspace, with the step size specified instead of the - number of samples. - - Notes - ----- - If the inputs or dtype are complex, the output will follow a logarithmic - spiral in the complex plane. (There are an infinite number of spirals - passing through two points; the output will follow the shortest such path.) - - Examples - -------- - >>> np.geomspace(1, 1000, num=4) - array([ 1., 10., 100., 1000.]) - >>> np.geomspace(1, 1000, num=3, endpoint=False) - array([ 1., 10., 100.]) - >>> np.geomspace(1, 1000, num=4, endpoint=False) - array([ 1. 
, 5.62341325, 31.6227766 , 177.827941 ]) - >>> np.geomspace(1, 256, num=9) - array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) - - Note that the above may not produce exact integers: - - >>> np.geomspace(1, 256, num=9, dtype=int) - array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) - array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) - - Negative, decreasing, and complex inputs are allowed: - - >>> np.geomspace(1000, 1, num=4) - array([1000., 100., 10., 1.]) - >>> np.geomspace(-1000, -1, num=4) - array([-1000., -100., -10., -1.]) - >>> np.geomspace(1j, 1000j, num=4) # Straight line - array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j]) - >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle - array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, - 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, - 1.00000000e+00+0.00000000e+00j]) - - Graphical illustration of ``endpoint`` parameter: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> y = np.zeros(N) - >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.axis([0.5, 2000, 0, 3]) - [0.5, 2000, 0, 3] - >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') - >>> plt.show() - - """ - start = asanyarray(start) - stop = asanyarray(stop) - if _nx.any(start == 0) or _nx.any(stop == 0): - raise ValueError('Geometric sequence cannot include zero') - - dt = result_type(start, stop, float(num), _nx.zeros((), dtype)) - if dtype is None: - dtype = dt - else: - # complex to dtype('complex128'), for instance - dtype = _nx.dtype(dtype) - - # Promote both arguments to the same dtype in case, for instance, one is - # complex and another is negative and log would produce NaN otherwise. - # Copy since we may change things in-place further down. - start = start.astype(dt, copy=True) - stop = stop.astype(dt, copy=True) - - out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt) - # Avoid negligible real or imaginary parts in output by rotating to - # positive real, calculating, then undoing rotation - if _nx.issubdtype(dt, _nx.complexfloating): - all_imag = (start.real == 0.) & (stop.real == 0.) - if _nx.any(all_imag): - start[all_imag] = start[all_imag].imag - stop[all_imag] = stop[all_imag].imag - out_sign[all_imag] = 1j - - both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1) - if _nx.any(both_negative): - _nx.negative(start, out=start, where=both_negative) - _nx.negative(stop, out=stop, where=both_negative) - _nx.negative(out_sign, out=out_sign, where=both_negative) - - log_start = _nx.log10(start) - log_stop = _nx.log10(stop) - result = out_sign * logspace(log_start, log_stop, num=num, - endpoint=endpoint, base=10.0, dtype=dtype) - if axis != 0: - result = _nx.moveaxis(result, 0, axis) - - return result.astype(dtype, copy=False) - - -def _needs_add_docstring(obj): - """ - Returns true if the only way to set the docstring of `obj` from python is - via add_docstring. - - This function errs on the side of being overly conservative. - """ - Py_TPFLAGS_HEAPTYPE = 1 << 9 - - if isinstance(obj, (types.FunctionType, types.MethodType, property)): - return False - - if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE: - return False - - return True - - -def _add_docstring(obj, doc, warn_on_python): - if warn_on_python and not _needs_add_docstring(obj): - warnings.warn( - "add_newdoc was used on a pure-python object {}. 
" - "Prefer to attach it directly to the source." - .format(obj), - UserWarning, - stacklevel=3) - try: - add_docstring(obj, doc) - except Exception: - pass - - -def add_newdoc(place, obj, doc, warn_on_python=True): - """ - Add documentation to an existing object, typically one defined in C - - The purpose is to allow easier editing of the docstrings without requiring - a re-compile. This exists primarily for internal use within numpy itself. - - Parameters - ---------- - place : str - The absolute name of the module to import from - obj : str - The name of the object to add documentation to, typically a class or - function name - doc : {str, Tuple[str, str], List[Tuple[str, str]]} - If a string, the documentation to apply to `obj` - - If a tuple, then the first element is interpreted as an attribute of - `obj` and the second as the docstring to apply - ``(method, docstring)`` - - If a list, then each element of the list should be a tuple of length - two - ``[(method1, docstring1), (method2, docstring2), ...]`` - warn_on_python : bool - If True, the default, emit `UserWarning` if this is used to attach - documentation to a pure-python object. - - Notes - ----- - This routine never raises an error if the docstring can't be written, but - will raise an error if the object being documented does not exist. - - This routine cannot modify read-only docstrings, as appear - in new-style classes or built-in functions. Because this - routine never raises an error the caller must check manually - that the docstrings were changed. - - Since this function grabs the ``char *`` from a c-level str object and puts - it into the ``tp_doc`` slot of the type of `obj`, it violates a number of - C-API best-practices, by: - - - modifying a `PyTypeObject` after calling `PyType_Ready` - - calling `Py_INCREF` on the str and losing the reference, so the str - will never be released - - If possible it should be avoided. 
- """ - new = getattr(__import__(place, globals(), {}, [obj]), obj) - if isinstance(doc, str): - _add_docstring(new, doc.strip(), warn_on_python) - elif isinstance(doc, tuple): - attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) - elif isinstance(doc, list): - for attr, docstring in doc: - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) diff --git a/venv/lib/python3.7/site-packages/numpy/core/generate_numpy_api.py b/venv/lib/python3.7/site-packages/numpy/core/generate_numpy_api.py deleted file mode 100644 index 5e04fb8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/generate_numpy_api.py +++ /dev/null @@ -1,254 +0,0 @@ -from __future__ import division, print_function - -import os -import genapi - -from genapi import \ - TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi - -import numpy_api - -# use annotated api when running under cpychecker -h_template = r""" -#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -%s - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -%s - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - int st; - PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyArray_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); - return -1; - } - - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "ABI version 0x%%x but this version of numpy is 0x%%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "API version 0x%%x but this version of numpy is 0x%%x", \ - (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); - return -1; - } - - /* - * Perform runtime check of endianness and check it matches the one set by - * the headers (npy_endian.h) as a safeguard - */ - st = PyArray_GetEndianness(); - if (st == NPY_CPU_UNKNOWN_ENDIAN) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); - return -1; - } -#if NPY_BYTE_ORDER 
== NPY_BIG_ENDIAN - if (st != NPY_CPU_BIG) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "big endian, but detected different endianness at runtime"); - return -1; - } -#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN - if (st != NPY_CPU_LITTLE) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "little endian, but detected different endianness at runtime"); - return -1; - } -#endif - - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_ARRAY_RETVAL NULL -#else -#define NUMPY_IMPORT_ARRAY_RETVAL -#endif - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif -""" - - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyArray_API[] = { -%s -}; -""" - -c_api_header = """ -=========== -NumPy C-API -=========== -""" - -def generate_api(output_dir, force=False): - basename = 'multiarray_api' - - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) - d_file = os.path.join(output_dir, '%s.txt' % basename) - targets = (h_file, c_file, d_file) - - sources = numpy_api.multiarray_api - - if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): - return targets - else: - do_generate_api(targets, sources) - - return targets - -def do_generate_api(targets, sources): - header_file = targets[0] - c_file = targets[1] - doc_file = targets[2] - - global_vars = sources[0] - scalar_bool_values = sources[1] - types_api = sources[2] - multiarray_funcs = sources[3] - - multiarray_api = sources[:] - - module_list = [] - extension_list = [] - init_list = [] - - # Check multiarray api indexes - multiarray_api_index = genapi.merge_api_dicts(multiarray_api) - genapi.check_api_dict(multiarray_api_index) - - numpyapi_list = genapi.get_api_functions('NUMPY_API', - multiarray_funcs) - - # FIXME: ordered_funcs_api is unused - ordered_funcs_api = genapi.order_dict(multiarray_funcs) - - # Create dict name -> *Api instance - api_name = 'PyArray_API' - multiarray_api_dict = {} - for f in numpyapi_list: - name = f.name - index = multiarray_funcs[name][0] - annotations = multiarray_funcs[name][1:] - multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, - f.return_type, - f.args, api_name) - - for name, val in global_vars.items(): - index, type = val - multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) - - for name, val in scalar_bool_values.items(): - index = val[0] - multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) - - for name, val in types_api.items(): - index = val[0] - multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) - - if len(multiarray_api_dict) != len(multiarray_api_index): - keys_dict = set(multiarray_api_dict.keys()) - keys_index = set(multiarray_api_index.keys()) - raise AssertionError( - "Multiarray API size mismatch - " - "index has extra keys {}, dict has extra keys {}" - .format(keys_index - keys_dict, keys_dict - keys_index) - ) - - extension_list = [] - for name, index in 
genapi.order_dict(multiarray_api_index): - api_item = multiarray_api_dict[name] - extension_list.append(api_item.define_from_array_api_string()) - init_list.append(api_item.array_api_define()) - module_list.append(api_item.internal_define()) - - # Write to header - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - genapi.write_file(header_file, s) - - # Write to c-code - s = c_template % ',\n'.join(init_list) - genapi.write_file(c_file, s) - - # write to documentation - s = c_api_header - for func in numpyapi_list: - s += func.to_ReST() - s += '\n\n' - genapi.write_file(doc_file, s) - - return targets diff --git a/venv/lib/python3.7/site-packages/numpy/core/getlimits.py b/venv/lib/python3.7/site-packages/numpy/core/getlimits.py deleted file mode 100644 index 31fa6b9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/getlimits.py +++ /dev/null @@ -1,548 +0,0 @@ -"""Machine limits for Float32 and Float64 and (long double) if available... - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['finfo', 'iinfo'] - -import warnings - -from .machar import MachAr -from .overrides import set_module -from . import numeric -from . import numerictypes as ntypes -from .numeric import array, inf -from .umath import log10, exp2 -from . import umath - - -def _fr0(a): - """fix rank-0 --> rank-1""" - if a.ndim == 0: - a = a.copy() - a.shape = (1,) - return a - - -def _fr1(a): - """fix rank > 0 --> rank-0""" - if a.size == 1: - a = a.copy() - a.shape = () - return a - -class MachArLike(object): - """ Object to simulate MachAr instance """ - - def __init__(self, - ftype, - **kwargs): - params = _MACHAR_PARAMS[ftype] - float_conv = lambda v: array([v], ftype) - float_to_float = lambda v : _fr1(float_conv(v)) - float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype)) - - self.title = params['title'] - # Parameter types same as for discovered MachAr object. - self.epsilon = self.eps = float_to_float(kwargs.pop('eps')) - self.epsneg = float_to_float(kwargs.pop('epsneg')) - self.xmax = self.huge = float_to_float(kwargs.pop('huge')) - self.xmin = self.tiny = float_to_float(kwargs.pop('tiny')) - self.ibeta = params['itype'](kwargs.pop('ibeta')) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = float_to_float(float_conv(10) ** (-self.precision)) - self._str_eps = float_to_str(self.eps) - self._str_epsneg = float_to_str(self.epsneg) - self._str_xmin = float_to_str(self.xmin) - self._str_xmax = float_to_str(self.xmax) - self._str_resolution = float_to_str(self.resolution) - -_convert_to_float = { - ntypes.csingle: ntypes.single, - ntypes.complex_: ntypes.float_, - ntypes.clongfloat: ntypes.longfloat - } - -# Parameters for creating MachAr / MachAr-like objects -_title_fmt = 'numpy {} precision floating point number' -_MACHAR_PARAMS = { - ntypes.double: dict( - itype = ntypes.int64, - fmt = '%24.16e', - title = _title_fmt.format('double')), - ntypes.single: dict( - itype = ntypes.int32, - fmt = '%15.7e', - title = _title_fmt.format('single')), - ntypes.longdouble: dict( - itype = ntypes.longlong, - fmt = '%s', - title = _title_fmt.format('long double')), - ntypes.half: dict( - itype = ntypes.int16, - fmt = '%12.5e', - title = _title_fmt.format('half'))} - -# Key to identify the floating point type. 
Key is result of -# ftype('-0.1').newbyteorder('<').tobytes() -# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar -_float_ma = {} - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. - f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same exponent range as float64, but extended number of - # digits in the significand. - huge_dd = (umath.nextafter(ld(inf), ld(0)) - if hasattr(umath, 'nextafter') # Missing on some platforms? 
- else float64_ma.huge) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg= exp2(ld(-106)), - huge=huge_dd, - tiny=exp2(ld(-1022))) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. ``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - key = ftype('-0.1').newbyteorder('<').tobytes() - ma_like = _KNOWN_TYPES.get(key) - # Could be 80 bit == 10 byte extended precision, where last bytes can be - # random garbage. Try comparing first 10 bytes to pattern. - if ma_like is None and ftype == ntypes.longdouble: - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - 'Signature {} for {} does not match any known type: ' - 'falling back to type probe function'.format(key, ftype), - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v:_fr0(v.astype(params['itype']))[0], - lambda v:array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) - - -@set_module('numpy') -class finfo(object): - """ - finfo(dtype) - - Machine limits for floating point types. - - Attributes - ---------- - bits : int - The number of bits occupied by the type. - eps : float - The smallest representable positive number such that - ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating - point type. - epsneg : floating point number of the appropriate type - The smallest representable positive number such that - ``1.0 - epsneg != 1.0``. - iexp : int - The number of bits in the exponent portion of the floating point - representation. - machar : MachAr - The object which calculated these parameters and holds more - detailed information. - machep : int - The exponent that yields `eps`. - max : floating point number of the appropriate type - The largest representable number. - maxexp : int - The smallest positive power of the base (2) that causes overflow. - min : floating point number of the appropriate type - The smallest representable number, typically ``-max``. - minexp : int - The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. - negep : int - The exponent that yields `epsneg`. - nexp : int - The number of bits in the exponent including its sign and bias. 
- nmant : int - The number of bits in the mantissa. - precision : int - The approximate number of decimal digits to which this kind of - float is precise. - resolution : floating point number of the appropriate type - The approximate decimal resolution of this type, i.e., - ``10**-precision``. - tiny : float - The smallest positive usable number. Type of `tiny` is an - appropriate floating point type. - - Parameters - ---------- - dtype : float, dtype, or instance - Kind of floating point data-type about which to get information. - - See Also - -------- - MachAr : The implementation of the tests that produce this information. - iinfo : The equivalent for integer data types. - - Notes - ----- - For developers of NumPy: do not instantiate this at the module level. - The initial calculation of these parameters is expensive and negatively - impacts import times. These objects are cached, so calling ``finfo()`` - repeatedly inside your functions is not a problem. - - """ - - _finfo_cache = {} - - def __new__(cls, dtype): - try: - dtype = numeric.dtype(dtype) - except TypeError: - # In case a float instance was given - dtype = numeric.dtype(type(dtype)) - - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - dtypes = [dtype] - newdtype = numeric.obj2sctype(dtype) - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - if not issubclass(dtype, numeric.inexact): - raise ValueError("data type %r not inexact" % (dtype)) - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - if not issubclass(dtype, numeric.floating): - newdtype = _convert_to_float[dtype] - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - obj = object.__new__(cls)._init(dtype) - for dt in dtypes: - cls._finfo_cache[dt] = obj - return obj - - def _init(self, dtype): - self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['tiny', 'resolution', 'epsneg']: - setattr(self, word, getattr(machar, word).flat[0]) - self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self.machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - return self - - def __str__(self): - fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'precision = %(precision)3s resolution = %(_str_resolution)s\n' - 'machep = %(machep)6s eps = %(_str_eps)s\n' - 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' - 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' - 'maxexp = %(maxexp)6s max = %(_str_max)s\n' - 'nexp = %(nexp)6s min = -max\n' - '---------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - def __repr__(self): - c = self.__class__.__name__ - d = self.__dict__.copy() - d['klass'] = c - return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," - " max=%(_str_max)s, dtype=%(dtype)s)") % d) - - -@set_module('numpy') -class iinfo(object): - """ - iinfo(type) - - Machine limits for integer types. 
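A brief hedged illustration of the ``finfo`` class defined above, before the integer-type details (typical IEEE-754 values; long double behavior differs by platform):

>>> np.finfo(np.float64).eps
2.220446049250313e-16
>>> np.finfo(np.float32).max
3.4028235e+38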
- - Attributes - ---------- - bits : int - The number of bits occupied by the type. - min : int - The smallest integer expressible by the type. - max : int - The largest integer expressible by the type. - - Parameters - ---------- - int_type : integer type, dtype, or instance - The kind of integer data type to get information about. - - See Also - -------- - finfo : The equivalent for floating point data types. - - Examples - -------- - With types: - - >>> ii16 = np.iinfo(np.int16) - >>> ii16.min - -32768 - >>> ii16.max - 32767 - >>> ii32 = np.iinfo(np.int32) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - With instances: - - >>> ii32 = np.iinfo(np.int32(10)) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - """ - - _min_vals = {} - _max_vals = {} - - def __init__(self, int_type): - try: - self.dtype = numeric.dtype(int_type) - except TypeError: - self.dtype = numeric.dtype(type(int_type)) - self.kind = self.dtype.kind - self.bits = self.dtype.itemsize * 8 - self.key = "%s%d" % (self.kind, self.bits) - if self.kind not in 'iu': - raise ValueError("Invalid integer data type %r." % (self.kind,)) - - @property - def min(self): - """Minimum value of given dtype.""" - if self.kind == 'u': - return 0 - else: - try: - val = iinfo._min_vals[self.key] - except KeyError: - val = int(-(1 << (self.bits-1))) - iinfo._min_vals[self.key] = val - return val - - @property - def max(self): - """Maximum value of given dtype.""" - try: - val = iinfo._max_vals[self.key] - except KeyError: - if self.kind == 'u': - val = int((1 << self.bits) - 1) - else: - val = int((1 << (self.bits-1)) - 1) - iinfo._max_vals[self.key] = val - return val - - def __str__(self): - """String representation.""" - fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'min = %(min)s\n' - 'max = %(max)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} - - def __repr__(self): - return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, - self.min, self.max, self.dtype) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__multiarray_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__multiarray_api.h deleted file mode 100644 index 01de270..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,1554 +0,0 @@ - -#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ - (void); -extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArray_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; - -extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; - -extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; - -extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; - -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; - 
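The file above, numpy/core/getlimits.py, is the last Python source removed in this diff; the remaining hunks are NumPy's generated C API headers. Its mechanism in brief: _get_machar keys a table of precomputed MachArLike parameter sets by the little-endian bytes of the scalar -0.1 (the byte-string signatures registered above), falls back to runtime probing with MachAr when no signature matches, and finfo/iinfo cache one instance per dtype. A minimal sketch of that behaviour — assuming a NumPy of the vintage vendored here (scalar .newbyteorder() is gone from current releases):

    import numpy as np

    # The detection key computed in _get_machar: for IEEE 754 float64 this
    # yields b'\x9a\x99\x99\x99\x99\x99\xb9\xbf' -- compare the double-double
    # signatures registered above, which embed the same byte pattern.
    key = np.float64('-0.1').newbyteorder('<').tobytes()

    f64 = np.finfo(np.float64)
    assert np.finfo(np.float64) is f64   # cached, as the Notes section warns
    print(f64.eps, f64.tiny, f64.max)
    # 2.220446049250313e-16 2.2250738585072014e-308 1.7976931348623157e+308

    # Complex dtypes are resolved through _convert_to_float to their
    # component float type, so they report the same parameters.
    assert np.finfo(np.complex128).eps == f64.eps

    print(np.iinfo(np.int16).min, np.iinfo(np.int16).max)   # -32768 32767
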
-extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; - -NPY_NO_EXPORT int PyArray_SetNumericOps \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \ - (void); -NPY_NO_EXPORT int PyArray_INCREF \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_XDECREF \ - (PyArrayObject *); -NPY_NO_EXPORT void PyArray_SetStringFunction \ - (PyObject *, int); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ - (int); -NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ - (int); -NPY_NO_EXPORT char * PyArray_Zero \ - (PyArrayObject *); -NPY_NO_EXPORT char * PyArray_One \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_CastToType \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT int PyArray_CastTo \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CastAnyTo \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CanCastSafely \ - (int, int); -NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_ObjectType \ - (PyObject *, int); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ - (PyObject *, PyArray_Descr *); -NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ - (PyObject *, int *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ - (PyObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ - (PyObject *); -NPY_NO_EXPORT npy_intp PyArray_Size \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Scalar \ - (void *, PyArray_Descr *, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ - (PyObject *, PyArray_Descr *); -NPY_NO_EXPORT void PyArray_ScalarAsCtype \ - (PyObject *, void *); -NPY_NO_EXPORT int PyArray_CastScalarToCtype \ - (PyObject *, void *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_CastScalarDirect \ - (PyObject *, PyArray_Descr *, void *, int); -NPY_NO_EXPORT 
PyObject * PyArray_ScalarFromObject \ - (PyObject *); -NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \ - (PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_FromDims \ - (int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type)); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \ - (int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data)); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ - (PyObject *, PyArray_Descr *, int, int, int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ - (PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromFile \ - (FILE *, PyArray_Descr *, npy_intp, char *); -NPY_NO_EXPORT PyObject * PyArray_FromString \ - (char *, npy_intp, PyArray_Descr *, npy_intp, char *); -NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ - (PyObject *, PyArray_Descr *, npy_intp, npy_intp); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ - (PyObject *, PyArray_Descr *, npy_intp); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_GetField \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) int PyArray_SetField \ - (PyArrayObject *, PyArray_Descr *, int, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Byteswap \ - (PyArrayObject *, npy_bool); -NPY_NO_EXPORT PyObject * PyArray_Resize \ - (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order)); -NPY_NO_EXPORT int PyArray_MoveInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyAnyInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyObject \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_NewCopy \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_ToList \ - (PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_ToString \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT int PyArray_ToFile \ - (PyArrayObject *, FILE *, char *, char *); -NPY_NO_EXPORT int PyArray_Dump \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Dumps \ - (PyObject *, int); -NPY_NO_EXPORT int PyArray_ValidType \ - (int); -NPY_NO_EXPORT void PyArray_UpdateFlags \ - (PyArrayObject *, int); -NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_New \ - (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(1) NPY_GCC_NONNULL(2) PyObject * PyArray_NewFromDescr \ - (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ - (PyArray_Descr *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ - (int); -NPY_NO_EXPORT double PyArray_GetPriority \ - (PyObject *, double); -NPY_NO_EXPORT PyObject * PyArray_IterNew \ - (PyObject *); -NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \ - (int, ...); -NPY_NO_EXPORT int PyArray_PyIntAsInt \ - (PyObject *); -NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ - (PyObject *); -NPY_NO_EXPORT int PyArray_Broadcast \ - (PyArrayMultiIterObject *); -NPY_NO_EXPORT void PyArray_FillObjectArray \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT int 
PyArray_FillWithScalar \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ - (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ - (PyArray_Descr *, char); -NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ - (PyObject *, int *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ - (PyObject *, PyArray_Descr *, int, int, int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_FromInterface \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ - (PyObject *, PyArray_Descr *, PyObject *); -NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ - (int, PyArrayObject **); -NPY_NO_EXPORT int PyArray_CanCoerceScalar \ - (int, int, NPY_SCALARKIND); -NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \ - (PyObject *); -NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ - (PyTypeObject *, PyTypeObject *); -NPY_NO_EXPORT int PyArray_CompareUCS4 \ - (npy_ucs4 *, npy_ucs4 *, size_t); -NPY_NO_EXPORT int PyArray_RemoveSmallest \ - (PyArrayMultiIterObject *); -NPY_NO_EXPORT int PyArray_ElementStrides \ - (PyObject *); -NPY_NO_EXPORT void PyArray_Item_INCREF \ - (char *, PyArray_Descr *); -NPY_NO_EXPORT void PyArray_Item_XDECREF \ - (char *, PyArray_Descr *); -NPY_NO_EXPORT PyObject * PyArray_FieldNames \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Transpose \ - (PyArrayObject *, PyArray_Dims *); -NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ - (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); -NPY_NO_EXPORT PyObject * PyArray_PutTo \ - (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); -NPY_NO_EXPORT PyObject * PyArray_PutMask \ - (PyArrayObject *, PyObject*, PyObject*); -NPY_NO_EXPORT PyObject * PyArray_Repeat \ - (PyArrayObject *, PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Choose \ - (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); -NPY_NO_EXPORT int PyArray_Sort \ - (PyArrayObject *, int, NPY_SORTKIND); -NPY_NO_EXPORT PyObject * PyArray_ArgSort \ - (PyArrayObject *, int, NPY_SORTKIND); -NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ - (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_ArgMax \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_ArgMin \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Reshape \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Newshape \ - (PyArrayObject *, PyArray_Dims *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_Squeeze \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ - (PyArrayObject *, PyArray_Descr *, PyTypeObject *); -NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ - (PyArrayObject *, int, int); -NPY_NO_EXPORT PyObject * PyArray_Max \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Min \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Ptp \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Mean \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Trace \ - (PyArrayObject *, int, int, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Diagonal \ - (PyArrayObject *, int, int, int); -NPY_NO_EXPORT PyObject * PyArray_Clip \ - (PyArrayObject *, 
PyObject *, PyObject *, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Conjugate \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Nonzero \ - (PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Std \ - (PyArrayObject *, int, int, PyArrayObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Sum \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_CumSum \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Prod \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_CumProd \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_All \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Any \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Compress \ - (PyArrayObject *, PyObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Flatten \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_Ravel \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ - (npy_intp const *, int); -NPY_NO_EXPORT int PyArray_MultiplyIntList \ - (int const *, int); -NPY_NO_EXPORT void * PyArray_GetPtr \ - (PyArrayObject *, npy_intp const*); -NPY_NO_EXPORT int PyArray_CompareLists \ - (npy_intp const *, npy_intp const *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ - (PyObject **, void *, npy_intp *, int, PyArray_Descr*); -NPY_NO_EXPORT int PyArray_As1D \ - (PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode)); -NPY_NO_EXPORT int PyArray_As2D \ - (PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode)); -NPY_NO_EXPORT int PyArray_Free \ - (PyObject *, void *); -NPY_NO_EXPORT int PyArray_Converter \ - (PyObject *, PyObject **); -NPY_NO_EXPORT int PyArray_IntpFromSequence \ - (PyObject *, npy_intp *, int); -NPY_NO_EXPORT PyObject * PyArray_Concatenate \ - (PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ - (PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ - (PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Correlate \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT int PyArray_TypestrConvert \ - (int, int); -NPY_NO_EXPORT int PyArray_DescrConverter \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_DescrConverter2 \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_IntpConverter \ - (PyObject *, PyArray_Dims *); -NPY_NO_EXPORT int PyArray_BufferConverter \ - (PyObject *, PyArray_Chunk *); -NPY_NO_EXPORT int PyArray_AxisConverter \ - (PyObject *, int *); -NPY_NO_EXPORT int PyArray_BoolConverter \ - (PyObject *, npy_bool *); -NPY_NO_EXPORT int PyArray_ByteorderConverter \ - (PyObject *, char *); -NPY_NO_EXPORT int PyArray_OrderConverter \ - (PyObject *, NPY_ORDER *); -NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ - (int, npy_intp const *, PyArray_Descr *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ - (int, npy_intp const *, PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_Where \ - (PyObject *, PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Arange \ - (double, double, double, int); -NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ - (PyObject *, PyObject 
*, PyObject *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_SortkindConverter \ - (PyObject *, NPY_SORTKIND *); -NPY_NO_EXPORT PyObject * PyArray_LexSort \ - (PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Round \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ - (int, int); -NPY_NO_EXPORT int PyArray_RegisterDataType \ - (PyArray_Descr *); -NPY_NO_EXPORT int PyArray_RegisterCastFunc \ - (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); -NPY_NO_EXPORT int PyArray_RegisterCanCast \ - (PyArray_Descr *, int, NPY_SCALARKIND); -NPY_NO_EXPORT void PyArray_InitArrFuncs \ - (PyArray_ArrFuncs *); -NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ - (int, npy_intp *); -NPY_NO_EXPORT int PyArray_TypeNumFromName \ - (char *); -NPY_NO_EXPORT int PyArray_ClipmodeConverter \ - (PyObject *, NPY_CLIPMODE *); -NPY_NO_EXPORT int PyArray_OutputConverter \ - (PyObject *, PyArrayObject **); -NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ - (PyObject *, npy_intp *, int); -NPY_NO_EXPORT void _PyArray_SigintHandler \ - (int); -NPY_NO_EXPORT void* _PyArray_GetSigintBuf \ - (void); -NPY_NO_EXPORT int PyArray_DescrAlignConverter \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_SearchsideConverter \ - (PyObject *, void *); -NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ - (PyArrayObject *, int *, int); -NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ - (npy_intp *, int); -NPY_NO_EXPORT int PyArray_CompareString \ - (const char *, const char *, size_t); -NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \ - (PyObject **, int, int, ...); -NPY_NO_EXPORT int PyArray_GetEndianness \ - (void); -NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ - (void); -NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ - (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*); -extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; - -extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; - -NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \ - (PyObject *NPY_UNUSED(op)); -NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \ - (npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *); -NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \ - (npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *); -NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \ - (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d)); -NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \ - (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d)); -NPY_NO_EXPORT NpyIter * NpyIter_New \ - (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); -NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ - (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); -NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ - (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); -NPY_NO_EXPORT NpyIter * NpyIter_Copy \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_Deallocate \ - (NpyIter *); -NPY_NO_EXPORT npy_bool 
NpyIter_HasDelayedBufAlloc \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_Reset \ - (NpyIter *, char **); -NPY_NO_EXPORT int NpyIter_ResetBasePointers \ - (NpyIter *, char **, char **); -NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ - (NpyIter *, npy_intp, npy_intp, char **); -NPY_NO_EXPORT int NpyIter_GetNDim \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GetNOp \ - (NpyIter *); -NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ - (NpyIter *, char **); -NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ - (NpyIter *); -NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ - (NpyIter *, npy_intp *, npy_intp *); -NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GotoIterIndex \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GetShape \ - (NpyIter *, npy_intp *); -NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ - (NpyIter *, char **); -NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ - (NpyIter *, npy_intp const *); -NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ - (NpyIter *); -NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GotoIndex \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ - (NpyIter *); -NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ - (NpyIter *); -NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ - (NpyIter *); -NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT void NpyIter_GetReadFlags \ - (NpyIter *, char *); -NPY_NO_EXPORT void NpyIter_GetWriteFlags \ - (NpyIter *, char *); -NPY_NO_EXPORT void NpyIter_DebugPrint \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ - (NpyIter *); -NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ - (NpyIter *, npy_intp *); -NPY_NO_EXPORT int NpyIter_RemoveAxis \ - (NpyIter *, int); -NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ - (NpyIter *, int); -NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ - (NpyIter *); -NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ - (NpyIter *, npy_intp, npy_intp *); -NPY_NO_EXPORT int PyArray_CastingConverter \ - (PyObject *, NPY_CASTING *); -NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ - (PyArrayObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ - (PyArrayObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ - (npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **); -NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ - (PyArrayObject *, PyArray_Descr *, NPY_CASTING); -NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ - (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); -NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ - (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) 
NPY_GCC_NONNULL(1) PyObject * PyArray_NewLikeArray \ - (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); -NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \ - (PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *); -NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ - (PyObject *, NPY_CLIPMODE *, int); -NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ - (PyObject *, PyObject *, PyArrayObject*); -NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ - (NpyIter *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ - (int, npy_intp const *, npy_stride_sort_item *); -NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ - (PyArrayObject *, const npy_bool *); -NPY_NO_EXPORT void PyArray_DebugPrint \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ - (PyArrayObject *, const char *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT void * PyDataMem_NEW \ - (size_t); -NPY_NO_EXPORT void PyDataMem_FREE \ - (void *); -NPY_NO_EXPORT void * PyDataMem_RENEW \ - (void *, size_t); -NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \ - (PyDataMem_EventHookFunc *, void *, void **); -extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; - -NPY_NO_EXPORT void PyArray_MapIterSwapAxes \ - (PyArrayMapIterObject *, PyArrayObject **, int); -NPY_NO_EXPORT PyObject * PyArray_MapIterArray \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT void PyArray_MapIterNext \ - (PyArrayMapIterObject *); -NPY_NO_EXPORT int PyArray_Partition \ - (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); -NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ - (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); -NPY_NO_EXPORT int PyArray_SelectkindConverter \ - (PyObject *, NPY_SELECTKIND *); -NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ - (size_t, size_t); -NPY_NO_EXPORT NPY_GCC_NONNULL(1) int PyArray_CheckAnyScalarExact \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap \ - (PyArrayObject *, PyObject *, int, PyArrayObject *); -NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ - (PyArrayObject *, PyArrayObject *); - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -#define PyArray_GetNDArrayCVersion \ - (*(unsigned int (*)(void)) \ - PyArray_API[0]) -#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) -#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) -#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) -#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) -#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) -#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) -#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) -#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) -#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) -#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) -#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) -#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) -#define PySignedIntegerArrType_Type 
(*(PyTypeObject *)PyArray_API[13]) -#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) -#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) -#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) -#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) -#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) -#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) -#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) -#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) -#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) -#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) -#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) -#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) -#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) -#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) -#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) -#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) -#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) -#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) -#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) -#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) -#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) -#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) -#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) -#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) -#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) -#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) -#define PyArray_SetNumericOps \ - (*(int (*)(PyObject *)) \ - PyArray_API[40]) -#define PyArray_GetNumericOps \ - (*(PyObject * (*)(void)) \ - PyArray_API[41]) -#define PyArray_INCREF \ - (*(int (*)(PyArrayObject *)) \ - PyArray_API[42]) -#define PyArray_XDECREF \ - (*(int (*)(PyArrayObject *)) \ - PyArray_API[43]) -#define PyArray_SetStringFunction \ - (*(void (*)(PyObject *, int)) \ - PyArray_API[44]) -#define PyArray_DescrFromType \ - (*(PyArray_Descr * (*)(int)) \ - PyArray_API[45]) -#define PyArray_TypeObjectFromType \ - (*(PyObject * (*)(int)) \ - PyArray_API[46]) -#define PyArray_Zero \ - (*(char * (*)(PyArrayObject *)) \ - PyArray_API[47]) -#define PyArray_One \ - (*(char * (*)(PyArrayObject *)) \ - PyArray_API[48]) -#define PyArray_CastToType \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[49]) -#define PyArray_CastTo \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[50]) -#define PyArray_CastAnyTo \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[51]) -#define PyArray_CanCastSafely \ - (*(int (*)(int, int)) \ - PyArray_API[52]) -#define PyArray_CanCastTo \ - (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[53]) -#define PyArray_ObjectType \ - (*(int (*)(PyObject *, int)) \ - PyArray_API[54]) -#define PyArray_DescrFromObject \ - (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ - PyArray_API[55]) -#define PyArray_ConvertToCommonType \ - (*(PyArrayObject ** (*)(PyObject *, int *)) \ - PyArray_API[56]) -#define PyArray_DescrFromScalar \ - (*(PyArray_Descr * (*)(PyObject *)) \ - PyArray_API[57]) -#define PyArray_DescrFromTypeObject \ - (*(PyArray_Descr * (*)(PyObject *)) \ - PyArray_API[58]) -#define PyArray_Size \ - (*(npy_intp (*)(PyObject *)) \ - PyArray_API[59]) -#define 
PyArray_Scalar \ - (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ - PyArray_API[60]) -#define PyArray_FromScalar \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ - PyArray_API[61]) -#define PyArray_ScalarAsCtype \ - (*(void (*)(PyObject *, void *)) \ - PyArray_API[62]) -#define PyArray_CastScalarToCtype \ - (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ - PyArray_API[63]) -#define PyArray_CastScalarDirect \ - (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ - PyArray_API[64]) -#define PyArray_ScalarFromObject \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[65]) -#define PyArray_GetCastFunc \ - (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \ - PyArray_API[66]) -#define PyArray_FromDims \ - (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))) \ - PyArray_API[67]) -#define PyArray_FromDimsAndDataAndDescr \ - (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data))) \ - PyArray_API[68]) -#define PyArray_FromAny \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ - PyArray_API[69]) -#define PyArray_EnsureArray \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[70]) -#define PyArray_EnsureAnyArray \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[71]) -#define PyArray_FromFile \ - (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ - PyArray_API[72]) -#define PyArray_FromString \ - (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ - PyArray_API[73]) -#define PyArray_FromBuffer \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ - PyArray_API[74]) -#define PyArray_FromIter \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ - PyArray_API[75]) -#define PyArray_Return \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[76]) -#define PyArray_GetField \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[77]) -#define PyArray_SetField \ - (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ - PyArray_API[78]) -#define PyArray_Byteswap \ - (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ - PyArray_API[79]) -#define PyArray_Resize \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \ - PyArray_API[80]) -#define PyArray_MoveInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[81]) -#define PyArray_CopyInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[82]) -#define PyArray_CopyAnyInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[83]) -#define PyArray_CopyObject \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[84]) -#define PyArray_NewCopy \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[85]) -#define PyArray_ToList \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[86]) -#define PyArray_ToString \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[87]) -#define PyArray_ToFile \ - (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ - PyArray_API[88]) -#define PyArray_Dump \ - (*(int (*)(PyObject *, PyObject *, int)) \ - PyArray_API[89]) -#define PyArray_Dumps \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[90]) -#define PyArray_ValidType \ - (*(int (*)(int)) \ - PyArray_API[91]) -#define PyArray_UpdateFlags \ - (*(void (*)(PyArrayObject *, int)) \ - PyArray_API[92]) -#define PyArray_New \ - (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \ - 
PyArray_API[93]) -#define PyArray_NewFromDescr \ - (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \ - PyArray_API[94]) -#define PyArray_DescrNew \ - (*(PyArray_Descr * (*)(PyArray_Descr *)) \ - PyArray_API[95]) -#define PyArray_DescrNewFromType \ - (*(PyArray_Descr * (*)(int)) \ - PyArray_API[96]) -#define PyArray_GetPriority \ - (*(double (*)(PyObject *, double)) \ - PyArray_API[97]) -#define PyArray_IterNew \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[98]) -#define PyArray_MultiIterNew \ - (*(PyObject* (*)(int, ...)) \ - PyArray_API[99]) -#define PyArray_PyIntAsInt \ - (*(int (*)(PyObject *)) \ - PyArray_API[100]) -#define PyArray_PyIntAsIntp \ - (*(npy_intp (*)(PyObject *)) \ - PyArray_API[101]) -#define PyArray_Broadcast \ - (*(int (*)(PyArrayMultiIterObject *)) \ - PyArray_API[102]) -#define PyArray_FillObjectArray \ - (*(void (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[103]) -#define PyArray_FillWithScalar \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[104]) -#define PyArray_CheckStrides \ - (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)) \ - PyArray_API[105]) -#define PyArray_DescrNewByteorder \ - (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ - PyArray_API[106]) -#define PyArray_IterAllButAxis \ - (*(PyObject * (*)(PyObject *, int *)) \ - PyArray_API[107]) -#define PyArray_CheckFromAny \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ - PyArray_API[108]) -#define PyArray_FromArray \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[109]) -#define PyArray_FromInterface \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[110]) -#define PyArray_FromStructInterface \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[111]) -#define PyArray_FromArrayAttr \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ - PyArray_API[112]) -#define PyArray_ScalarKind \ - (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ - PyArray_API[113]) -#define PyArray_CanCoerceScalar \ - (*(int (*)(int, int, NPY_SCALARKIND)) \ - PyArray_API[114]) -#define PyArray_NewFlagsObject \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[115]) -#define PyArray_CanCastScalar \ - (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ - PyArray_API[116]) -#define PyArray_CompareUCS4 \ - (*(int (*)(npy_ucs4 *, npy_ucs4 *, size_t)) \ - PyArray_API[117]) -#define PyArray_RemoveSmallest \ - (*(int (*)(PyArrayMultiIterObject *)) \ - PyArray_API[118]) -#define PyArray_ElementStrides \ - (*(int (*)(PyObject *)) \ - PyArray_API[119]) -#define PyArray_Item_INCREF \ - (*(void (*)(char *, PyArray_Descr *)) \ - PyArray_API[120]) -#define PyArray_Item_XDECREF \ - (*(void (*)(char *, PyArray_Descr *)) \ - PyArray_API[121]) -#define PyArray_FieldNames \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[122]) -#define PyArray_Transpose \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ - PyArray_API[123]) -#define PyArray_TakeFrom \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ - PyArray_API[124]) -#define PyArray_PutTo \ - (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ - PyArray_API[125]) -#define PyArray_PutMask \ - (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ - PyArray_API[126]) -#define PyArray_Repeat \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ - PyArray_API[127]) -#define PyArray_Choose \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject 
*, NPY_CLIPMODE)) \ - PyArray_API[128]) -#define PyArray_Sort \ - (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ - PyArray_API[129]) -#define PyArray_ArgSort \ - (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ - PyArray_API[130]) -#define PyArray_SearchSorted \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ - PyArray_API[131]) -#define PyArray_ArgMax \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[132]) -#define PyArray_ArgMin \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[133]) -#define PyArray_Reshape \ - (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[134]) -#define PyArray_Newshape \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ - PyArray_API[135]) -#define PyArray_Squeeze \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[136]) -#define PyArray_View \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ - PyArray_API[137]) -#define PyArray_SwapAxes \ - (*(PyObject * (*)(PyArrayObject *, int, int)) \ - PyArray_API[138]) -#define PyArray_Max \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[139]) -#define PyArray_Min \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[140]) -#define PyArray_Ptp \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[141]) -#define PyArray_Mean \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[142]) -#define PyArray_Trace \ - (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ - PyArray_API[143]) -#define PyArray_Diagonal \ - (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ - PyArray_API[144]) -#define PyArray_Clip \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ - PyArray_API[145]) -#define PyArray_Conjugate \ - (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[146]) -#define PyArray_Nonzero \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[147]) -#define PyArray_Std \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ - PyArray_API[148]) -#define PyArray_Sum \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[149]) -#define PyArray_CumSum \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[150]) -#define PyArray_Prod \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[151]) -#define PyArray_CumProd \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[152]) -#define PyArray_All \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[153]) -#define PyArray_Any \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[154]) -#define PyArray_Compress \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ - PyArray_API[155]) -#define PyArray_Flatten \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[156]) -#define PyArray_Ravel \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[157]) -#define PyArray_MultiplyList \ - (*(npy_intp (*)(npy_intp const *, int)) \ - PyArray_API[158]) -#define PyArray_MultiplyIntList \ - (*(int (*)(int const *, int)) \ - PyArray_API[159]) -#define PyArray_GetPtr \ - (*(void * (*)(PyArrayObject *, npy_intp const*)) \ - PyArray_API[160]) -#define PyArray_CompareLists \ - (*(int (*)(npy_intp const *, npy_intp const *, int)) \ - 
PyArray_API[161]) -#define PyArray_AsCArray \ - (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ - PyArray_API[162]) -#define PyArray_As1D \ - (*(int (*)(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))) \ - PyArray_API[163]) -#define PyArray_As2D \ - (*(int (*)(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))) \ - PyArray_API[164]) -#define PyArray_Free \ - (*(int (*)(PyObject *, void *)) \ - PyArray_API[165]) -#define PyArray_Converter \ - (*(int (*)(PyObject *, PyObject **)) \ - PyArray_API[166]) -#define PyArray_IntpFromSequence \ - (*(int (*)(PyObject *, npy_intp *, int)) \ - PyArray_API[167]) -#define PyArray_Concatenate \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[168]) -#define PyArray_InnerProduct \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyArray_API[169]) -#define PyArray_MatrixProduct \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyArray_API[170]) -#define PyArray_CopyAndTranspose \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[171]) -#define PyArray_Correlate \ - (*(PyObject * (*)(PyObject *, PyObject *, int)) \ - PyArray_API[172]) -#define PyArray_TypestrConvert \ - (*(int (*)(int, int)) \ - PyArray_API[173]) -#define PyArray_DescrConverter \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[174]) -#define PyArray_DescrConverter2 \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[175]) -#define PyArray_IntpConverter \ - (*(int (*)(PyObject *, PyArray_Dims *)) \ - PyArray_API[176]) -#define PyArray_BufferConverter \ - (*(int (*)(PyObject *, PyArray_Chunk *)) \ - PyArray_API[177]) -#define PyArray_AxisConverter \ - (*(int (*)(PyObject *, int *)) \ - PyArray_API[178]) -#define PyArray_BoolConverter \ - (*(int (*)(PyObject *, npy_bool *)) \ - PyArray_API[179]) -#define PyArray_ByteorderConverter \ - (*(int (*)(PyObject *, char *)) \ - PyArray_API[180]) -#define PyArray_OrderConverter \ - (*(int (*)(PyObject *, NPY_ORDER *)) \ - PyArray_API[181]) -#define PyArray_EquivTypes \ - (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[182]) -#define PyArray_Zeros \ - (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ - PyArray_API[183]) -#define PyArray_Empty \ - (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ - PyArray_API[184]) -#define PyArray_Where \ - (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ - PyArray_API[185]) -#define PyArray_Arange \ - (*(PyObject * (*)(double, double, double, int)) \ - PyArray_API[186]) -#define PyArray_ArangeObj \ - (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ - PyArray_API[187]) -#define PyArray_SortkindConverter \ - (*(int (*)(PyObject *, NPY_SORTKIND *)) \ - PyArray_API[188]) -#define PyArray_LexSort \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[189]) -#define PyArray_Round \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[190]) -#define PyArray_EquivTypenums \ - (*(unsigned char (*)(int, int)) \ - PyArray_API[191]) -#define PyArray_RegisterDataType \ - (*(int (*)(PyArray_Descr *)) \ - PyArray_API[192]) -#define PyArray_RegisterCastFunc \ - (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ - PyArray_API[193]) -#define PyArray_RegisterCanCast \ - (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ - PyArray_API[194]) -#define PyArray_InitArrFuncs \ - (*(void (*)(PyArray_ArrFuncs *)) \ - PyArray_API[195]) -#define 
PyArray_IntTupleFromIntp \ - (*(PyObject * (*)(int, npy_intp *)) \ - PyArray_API[196]) -#define PyArray_TypeNumFromName \ - (*(int (*)(char *)) \ - PyArray_API[197]) -#define PyArray_ClipmodeConverter \ - (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ - PyArray_API[198]) -#define PyArray_OutputConverter \ - (*(int (*)(PyObject *, PyArrayObject **)) \ - PyArray_API[199]) -#define PyArray_BroadcastToShape \ - (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ - PyArray_API[200]) -#define _PyArray_SigintHandler \ - (*(void (*)(int)) \ - PyArray_API[201]) -#define _PyArray_GetSigintBuf \ - (*(void* (*)(void)) \ - PyArray_API[202]) -#define PyArray_DescrAlignConverter \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[203]) -#define PyArray_DescrAlignConverter2 \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[204]) -#define PyArray_SearchsideConverter \ - (*(int (*)(PyObject *, void *)) \ - PyArray_API[205]) -#define PyArray_CheckAxis \ - (*(PyObject * (*)(PyArrayObject *, int *, int)) \ - PyArray_API[206]) -#define PyArray_OverflowMultiplyList \ - (*(npy_intp (*)(npy_intp *, int)) \ - PyArray_API[207]) -#define PyArray_CompareString \ - (*(int (*)(const char *, const char *, size_t)) \ - PyArray_API[208]) -#define PyArray_MultiIterFromObjects \ - (*(PyObject* (*)(PyObject **, int, int, ...)) \ - PyArray_API[209]) -#define PyArray_GetEndianness \ - (*(int (*)(void)) \ - PyArray_API[210]) -#define PyArray_GetNDArrayCFeatureVersion \ - (*(unsigned int (*)(void)) \ - PyArray_API[211]) -#define PyArray_Correlate2 \ - (*(PyObject * (*)(PyObject *, PyObject *, int)) \ - PyArray_API[212]) -#define PyArray_NeighborhoodIterNew \ - (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \ - PyArray_API[213]) -#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) -#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) -#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) -#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) -#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) -#define PyArray_SetDatetimeParseFunction \ - (*(void (*)(PyObject *NPY_UNUSED(op))) \ - PyArray_API[219]) -#define PyArray_DatetimeToDatetimeStruct \ - (*(void (*)(npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *)) \ - PyArray_API[220]) -#define PyArray_TimedeltaToTimedeltaStruct \ - (*(void (*)(npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *)) \ - PyArray_API[221]) -#define PyArray_DatetimeStructToDatetime \ - (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))) \ - PyArray_API[222]) -#define PyArray_TimedeltaStructToTimedelta \ - (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))) \ - PyArray_API[223]) -#define NpyIter_New \ - (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ - PyArray_API[224]) -#define NpyIter_MultiNew \ - (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ - PyArray_API[225]) -#define NpyIter_AdvancedNew \ - (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ - PyArray_API[226]) -#define NpyIter_Copy \ - (*(NpyIter * (*)(NpyIter *)) \ - PyArray_API[227]) -#define NpyIter_Deallocate \ - (*(int (*)(NpyIter *)) \ - PyArray_API[228]) -#define NpyIter_HasDelayedBufAlloc \ - (*(npy_bool 
(*)(NpyIter *)) \ - PyArray_API[229]) -#define NpyIter_HasExternalLoop \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[230]) -#define NpyIter_EnableExternalLoop \ - (*(int (*)(NpyIter *)) \ - PyArray_API[231]) -#define NpyIter_GetInnerStrideArray \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[232]) -#define NpyIter_GetInnerLoopSizePtr \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[233]) -#define NpyIter_Reset \ - (*(int (*)(NpyIter *, char **)) \ - PyArray_API[234]) -#define NpyIter_ResetBasePointers \ - (*(int (*)(NpyIter *, char **, char **)) \ - PyArray_API[235]) -#define NpyIter_ResetToIterIndexRange \ - (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ - PyArray_API[236]) -#define NpyIter_GetNDim \ - (*(int (*)(NpyIter *)) \ - PyArray_API[237]) -#define NpyIter_GetNOp \ - (*(int (*)(NpyIter *)) \ - PyArray_API[238]) -#define NpyIter_GetIterNext \ - (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ - PyArray_API[239]) -#define NpyIter_GetIterSize \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[240]) -#define NpyIter_GetIterIndexRange \ - (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ - PyArray_API[241]) -#define NpyIter_GetIterIndex \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[242]) -#define NpyIter_GotoIterIndex \ - (*(int (*)(NpyIter *, npy_intp)) \ - PyArray_API[243]) -#define NpyIter_HasMultiIndex \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[244]) -#define NpyIter_GetShape \ - (*(int (*)(NpyIter *, npy_intp *)) \ - PyArray_API[245]) -#define NpyIter_GetGetMultiIndex \ - (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ - PyArray_API[246]) -#define NpyIter_GotoMultiIndex \ - (*(int (*)(NpyIter *, npy_intp const *)) \ - PyArray_API[247]) -#define NpyIter_RemoveMultiIndex \ - (*(int (*)(NpyIter *)) \ - PyArray_API[248]) -#define NpyIter_HasIndex \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[249]) -#define NpyIter_IsBuffered \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[250]) -#define NpyIter_IsGrowInner \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[251]) -#define NpyIter_GetBufferSize \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[252]) -#define NpyIter_GetIndexPtr \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[253]) -#define NpyIter_GotoIndex \ - (*(int (*)(NpyIter *, npy_intp)) \ - PyArray_API[254]) -#define NpyIter_GetDataPtrArray \ - (*(char ** (*)(NpyIter *)) \ - PyArray_API[255]) -#define NpyIter_GetDescrArray \ - (*(PyArray_Descr ** (*)(NpyIter *)) \ - PyArray_API[256]) -#define NpyIter_GetOperandArray \ - (*(PyArrayObject ** (*)(NpyIter *)) \ - PyArray_API[257]) -#define NpyIter_GetIterView \ - (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ - PyArray_API[258]) -#define NpyIter_GetReadFlags \ - (*(void (*)(NpyIter *, char *)) \ - PyArray_API[259]) -#define NpyIter_GetWriteFlags \ - (*(void (*)(NpyIter *, char *)) \ - PyArray_API[260]) -#define NpyIter_DebugPrint \ - (*(void (*)(NpyIter *)) \ - PyArray_API[261]) -#define NpyIter_IterationNeedsAPI \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[262]) -#define NpyIter_GetInnerFixedStrideArray \ - (*(void (*)(NpyIter *, npy_intp *)) \ - PyArray_API[263]) -#define NpyIter_RemoveAxis \ - (*(int (*)(NpyIter *, int)) \ - PyArray_API[264]) -#define NpyIter_GetAxisStrideArray \ - (*(npy_intp * (*)(NpyIter *, int)) \ - PyArray_API[265]) -#define NpyIter_RequiresBuffering \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[266]) -#define NpyIter_GetInitialDataPtrArray \ - (*(char ** (*)(NpyIter *)) \ - PyArray_API[267]) -#define NpyIter_CreateCompatibleStrides \ - (*(int (*)(NpyIter *, npy_intp, npy_intp 
*)) \ - PyArray_API[268]) -#define PyArray_CastingConverter \ - (*(int (*)(PyObject *, NPY_CASTING *)) \ - PyArray_API[269]) -#define PyArray_CountNonzero \ - (*(npy_intp (*)(PyArrayObject *)) \ - PyArray_API[270]) -#define PyArray_PromoteTypes \ - (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[271]) -#define PyArray_MinScalarType \ - (*(PyArray_Descr * (*)(PyArrayObject *)) \ - PyArray_API[272]) -#define PyArray_ResultType \ - (*(PyArray_Descr * (*)(npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **)) \ - PyArray_API[273]) -#define PyArray_CanCastArrayTo \ - (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ - PyArray_API[274]) -#define PyArray_CanCastTypeTo \ - (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ - PyArray_API[275]) -#define PyArray_EinsteinSum \ - (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ - PyArray_API[276]) -#define PyArray_NewLikeArray \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ - PyArray_API[277]) -#define PyArray_GetArrayParamsFromObject \ - (*(int (*)(PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *)) \ - PyArray_API[278]) -#define PyArray_ConvertClipmodeSequence \ - (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ - PyArray_API[279]) -#define PyArray_MatrixProduct2 \ - (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ - PyArray_API[280]) -#define NpyIter_IsFirstVisit \ - (*(npy_bool (*)(NpyIter *, int)) \ - PyArray_API[281]) -#define PyArray_SetBaseObject \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[282]) -#define PyArray_CreateSortedStridePerm \ - (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \ - PyArray_API[283]) -#define PyArray_RemoveAxesInPlace \ - (*(void (*)(PyArrayObject *, const npy_bool *)) \ - PyArray_API[284]) -#define PyArray_DebugPrint \ - (*(void (*)(PyArrayObject *)) \ - PyArray_API[285]) -#define PyArray_FailUnlessWriteable \ - (*(int (*)(PyArrayObject *, const char *)) \ - PyArray_API[286]) -#define PyArray_SetUpdateIfCopyBase \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[287]) -#define PyDataMem_NEW \ - (*(void * (*)(size_t)) \ - PyArray_API[288]) -#define PyDataMem_FREE \ - (*(void (*)(void *)) \ - PyArray_API[289]) -#define PyDataMem_RENEW \ - (*(void * (*)(void *, size_t)) \ - PyArray_API[290]) -#define PyDataMem_SetEventHook \ - (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \ - PyArray_API[291]) -#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) -#define PyArray_MapIterSwapAxes \ - (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \ - PyArray_API[293]) -#define PyArray_MapIterArray \ - (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[294]) -#define PyArray_MapIterNext \ - (*(void (*)(PyArrayMapIterObject *)) \ - PyArray_API[295]) -#define PyArray_Partition \ - (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ - PyArray_API[296]) -#define PyArray_ArgPartition \ - (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ - PyArray_API[297]) -#define PyArray_SelectkindConverter \ - (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ - PyArray_API[298]) -#define PyDataMem_NEW_ZEROED \ - (*(void * (*)(size_t, size_t)) \ - PyArray_API[299]) -#define PyArray_CheckAnyScalarExact \ - (*(int (*)(PyObject *)) \ - PyArray_API[300]) -#define 
PyArray_MapIterArrayCopyIfOverlap \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ - PyArray_API[301]) -#define PyArray_ResolveWritebackIfCopy \ - (*(int (*)(PyArrayObject *)) \ - PyArray_API[302]) -#define PyArray_SetWritebackIfCopyBase \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[303]) - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - int st; - PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyArray_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); - return -1; - } - - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "ABI version 0x%x but this version of numpy is 0x%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "API version 0x%x but this version of numpy is 0x%x", \ - (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); - return -1; - } - - /* - * Perform runtime check of endianness and check it matches the one set by - * the headers (npy_endian.h) as a safeguard - */ - st = PyArray_GetEndianness(); - if (st == NPY_CPU_UNKNOWN_ENDIAN) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); - return -1; - } -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN - if (st != NPY_CPU_BIG) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "big endian, but detected different endianness at runtime"); - return -1; - } -#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN - if (st != NPY_CPU_LITTLE) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "little endian, but detected different endianness at runtime"); - return -1; - } -#endif - - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_ARRAY_RETVAL NULL -#else -#define NUMPY_IMPORT_ARRAY_RETVAL -#endif - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__ufunc_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__ufunc_api.h deleted file mode 100644 index d9385a6..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/__ufunc_api.h +++ /dev/null @@ -1,326 +0,0 @@ - -#ifdef _UMATHMODULE - -extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; - -extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; - -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ - (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *); -NPY_NO_EXPORT int PyUFunc_GenericFunction \ - (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **); -NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT int PyUFunc_GetPyValues \ - (char *, int *, int *, PyObject **); -NPY_NO_EXPORT int PyUFunc_checkfperr \ - (int, PyObject *, int *); -NPY_NO_EXPORT void PyUFunc_clearfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_getfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_handlefperr \ - (int, PyObject *, int, int *); -NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ - (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *); -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *); -NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ - (void **, size_t); -NPY_NO_EXPORT void PyUFunc_e_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT 
int PyUFunc_DefaultTypeResolver \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_ValidateCasting \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ - (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *); - -#else - -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) -extern void **PyUFunc_API; -#else -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -void **PyUFunc_API; -#else -static void **PyUFunc_API=NULL; -#endif -#endif - -#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) -#define PyUFunc_FromFuncAndData \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \ - PyUFunc_API[1]) -#define PyUFunc_RegisterLoopForType \ - (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \ - PyUFunc_API[2]) -#define PyUFunc_GenericFunction \ - (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \ - PyUFunc_API[3]) -#define PyUFunc_f_f_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[4]) -#define PyUFunc_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[5]) -#define PyUFunc_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[6]) -#define PyUFunc_g_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[7]) -#define PyUFunc_F_F_As_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[8]) -#define PyUFunc_F_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[9]) -#define PyUFunc_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[10]) -#define PyUFunc_G_G \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[11]) -#define PyUFunc_O_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[12]) -#define PyUFunc_ff_f_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[13]) -#define PyUFunc_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[14]) -#define PyUFunc_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[15]) -#define PyUFunc_gg_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[16]) -#define PyUFunc_FF_F_As_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[17]) -#define PyUFunc_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[18]) -#define PyUFunc_FF_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[19]) -#define PyUFunc_GG_G \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[20]) -#define PyUFunc_OO_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[21]) -#define PyUFunc_O_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[22]) -#define PyUFunc_OO_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[23]) -#define PyUFunc_On_Om \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[24]) -#define 
PyUFunc_GetPyValues \ - (*(int (*)(char *, int *, int *, PyObject **)) \ - PyUFunc_API[25]) -#define PyUFunc_checkfperr \ - (*(int (*)(int, PyObject *, int *)) \ - PyUFunc_API[26]) -#define PyUFunc_clearfperr \ - (*(void (*)(void)) \ - PyUFunc_API[27]) -#define PyUFunc_getfperr \ - (*(int (*)(void)) \ - PyUFunc_API[28]) -#define PyUFunc_handlefperr \ - (*(int (*)(int, PyObject *, int, int *)) \ - PyUFunc_API[29]) -#define PyUFunc_ReplaceLoopBySignature \ - (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \ - PyUFunc_API[30]) -#define PyUFunc_FromFuncAndDataAndSignature \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \ - PyUFunc_API[31]) -#define PyUFunc_SetUsesArraysAsData \ - (*(int (*)(void **, size_t)) \ - PyUFunc_API[32]) -#define PyUFunc_e_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[33]) -#define PyUFunc_e_e_As_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[34]) -#define PyUFunc_e_e_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[35]) -#define PyUFunc_ee_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[36]) -#define PyUFunc_ee_e_As_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[37]) -#define PyUFunc_ee_e_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[38]) -#define PyUFunc_DefaultTypeResolver \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ - PyUFunc_API[39]) -#define PyUFunc_ValidateCasting \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ - PyUFunc_API[40]) -#define PyUFunc_RegisterLoopForDescr \ - (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ - PyUFunc_API[41]) -#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \ - PyUFunc_API[42]) - -static NPY_INLINE int -_import_umath(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, - "numpy.core._multiarray_umath failed to import"); - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyUFunc_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); - return -1; - } - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_UMATH_RETVAL NULL -#else -#define NUMPY_IMPORT_UMATH_RETVAL -#endif - -#define import_umath() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - 
return NUMPY_IMPORT_UMATH_RETVAL;\ - }\ - } while(0) - -#define import_umath1(ret) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return ret;\ - }\ - } while(0) - -#define import_umath2(ret, msg) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError, msg);\ - return ret;\ - }\ - } while(0) - -#define import_ufunc() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - }\ - } while(0) - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h deleted file mode 100644 index e8860cb..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP -#error You should not include this header directly -#endif -/* - * Private API (here for inline) - */ -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); - -/* - * Update to next item of the iterator - * - * Note: this simply increment the coordinates vector, last dimension - * incremented first , i.e, for dimension 3 - * ... - * -1, -1, -1 - * -1, -1, 0 - * -1, -1, 1 - * .... - * -1, 0, -1 - * -1, 0, 0 - * .... - * 0, -1, -1 - * 0, -1, 0 - * .... - */ -#define _UPDATE_COORD_ITER(c) \ - wb = iter->coordinates[c] < iter->bounds[c][1]; \ - if (wb) { \ - iter->coordinates[c] += 1; \ - return 0; \ - } \ - else { \ - iter->coordinates[c] = iter->bounds[c][0]; \ - } - -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp i, wb; - - for (i = iter->nd - 1; i >= 0; --i) { - _UPDATE_COORD_ITER(i) - } - - return 0; -} - -/* - * Version optimized for 2d arrays, manual loop unrolling - */ -static NPY_INLINE int -_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp wb; - - _UPDATE_COORD_ITER(1) - _UPDATE_COORD_ITER(0) - - return 0; -} -#undef _UPDATE_COORD_ITER - -/* - * Advance to the next neighbour - */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) -{ - _PyArrayNeighborhoodIter_IncrCoord (iter); - iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); - - return 0; -} - -/* - * Reset functions - */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) -{ - npy_intp i; - - for (i = 0; i < iter->nd; ++i) { - iter->coordinates[i] = iter->bounds[i][0]; - } - iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); - - return 0; -} diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_numpyconfig.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_numpyconfig.h deleted file mode 100644 index edb7e37..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/_numpyconfig.h +++ /dev/null @@ -1,32 +0,0 @@ -#define NPY_HAVE_ENDIAN_H 1 -#define NPY_SIZEOF_SHORT SIZEOF_SHORT -#define NPY_SIZEOF_INT SIZEOF_INT -#define NPY_SIZEOF_LONG SIZEOF_LONG -#define NPY_SIZEOF_FLOAT 4 -#define NPY_SIZEOF_COMPLEX_FLOAT 8 -#define NPY_SIZEOF_DOUBLE 8 -#define NPY_SIZEOF_COMPLEX_DOUBLE 16 -#define NPY_SIZEOF_LONGDOUBLE 16 -#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 -#define 
NPY_SIZEOF_PY_INTPTR_T 8 -#define NPY_SIZEOF_OFF_T 8 -#define NPY_SIZEOF_PY_LONG_LONG 8 -#define NPY_SIZEOF_LONGLONG 8 -#define NPY_NO_SMP 0 -#define NPY_HAVE_DECL_ISNAN -#define NPY_HAVE_DECL_ISINF -#define NPY_HAVE_DECL_ISFINITE -#define NPY_HAVE_DECL_SIGNBIT -#define NPY_USE_C99_COMPLEX 1 -#define NPY_HAVE_COMPLEX_DOUBLE 1 -#define NPY_HAVE_COMPLEX_FLOAT 1 -#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1 -#define NPY_RELAXED_STRIDES_CHECKING 1 -#define NPY_USE_C99_FORMATS 1 -#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) -#define NPY_ABI_VERSION 0x01000009 -#define NPY_API_VERSION 0x0000000D - -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS 1 -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h deleted file mode 100644 index 4f46d6b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef Py_ARRAYOBJECT_H -#define Py_ARRAYOBJECT_H - -#include "ndarrayobject.h" -#include "npy_interrupt.h" - -#ifdef NPY_NO_PREFIX -#include "noprefix.h" -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayscalars.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayscalars.h deleted file mode 100644 index 64450e7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/arrayscalars.h +++ /dev/null @@ -1,175 +0,0 @@ -#ifndef _NPY_ARRAYSCALARS_H_ -#define _NPY_ARRAYSCALARS_H_ - -#ifndef _MULTIARRAYMODULE -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; -#endif - - -typedef struct { - PyObject_HEAD - signed char obval; -} PyByteScalarObject; - - -typedef struct { - PyObject_HEAD - short obval; -} PyShortScalarObject; - - -typedef struct { - PyObject_HEAD - int obval; -} PyIntScalarObject; - - -typedef struct { - PyObject_HEAD - long obval; -} PyLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longlong obval; -} PyLongLongScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned char obval; -} PyUByteScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned short obval; -} PyUShortScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned int obval; -} PyUIntScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned long obval; -} PyULongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_ulonglong obval; -} PyULongLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_half obval; -} PyHalfScalarObject; - - -typedef struct { - PyObject_HEAD - float obval; -} PyFloatScalarObject; - - -typedef struct { - PyObject_HEAD - double obval; -} PyDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longdouble obval; -} PyLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cfloat obval; -} PyCFloatScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cdouble obval; -} PyCDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_clongdouble obval; -} PyCLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - PyObject * obval; -} PyObjectScalarObject; - -typedef struct { - PyObject_HEAD - npy_datetime obval; - PyArray_DatetimeMetaData obmeta; -} PyDatetimeScalarObject; - -typedef struct { - PyObject_HEAD - npy_timedelta obval; - PyArray_DatetimeMetaData obmeta; -} PyTimedeltaScalarObject; - - -typedef struct { - PyObject_HEAD - char obval; -} PyScalarObject; - -#define PyStringScalarObject PyStringObject -#define PyUnicodeScalarObject 
PyUnicodeObject - -typedef struct { - PyObject_VAR_HEAD - char *obval; - PyArray_Descr *descr; - int flags; - PyObject *base; -} PyVoidScalarObject; - -/* Macros - PyScalarObject - PyArrType_Type - are defined in ndarrayobject.h -*/ - -#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) -#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) -#define PyArrayScalar_FromLong(i) \ - ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) -#define PyArrayScalar_RETURN_FALSE \ - return Py_INCREF(PyArrayScalar_False), \ - PyArrayScalar_False -#define PyArrayScalar_RETURN_TRUE \ - return Py_INCREF(PyArrayScalar_True), \ - PyArrayScalar_True - -#define PyArrayScalar_New(cls) \ - Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) -#define PyArrayScalar_VAL(obj, cls) \ - ((Py##cls##ScalarObject *)obj)->obval -#define PyArrayScalar_ASSIGN(obj, cls, val) \ - PyArrayScalar_VAL(obj, cls) = val - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/halffloat.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/halffloat.h deleted file mode 100644 index ab0d221..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/halffloat.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef __NPY_HALFFLOAT_H__ -#define __NPY_HALFFLOAT_H__ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Half-precision routines - */ - -/* Conversions */ -float npy_half_to_float(npy_half h); -double npy_half_to_double(npy_half h); -npy_half npy_float_to_half(float f); -npy_half npy_double_to_half(double d); -/* Comparisons */ -int npy_half_eq(npy_half h1, npy_half h2); -int npy_half_ne(npy_half h1, npy_half h2); -int npy_half_le(npy_half h1, npy_half h2); -int npy_half_lt(npy_half h1, npy_half h2); -int npy_half_ge(npy_half h1, npy_half h2); -int npy_half_gt(npy_half h1, npy_half h2); -/* faster *_nonan variants for when you know h1 and h2 are not NaN */ -int npy_half_eq_nonan(npy_half h1, npy_half h2); -int npy_half_lt_nonan(npy_half h1, npy_half h2); -int npy_half_le_nonan(npy_half h1, npy_half h2); -/* Miscellaneous functions */ -int npy_half_iszero(npy_half h); -int npy_half_isnan(npy_half h); -int npy_half_isinf(npy_half h); -int npy_half_isfinite(npy_half h); -int npy_half_signbit(npy_half h); -npy_half npy_half_copysign(npy_half x, npy_half y); -npy_half npy_half_spacing(npy_half h); -npy_half npy_half_nextafter(npy_half x, npy_half y); -npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus); - -/* - * Half-precision constants - */ - -#define NPY_HALF_ZERO (0x0000u) -#define NPY_HALF_PZERO (0x0000u) -#define NPY_HALF_NZERO (0x8000u) -#define NPY_HALF_ONE (0x3c00u) -#define NPY_HALF_NEGONE (0xbc00u) -#define NPY_HALF_PINF (0x7c00u) -#define NPY_HALF_NINF (0xfc00u) -#define NPY_HALF_NAN (0x7e00u) - -#define NPY_MAX_HALF (0x7bffu) - -/* - * Bit-level conversions - */ - -npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); -npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); -npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); -npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/multiarray_api.txt b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/multiarray_api.txt deleted file mode 100644 index 7e06386..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/multiarray_api.txt +++ /dev/null @@ -1,2506 +0,0 @@ - -=========== -NumPy C-API -=========== -:: - - unsigned int - PyArray_GetNDArrayCVersion(void ) - - -Included at the very first so not auto-grabbed and thus not labeled. - -:: - - int - PyArray_SetNumericOps(PyObject *dict) - -Set internal structure with number functions that all arrays will use - -:: - - PyObject * - PyArray_GetNumericOps(void ) - -Get dictionary showing number functions that all arrays will use - -:: - - int - PyArray_INCREF(PyArrayObject *mp) - -For object arrays, increment all internal references. - -:: - - int - PyArray_XDECREF(PyArrayObject *mp) - -Decrement all internal references for object arrays. -(or arrays with object fields) - -:: - - void - PyArray_SetStringFunction(PyObject *op, int repr) - -Set the array print function to be a Python function. - -:: - - PyArray_Descr * - PyArray_DescrFromType(int type) - -Get the PyArray_Descr structure for a type. - -:: - - PyObject * - PyArray_TypeObjectFromType(int type) - -Get a typeobject from a type-number -- can return NULL. - -New reference - -:: - - char * - PyArray_Zero(PyArrayObject *arr) - -Get pointer to zero of correct type for array. - -:: - - char * - PyArray_One(PyArrayObject *arr) - -Get pointer to one of correct type for array - -:: - - PyObject * - PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int - is_f_order) - -For backward compatibility - -Cast an array using typecode structure. -steals reference to dtype --- cannot be NULL - -This function always makes a copy of arr, even if the dtype -doesn't change. - -:: - - int - PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp) - -Cast to an already created array. - -:: - - int - PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) - -Cast to an already created array. Arrays don't have to be "broadcastable" -Only requirement is they have the same number of elements. - -:: - - int - PyArray_CanCastSafely(int fromtype, int totype) - -Check the type coercion rules. - -:: - - npy_bool - PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) - -leaves reference count alone --- cannot be NULL - -PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting' -parameter. - -:: - - int - PyArray_ObjectType(PyObject *op, int minimum_type) - -Return the typecode of the array a Python object would be converted to - -Returns the type number the result should have, or NPY_NOTYPE on error. - -:: - - PyArray_Descr * - PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) - -new reference -- accepts NULL for mintype - -:: - - PyArrayObject ** - PyArray_ConvertToCommonType(PyObject *op, int *retn) - - -:: - - PyArray_Descr * - PyArray_DescrFromScalar(PyObject *sc) - -Return descr object from array scalar. - -New reference - -:: - - PyArray_Descr * - PyArray_DescrFromTypeObject(PyObject *type) - - -:: - - npy_intp - PyArray_Size(PyObject *op) - -Compute the size of an array (in number of items) - -:: - - PyObject * - PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) - -Get scalar-equivalent to a region of memory described by a descriptor. - -:: - - PyObject * - PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) - -Get 0-dim array from scalar - -0-dim array from array-scalar object -always contains a copy of the data -unless outcode is NULL, it is of void type and the referrer does -not own it either. 
- -steals reference to outcode - -:: - - void - PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) - -Convert to c-type - -no error checking is performed -- ctypeptr must be same type as scalar -in case of flexible type, the data is not copied -into ctypeptr which is expected to be a pointer to pointer - -:: - - int - PyArray_CastScalarToCtype(PyObject *scalar, void - *ctypeptr, PyArray_Descr *outcode) - -Cast Scalar to c-type - -The output buffer must be large enough to receive the value -Even for flexible types, which is different from ScalarAsCtype -where only a reference for flexible types is returned - -This may not work right on narrow builds for NumPy unicode scalars. - -:: - - int - PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr - *indescr, void *ctypeptr, int outtype) - -Cast Scalar to c-type - -:: - - PyObject * - PyArray_ScalarFromObject(PyObject *object) - -Get an Array Scalar From a Python Object - -Returns NULL if unsuccessful but error is only set if another error occurred. -Currently only Numeric-like objects are supported. - -:: - - PyArray_VectorUnaryFunc * - PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) - -Get a cast function to cast from the input descriptor to the -output type_number (must be a registered data-type). -Returns NULL if unsuccessful. - -:: - - PyObject * - PyArray_FromDims(int NPY_UNUSED(nd) , int *NPY_UNUSED(d) , int - NPY_UNUSED(type) ) - -Deprecated, use PyArray_SimpleNew instead. - -:: - - PyObject * - PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd) , int - *NPY_UNUSED(d) , PyArray_Descr - *descr, char *NPY_UNUSED(data) ) - -Deprecated, use PyArray_NewFromDescr instead. - -:: - - PyObject * - PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int - min_depth, int max_depth, int flags, PyObject - *context) - -Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags -Steals a reference to newtype --- which can be NULL - -:: - - PyObject * - PyArray_EnsureArray(PyObject *op) - -This is a quick wrapper around -PyArray_FromAny(op, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL) -that special cases Arrays and PyArray_Scalars up front -It *steals a reference* to the object -It also guarantees that the result is PyArray_Type -Because it decrefs op if any conversion needs to take place -so it can be used like PyArray_EnsureArray(some_function(...)) - -:: - - PyObject * - PyArray_EnsureAnyArray(PyObject *op) - - -:: - - PyObject * - PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char - *sep) - - -Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an -array corresponding to the data encoded in that file. - -The reference to `dtype` is stolen (it is possible that the passed in -dtype is not held on to). - -The number of elements to read is given as ``num``; if it is < 0, -then as many as possible are read. - -If ``sep`` is NULL or empty, then binary data is assumed, else -text data, with ``sep`` as the separator between elements. Whitespace in -the separator matches any length of whitespace in the text, and a match -for whitespace around the separator is added. - -For memory-mapped files, use the buffer interface. No more data than -necessary is read by this routine. - -:: - - PyObject * - PyArray_FromString(char *data, npy_intp slen, PyArray_Descr - *dtype, npy_intp num, char *sep) - - -Given a pointer to a string ``data``, a string length ``slen``, and -a ``PyArray_Descr``, return an array corresponding to the data -encoded in that string. 
- -If the dtype is NULL, the default array type is used (double). -If non-null, the reference is stolen. - -If ``slen`` is < 0, then the end of string is used for text data. -It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs -would be the norm). - -The number of elements to read is given as ``num``; if it is < 0, -then as many as possible are read. - -If ``sep`` is NULL or empty, then binary data is assumed, else -text data, with ``sep`` as the separator between elements. Whitespace in -the separator matches any length of whitespace in the text, and a match -for whitespace around the separator is added. - -:: - - PyObject * - PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp - count, npy_intp offset) - - -:: - - PyObject * - PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) - - -steals a reference to dtype (which cannot be NULL) - -:: - - PyObject * - PyArray_Return(PyArrayObject *mp) - - -Return either an array or the appropriate Python object if the array -is 0d and matches a Python type. -steals reference to mp - -:: - - PyObject * - PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int - offset) - -Get a subset of bytes from each element of the array -steals reference to typed, must not be NULL - -:: - - int - PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int - offset, PyObject *val) - -Set a subset of bytes from each element of the array -steals reference to dtype, must not be NULL - -:: - - PyObject * - PyArray_Byteswap(PyArrayObject *self, npy_bool inplace) - - -:: - - PyObject * - PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int - refcheck, NPY_ORDER NPY_UNUSED(order) ) - -Resize (reallocate data). Only works if nothing else is referencing this -array and it is contiguous. If refcheck is 0, then the reference count is -not checked and assumed to be 1. You still must own this data and have no -weak-references and no base object. - -:: - - int - PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src) - -Move the memory of one array into another, allowing for overlapping data. - -Returns 0 on success, negative on failure. - -:: - - int - PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src) - -Copy an Array into another array. -Broadcast to the destination shape if necessary. - -Returns 0 on success, -1 on failure. - -:: - - int - PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src) - -Copy an Array into another array -- memory must not overlap -Does not require src and dest to have "broadcastable" shapes -(only the same number of elements). - -TODO: For NumPy 2.0, this could accept an order parameter which -only allows NPY_CORDER and NPY_FORDER. Could also rename -this to CopyAsFlat to make the name more intuitive. - -Returns 0 on success, -1 on error. - -:: - - int - PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) - - -:: - - PyObject * - PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) - -Copy an array. - -:: - - PyObject * - PyArray_ToList(PyArrayObject *self) - -To List - -:: - - PyObject * - PyArray_ToString(PyArrayObject *self, NPY_ORDER order) - - -:: - - int - PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) - -To File - -:: - - int - PyArray_Dump(PyObject *self, PyObject *file, int protocol) - - -:: - - PyObject * - PyArray_Dumps(PyObject *self, int protocol) - - -:: - - int - PyArray_ValidType(int type) - -Is the typenum valid? 
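The text-mode path of ``PyArray_FromString`` above can be made concrete with a short sketch. This assumes a compiled C extension in which ``import_array()`` has already succeeded; the helper name ``parse_doubles`` is purely illustrative::

    #include <string.h>

    /* Parse a comma-separated list of numbers into a 1-D double array.
     * PyArray_FromString steals the reference to dtype; num = -1 means
     * "read as many elements as possible", and a non-empty sep selects
     * text mode. */
    static PyObject *
    parse_doubles(const char *text)
    {
        PyArray_Descr *dtype = PyArray_DescrFromType(NPY_DOUBLE);
        if (dtype == NULL) {
            return NULL;
        }
        return PyArray_FromString((char *)text, (npy_intp)strlen(text),
                                  dtype, -1, ",");
    }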
- -:: - - void - PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) - -Update Several Flags at once. - -:: - - PyObject * - PyArray_New(PyTypeObject *subtype, int nd, npy_intp const *dims, int - type_num, npy_intp const *strides, void *data, int - itemsize, int flags, PyObject *obj) - -Generic new array creation routine. - -:: - - PyObject * - PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int - nd, npy_intp const *dims, npy_intp const - *strides, void *data, int flags, PyObject *obj) - -Generic new array creation routine. - -steals a reference to descr. On failure or when dtype->subarray is -true, dtype will be decrefed. - -:: - - PyArray_Descr * - PyArray_DescrNew(PyArray_Descr *base) - -base cannot be NULL - -:: - - PyArray_Descr * - PyArray_DescrNewFromType(int type_num) - - -:: - - double - PyArray_GetPriority(PyObject *obj, double default_) - -Get Priority from object - -:: - - PyObject * - PyArray_IterNew(PyObject *obj) - -Get Iterator. - -:: - - PyObject* - PyArray_MultiIterNew(int n, ... ) - -Get MultiIterator, - -:: - - int - PyArray_PyIntAsInt(PyObject *o) - - -:: - - npy_intp - PyArray_PyIntAsIntp(PyObject *o) - - -:: - - int - PyArray_Broadcast(PyArrayMultiIterObject *mit) - - -:: - - void - PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) - -Assumes contiguous - -:: - - int - PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) - - -:: - - npy_bool - PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp - offset, npy_intp *dims, npy_intp *newstrides) - - -:: - - PyArray_Descr * - PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) - - -returns a copy of the PyArray_Descr structure with the byteorder -altered: -no arguments: The byteorder is swapped (in all subfields as well) -single argument: The byteorder is forced to the given state -(in all subfields as well) - -Valid states: ('big', '>'), ('little', '<'), -or ('native', '=') - -If a descr structure with | is encountered its own -byte-order is not changed but any fields are: - - -Deep byteorder change of a data-type descriptor -Leaves reference count of self unchanged --- does not DECREF self *** - -:: - - PyObject * - PyArray_IterAllButAxis(PyObject *obj, int *inaxis) - -Get Iterator that iterates over all but one axis (don't use this with -PyArray_ITER_GOTO1D). The axis will be over-written if negative -with the axis having the smallest stride. - -:: - - PyObject * - PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int - min_depth, int max_depth, int requires, PyObject - *context) - -steals a reference to descr -- accepts NULL - -:: - - PyObject * - PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int - flags) - -steals reference to newtype --- acc. NULL - -:: - - PyObject * - PyArray_FromInterface(PyObject *origin) - - -:: - - PyObject * - PyArray_FromStructInterface(PyObject *input) - - -:: - - PyObject * - PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject - *context) - - -:: - - NPY_SCALARKIND - PyArray_ScalarKind(int typenum, PyArrayObject **arr) - -ScalarKind - -Returns the scalar kind of a type number, with an -optional tweak based on the scalar value itself. -If no scalar is provided, it returns INTPOS_SCALAR -for both signed and unsigned integers, otherwise -it checks the sign of any signed integer to choose -INTNEG_SCALAR when appropriate. 
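A hedged sketch of the creation and byteorder entries above (variable names are illustrative; assumes the NumPy headers are included and ``import_array()`` has run)::

    /* Create a fresh 3x4 C-contiguous double array. */
    npy_intp dims[2] = {3, 4};
    PyObject *arr = PyArray_New(&PyArray_Type, 2, dims, NPY_DOUBLE,
                                NULL, NULL, 0, 0, NULL);

    /* Derive a descriptor with the byte order swapped; as documented,
     * the input descriptor's reference count is left unchanged, so it
     * still has to be released by the caller. */
    PyArray_Descr *native = PyArray_DescrFromType(NPY_INT32);
    PyArray_Descr *swapped = PyArray_DescrNewByteorder(native, NPY_SWAP);
    Py_DECREF(native);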
- -:: - - int - PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND - scalar) - - -Determines whether the data type 'thistype', with -scalar kind 'scalar', can be coerced into 'neededtype'. - -:: - - PyObject * - PyArray_NewFlagsObject(PyObject *obj) - - -Get New ArrayFlagsObject - -:: - - npy_bool - PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) - -See if array scalars can be cast. - -TODO: For NumPy 2.0, add a NPY_CASTING parameter. - -:: - - int - PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) - - -:: - - int - PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) - -Adjusts previously broadcasted iterators so that the axis with -the smallest sum of iterator strides is not iterated over. -Returns dimension which is smallest in the range [0,multi->nd). -A -1 is returned if multi->nd == 0. - -don't use with PyArray_ITER_GOTO1D because factors are not adjusted - -:: - - int - PyArray_ElementStrides(PyObject *obj) - - -:: - - void - PyArray_Item_INCREF(char *data, PyArray_Descr *descr) - -XINCREF all objects in a single array item. This is complicated for -structured datatypes where the position of objects needs to be extracted. -The function is executed recursively for each nested field or subarray dtype, -such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])` - -:: - - void - PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) - - -XDECREF all objects in a single array item. This is complicated for -structured datatypes where the position of objects needs to be extracted. -The function is executed recursively for each nested field or subarray dtype, -such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])` - -:: - - PyObject * - PyArray_FieldNames(PyObject *fields) - -Return the tuple of ordered field names from a dictionary. - -:: - - PyObject * - PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute) - -Return Transpose. - -:: - - PyObject * - PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int - axis, PyArrayObject *out, NPY_CLIPMODE clipmode) - -Take - -:: - - PyObject * - PyArray_PutTo(PyArrayObject *self, PyObject*values0, PyObject - *indices0, NPY_CLIPMODE clipmode) - -Put values into an array - -:: - - PyObject * - PyArray_PutMask(PyArrayObject *self, PyObject*values0, PyObject*mask0) - -Put values into an array according to a mask. - -:: - - PyObject * - PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) - -Repeat the array. - -:: - - PyObject * - PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject - *out, NPY_CLIPMODE clipmode) - - -:: - - int - PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) - -Sort an array in-place - -:: - - PyObject * - PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) - -ArgSort an array - -:: - - PyObject * - PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE - side, PyObject *perm) - - -Search the sorted array op1 for the location of the items in op2. The -result is an array of indexes, one for each element in op2, such that if -the item were to be inserted in op1 just before that index the array -would still be in sorted order. - -Parameters ----------- -op1 : PyArrayObject * -Array to be searched, must be 1-D. 
-op2 : PyObject * -Array of items whose insertion indexes in op1 are wanted -side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT} -If NPY_SEARCHLEFT, return first valid insertion indexes -If NPY_SEARCHRIGHT, return last valid insertion indexes -perm : PyObject * -Permutation array that sorts op1 (optional) - -Returns -------- -ret : PyObject * -New reference to npy_intp array containing indexes where items in op2 -could be validly inserted into op1. NULL on error. - -Notes ------ -Binary search is used to find the indexes. - -:: - - PyObject * - PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) - -ArgMax - -:: - - PyObject * - PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out) - -ArgMin - -:: - - PyObject * - PyArray_Reshape(PyArrayObject *self, PyObject *shape) - -Reshape - -:: - - PyObject * - PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER - order) - -New shape for an array - -:: - - PyObject * - PyArray_Squeeze(PyArrayObject *self) - - -return a new view of the array object with all of its unit-length -dimensions squeezed out if needed, otherwise -return the same array. - -:: - - PyObject * - PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject - *pytype) - -View -steals a reference to type -- accepts NULL - -:: - - PyObject * - PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) - -SwapAxes - -:: - - PyObject * - PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) - -Max - -:: - - PyObject * - PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) - -Min - -:: - - PyObject * - PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) - -Ptp - -:: - - PyObject * - PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Mean - -:: - - PyObject * - PyArray_Trace(PyArrayObject *self, int offset, int axis1, int - axis2, int rtype, PyArrayObject *out) - -Trace - -:: - - PyObject * - PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int - axis2) - -Diagonal - -In NumPy versions prior to 1.7, this function always returned a copy of -the diagonal array. In 1.7, the code has been updated to compute a view -onto 'self', but it still copies this array before returning, as well as -setting the internal WARN_ON_WRITE flag. In a future version, it will -simply return a view onto self. - -:: - - PyObject * - PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject - *max, PyArrayObject *out) - -Clip - -:: - - PyObject * - PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) - -Conjugate - -:: - - PyObject * - PyArray_Nonzero(PyArrayObject *self) - -Nonzero - -TODO: In NumPy 2.0, should make the iteration order a parameter. 
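To make the ``PyArray_SearchSorted`` parameters above concrete, a minimal sketch; ``sorted_keys`` and ``new_values`` are illustrative placeholders for values obtained elsewhere::

    /* Indexes at which new_values could be inserted into the sorted,
     * 1-D array sorted_keys while keeping it sorted. Passing NULL for
     * perm means sorted_keys is taken to be sorted as-is. */
    PyObject *insert_idx = PyArray_SearchSorted(sorted_keys, new_values,
                                                NPY_SEARCHLEFT, NULL);
    if (insert_idx == NULL) {
        /* an exception has been set */
    }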
- -:: - - PyObject * - PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out, int variance) - -Set variance to 1 to bypass square-root calculation and return variance -Std - -:: - - PyObject * - PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Sum - -:: - - PyObject * - PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -CumSum - -:: - - PyObject * - PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Prod - -:: - - PyObject * - PyArray_CumProd(PyArrayObject *self, int axis, int - rtype, PyArrayObject *out) - -CumProd - -:: - - PyObject * - PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) - -All - -:: - - PyObject * - PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) - -Any - -:: - - PyObject * - PyArray_Compress(PyArrayObject *self, PyObject *condition, int - axis, PyArrayObject *out) - -Compress - -:: - - PyObject * - PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) - -Flatten - -:: - - PyObject * - PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) - -Ravel -Returns a contiguous array - -:: - - npy_intp - PyArray_MultiplyList(npy_intp const *l1, int n) - -Multiply a List - -:: - - int - PyArray_MultiplyIntList(int const *l1, int n) - -Multiply a List of ints - -:: - - void * - PyArray_GetPtr(PyArrayObject *obj, npy_intp const*ind) - -Produce a pointer into array - -:: - - int - PyArray_CompareLists(npy_intp const *l1, npy_intp const *l2, int n) - -Compare Lists - -:: - - int - PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int - nd, PyArray_Descr*typedescr) - -Simulate a C-array -steals a reference to typedescr -- can be NULL - -:: - - int - PyArray_As1D(PyObject **NPY_UNUSED(op) , char **NPY_UNUSED(ptr) , int - *NPY_UNUSED(d1) , int NPY_UNUSED(typecode) ) - -Convert to a 1D C-array - -:: - - int - PyArray_As2D(PyObject **NPY_UNUSED(op) , char ***NPY_UNUSED(ptr) , int - *NPY_UNUSED(d1) , int *NPY_UNUSED(d2) , int - NPY_UNUSED(typecode) ) - -Convert to a 2D C-array - -:: - - int - PyArray_Free(PyObject *op, void *ptr) - -Free pointers created if As2D is called - -:: - - int - PyArray_Converter(PyObject *object, PyObject **address) - - -Useful to pass as converter function for O& processing in PyArg_ParseTuple. - -This conversion function can be used with the "O&" argument for -PyArg_ParseTuple. It will immediately return an object of array type -or will convert any other object to an NPY_ARRAY_CARRAY. - -If you use PyArray_Converter, you must DECREF the array when finished -as you get a new reference to it. - -:: - - int - PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) - -PyArray_IntpFromSequence -Returns the number of integers converted or -1 if an error occurred. -vals must be large enough to hold maxvals - -:: - - PyObject * - PyArray_Concatenate(PyObject *op, int axis) - -Concatenate - -Concatenate an arbitrary Python sequence into an array. -op is a python object supporting the sequence interface. -Its elements will be concatenated together to form a single -multidimensional array. 
If axis is NPY_MAXDIMS or bigger, then -each sequence object will be flattened before concatenation - -:: - - PyObject * - PyArray_InnerProduct(PyObject *op1, PyObject *op2) - -Numeric.innerproduct(a,v) - -:: - - PyObject * - PyArray_MatrixProduct(PyObject *op1, PyObject *op2) - -Numeric.matrixproduct(a,v) -just like inner product but does the swapaxes stuff on the fly - -:: - - PyObject * - PyArray_CopyAndTranspose(PyObject *op) - -Copy and Transpose - -Could deprecate this function, as there isn't a speed benefit over -calling Transpose and then Copy. - -:: - - PyObject * - PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) - -Numeric.correlate(a1,a2,mode) - -:: - - int - PyArray_TypestrConvert(int itemsize, int gentype) - -Typestr converter - -:: - - int - PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) - -Get typenum from an object -- None goes to NPY_DEFAULT_TYPE -This function takes a Python object representing a type and converts it -to the correct PyArray_Descr * structure to describe the type. - -Many objects can be used to represent a data-type which in NumPy is -quite a flexible concept. - -This is the central code that converts Python objects to -Type-descriptor objects that are used throughout numpy. - -Returns a new reference in *at, but the returned descriptor should not be -modified as it may be one of the canonical immutable objects or -a reference to the input obj. - -:: - - int - PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) - -Get typenum from an object -- None goes to NULL - -:: - - int - PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) - -Get intp chunk from sequence - -This function takes a Python sequence object and allocates and -fills in an intp array with the converted values. - -Remember to free the pointer seq.ptr when done using -PyDimMem_FREE(seq.ptr)** - -:: - - int - PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) - -Get buffer chunk from object - -this function takes a Python object which exposes the (single-segment) -buffer interface and returns a pointer to the data segment - -You should increment the reference count of buf->base by one -if you will hang on to a reference - -You only get a borrowed reference to the object. Do not free the -memory... - -:: - - int - PyArray_AxisConverter(PyObject *obj, int *axis) - -Get axis from an object (possibly None) -- a converter function, - -See also PyArray_ConvertMultiAxis, which also handles a tuple of axes. - -:: - - int - PyArray_BoolConverter(PyObject *object, npy_bool *val) - -Convert an object to true / false - -:: - - int - PyArray_ByteorderConverter(PyObject *obj, char *endian) - -Convert object to endian - -:: - - int - PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) - -Convert an object to FORTRAN / C / ANY / KEEP - -:: - - unsigned char - PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) - - -This function returns true if the two typecodes are -equivalent (same basic kind and same itemsize). - -:: - - PyObject * - PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int - is_f_order) - -Zeros - -steals a reference to type. On failure or when dtype->subarray is -true, dtype will be decrefed. 
-accepts NULL type - -:: - - PyObject * - PyArray_Empty(int nd, npy_intp const *dims, PyArray_Descr *type, int - is_f_order) - -Empty - -accepts NULL type -steals a reference to type - -:: - - PyObject * - PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) - -Where - -:: - - PyObject * - PyArray_Arange(double start, double stop, double step, int type_num) - -Arange, - -:: - - PyObject * - PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject - *step, PyArray_Descr *dtype) - - -ArangeObj, - -this doesn't change the references - -:: - - int - PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) - -Convert object to sort kind - -:: - - PyObject * - PyArray_LexSort(PyObject *sort_keys, int axis) - -LexSort an array providing indices that will sort a collection of arrays -lexicographically. The first key is sorted on first, followed by the second key --- requires that arg"merge"sort is available for each sort_key - -Returns an index array that shows the indexes for the lexicographic sort along -the given axis. - -:: - - PyObject * - PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) - -Round - -:: - - unsigned char - PyArray_EquivTypenums(int typenum1, int typenum2) - - -:: - - int - PyArray_RegisterDataType(PyArray_Descr *descr) - -Register Data type -Does not change the reference count of descr - -:: - - int - PyArray_RegisterCastFunc(PyArray_Descr *descr, int - totype, PyArray_VectorUnaryFunc *castfunc) - -Register Casting Function -Replaces any function currently stored. - -:: - - int - PyArray_RegisterCanCast(PyArray_Descr *descr, int - totype, NPY_SCALARKIND scalar) - -Register a type number indicating that a descriptor can be cast -to it safely - -:: - - void - PyArray_InitArrFuncs(PyArray_ArrFuncs *f) - -Initialize arrfuncs to NULL - -:: - - PyObject * - PyArray_IntTupleFromIntp(int len, npy_intp *vals) - -PyArray_IntTupleFromIntp - -:: - - int - PyArray_TypeNumFromName(char *str) - - -:: - - int - PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) - -Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP - -:: - - int - PyArray_OutputConverter(PyObject *object, PyArrayObject **address) - -Useful to pass as converter function for O& processing in -PyArg_ParseTuple for output arrays - -:: - - PyObject * - PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd) - -Get Iterator broadcast to a particular shape - -:: - - void - _PyArray_SigintHandler(int signum) - - -:: - - void* - _PyArray_GetSigintBuf(void ) - - -:: - - int - PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) - - -Get type-descriptor from an object forcing alignment if possible -None goes to DEFAULT type. - -any object with the .fields attribute and/or .itemsize attribute (if the -.fields attribute does not give the total size -- i.e. a partial record -naming). If itemsize is given it must be >= size computed from fields - -The .fields attribute must return a convertible dictionary if present. -Result inherits from NPY_VOID. - -:: - - int - PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) - - -Get type-descriptor from an object forcing alignment if possible -None goes to NULL. 
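The converter entries above are designed for the "O&" slots of ``PyArg_ParseTuple``; a minimal sketch (the function name is illustrative)::

    static PyObject *
    take_array_and_dtype(PyObject *self, PyObject *args)
    {
        PyObject *arr = NULL;
        PyArray_Descr *dtype = NULL;

        /* PyArray_Converter yields a new array reference that must be
         * DECREF'd when done; PyArray_DescrConverter accepts anything
         * describing a dtype, mapping None to NPY_DEFAULT_TYPE. */
        if (!PyArg_ParseTuple(args, "O&O&",
                              PyArray_Converter, &arr,
                              PyArray_DescrConverter, &dtype)) {
            return NULL;
        }
        /* ... use arr and dtype ... */
        Py_DECREF(arr);
        Py_DECREF(dtype);
        Py_RETURN_NONE;
    }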
- -:: - - int - PyArray_SearchsideConverter(PyObject *obj, void *addr) - -Convert object to searchsorted side - -:: - - PyObject * - PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags) - -PyArray_CheckAxis - -check that axis is valid -convert 0-d arrays to 1-d arrays - -:: - - npy_intp - PyArray_OverflowMultiplyList(npy_intp *l1, int n) - -Multiply a List of Non-negative numbers with overflow detection. - -:: - - int - PyArray_CompareString(const char *s1, const char *s2, size_t len) - - -:: - - PyObject* - PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... ) - -Get MultiIterator from array of Python objects and any additional arrays - -PyObject **mps - array of PyObjects -int n - number of PyObjects in the array -int nadd - number of additional arrays to include in the iterator. - -Returns a multi-iterator object. - -:: - - int - PyArray_GetEndianness(void ) - - -:: - - unsigned int - PyArray_GetNDArrayCFeatureVersion(void ) - -Returns the built-in (at compilation time) C API version - -:: - - PyObject * - PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) - -correlate(a1,a2,mode) - -This function computes the usual correlation (correlate(a1, a2) != -correlate(a2, a1)) and conjugates the second argument for complex inputs - -:: - - PyObject* - PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp - *bounds, int mode, PyArrayObject*fill) - -A Neighborhood Iterator object. - -:: - - void - PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op) ) - -This function is scheduled to be removed - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - void - PyArray_DatetimeToDatetimeStruct(npy_datetime NPY_UNUSED(val) - , NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_datetimestruct *result) - -Fill the datetime struct from the value and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - void - PyArray_TimedeltaToTimedeltaStruct(npy_timedelta NPY_UNUSED(val) - , NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_timedeltastruct *result) - -Fill the timedelta struct from the timedelta value and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - npy_datetime - PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_datetimestruct *NPY_UNUSED(d) ) - -Create a datetime value from a filled datetime struct and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - npy_datetime - PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT NPY_UNUSED(fr) - , npy_timedeltastruct - *NPY_UNUSED(d) ) - -Create a timedelta value from a filled timedelta struct and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - NpyIter * - NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER - order, NPY_CASTING casting, PyArray_Descr*dtype) - -Allocate a new iterator for one array object. - -:: - - NpyIter * - NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32 - flags, NPY_ORDER order, NPY_CASTING - casting, npy_uint32 *op_flags, PyArray_Descr - **op_request_dtypes) - -Allocate a new iterator for more than one array object, using -standard NumPy broadcasting rules and the default buffer size. - -:: - - NpyIter * - NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 - flags, NPY_ORDER order, NPY_CASTING - casting, npy_uint32 *op_flags, PyArray_Descr - **op_request_dtypes, int oa_ndim, int - **op_axes, npy_intp *itershape, npy_intp - buffersize) - -Allocate a new iterator for multiple array objects, with advanced -options for controlling the broadcasting, shape, and buffer size. 
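A minimal single-operand loop assembled from the ``NpyIter`` entries above and below; it assumes ``arr`` already holds a readable array of doubles, and abbreviates error handling::

    NpyIter *iter = NpyIter_New(arr, NPY_ITER_READONLY, NPY_KEEPORDER,
                                NPY_NO_CASTING, NULL);
    if (iter != NULL) {
        NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
        char **dataptr = NpyIter_GetDataPtrArray(iter);
        if (iternext != NULL && NpyIter_GetIterSize(iter) != 0) {
            do {
                /* dataptr[0] is kept up to date by iternext(). */
                double value = *(double *)dataptr[0];
                (void)value; /* ... consume value ... */
            } while (iternext(iter));
        }
        NpyIter_Deallocate(iter);
    }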
- -:: - - NpyIter * - NpyIter_Copy(NpyIter *iter) - -Makes a copy of the iterator - -:: - - int - NpyIter_Deallocate(NpyIter *iter) - -Deallocate an iterator - -:: - - npy_bool - NpyIter_HasDelayedBufAlloc(NpyIter *iter) - -Whether the buffer allocation is being delayed - -:: - - npy_bool - NpyIter_HasExternalLoop(NpyIter *iter) - -Whether the iterator handles the inner loop - -:: - - int - NpyIter_EnableExternalLoop(NpyIter *iter) - -Removes the inner loop handling (so HasExternalLoop returns true) - -:: - - npy_intp * - NpyIter_GetInnerStrideArray(NpyIter *iter) - -Get the array of strides for the inner loop (when HasExternalLoop is true) - -This function may be safely called without holding the Python GIL. - -:: - - npy_intp * - NpyIter_GetInnerLoopSizePtr(NpyIter *iter) - -Get a pointer to the size of the inner loop (when HasExternalLoop is true) - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_Reset(NpyIter *iter, char **errmsg) - -Resets the iterator to its initial state - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char - **errmsg) - -Resets the iterator to its initial state, with new base data pointers. -This function requires great caution. - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp - iend, char **errmsg) - -Resets the iterator to a new iterator index range - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_GetNDim(NpyIter *iter) - -Gets the number of dimensions being iterated - -:: - - int - NpyIter_GetNOp(NpyIter *iter) - -Gets the number of operands being iterated - -:: - - NpyIter_IterNextFunc * - NpyIter_GetIterNext(NpyIter *iter, char **errmsg) - -Compute the specialized iteration function for an iterator - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - npy_intp - NpyIter_GetIterSize(NpyIter *iter) - -Gets the number of elements being iterated - -:: - - void - NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp - *iend) - -Gets the range of iteration indices being iterated - -:: - - npy_intp - NpyIter_GetIterIndex(NpyIter *iter) - -Gets the current iteration index - -:: - - int - NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) - -Sets the iterator position to the specified iterindex, -which matches the iteration order of the iterator. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - npy_bool - NpyIter_HasMultiIndex(NpyIter *iter) - -Whether the iterator is tracking a multi-index - -:: - - int - NpyIter_GetShape(NpyIter *iter, npy_intp *outshape) - -Gets the broadcast shape if a multi-index is being tracked by the iterator, -otherwise gets the shape of the iteration as Fortran-order -(fastest-changing index first). 
- -The reason Fortran-order is returned when a multi-index -is not enabled is that this is providing a direct view into how -the iterator traverses the n-dimensional space. The iterator organizes -its memory from fastest index to slowest index, and when -a multi-index is enabled, it uses a permutation to recover the original -order. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - NpyIter_GetMultiIndexFunc * - NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg) - -Compute a specialized get_multi_index function for the iterator - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp const *multi_index) - -Sets the iterator to the specified multi-index, which must have the -correct number of entries for 'ndim'. It is only valid -when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation -fails if the multi-index is out of bounds. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - int - NpyIter_RemoveMultiIndex(NpyIter *iter) - -Removes multi-index support from an iterator. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - npy_bool - NpyIter_HasIndex(NpyIter *iter) - -Whether the iterator is tracking an index - -:: - - npy_bool - NpyIter_IsBuffered(NpyIter *iter) - -Whether the iterator is buffered - -:: - - npy_bool - NpyIter_IsGrowInner(NpyIter *iter) - -Whether the inner loop can grow if buffering is unneeded - -:: - - npy_intp - NpyIter_GetBufferSize(NpyIter *iter) - -Gets the size of the buffer, or 0 if buffering is not enabled - -:: - - npy_intp * - NpyIter_GetIndexPtr(NpyIter *iter) - -Get a pointer to the index, if it is being tracked - -:: - - int - NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index) - -If the iterator is tracking an index, sets the iterator -to the specified index. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - char ** - NpyIter_GetDataPtrArray(NpyIter *iter) - -Get the array of data pointers (1 per object being iterated) - -This function may be safely called without holding the Python GIL. - -:: - - PyArray_Descr ** - NpyIter_GetDescrArray(NpyIter *iter) - -Get the array of data type pointers (1 per object being iterated) - -:: - - PyArrayObject ** - NpyIter_GetOperandArray(NpyIter *iter) - -Get the array of objects being iterated - -:: - - PyArrayObject * - NpyIter_GetIterView(NpyIter *iter, npy_intp i) - -Returns a view to the i-th object with the iterator's internal axes - -:: - - void - NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags) - -Gets an array of read flags (1 per object being iterated) - -:: - - void - NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags) - -Gets an array of write flags (1 per object being iterated) - -:: - - void - NpyIter_DebugPrint(NpyIter *iter) - -For debugging - -:: - - npy_bool - NpyIter_IterationNeedsAPI(NpyIter *iter) - -Whether the iteration loop, and in particular the iternext() -function, needs API access. If this is true, the GIL must -be retained while iterating. - -:: - - void - NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) - -Get an array of strides which are fixed. Any strides which may -change during iteration receive the value NPY_MAX_INTP. 
Once -the iterator is ready to iterate, call this to get the strides -which will always be fixed in the inner loop, then choose optimized -inner loop functions which take advantage of those fixed strides. - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_RemoveAxis(NpyIter *iter, int axis) - -Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX -was set for iterator creation, and does not work if buffering is -enabled. This function also resets the iterator to its initial state. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - npy_intp * - NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) - -Gets the array of strides for the specified axis. -If the iterator is tracking a multi-index, gets the strides -for the axis specified, otherwise gets the strides for -the iteration axis as Fortran order (fastest-changing axis first). - -Returns NULL if an error occurs. - -:: - - npy_bool - NpyIter_RequiresBuffering(NpyIter *iter) - -Whether the iteration could be done with no buffering. - -:: - - char ** - NpyIter_GetInitialDataPtrArray(NpyIter *iter) - -Get the array of data pointers (1 per object being iterated), -directly into the arrays (never pointing to a buffer), for starting -unbuffered iteration. This always returns the addresses for the -iterator position as reset to iterator index 0. - -These pointers are different from the pointers accepted by -NpyIter_ResetBasePointers, because the direction along some -axes may have been reversed, requiring base offsets. - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp - itemsize, npy_intp *outstrides) - -Builds a set of strides which are the same as the strides of an -output array created using the NPY_ITER_ALLOCATE flag, where NULL -was passed for op_axes. This is for data packed contiguously, -but not necessarily in C or Fortran order. This should be used -together with NpyIter_GetShape and NpyIter_GetNDim. - -A use case for this function is to match the shape and layout of -the iterator and tack on one or more dimensions. For example, -in order to generate a vector per input value for a numerical gradient, -you pass in ndim*itemsize for itemsize, then add another dimension to -the end with size ndim and stride itemsize. To do the Hessian matrix, -you do the same thing but add two dimensions, or take advantage of -the symmetry and pack it into 1 dimension with a particular encoding. - -This function may only be called if the iterator is tracking a multi-index -and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from -being iterated in reverse order. - -If an array is created with this method, simply adding 'itemsize' -for each iteration will traverse the new array matching the -iterator. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - int - PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) - -Convert any Python object, *obj*, to an NPY_CASTING enum. - -:: - - npy_intp - PyArray_CountNonzero(PyArrayObject *self) - -Counts the number of non-zero elements in the array. - -Returns -1 on error. - -:: - - PyArray_Descr * - PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) - -Produces the smallest size and lowest kind type to which both -input types can be cast. 
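As a quick, hypothetical illustration of the promotion rule above: int32
promoted with float32 yields float64, since float32 cannot represent every
int32 value.

::

    /* Illustrative only; relies on the documented promotion behavior. */
    PyArray_Descr *t1 = PyArray_DescrFromType(NPY_INT32);
    PyArray_Descr *t2 = PyArray_DescrFromType(NPY_FLOAT32);
    PyArray_Descr *res = PyArray_PromoteTypes(t1, t2);
    /* res->type_num is NPY_DOUBLE */
    Py_DECREF(t1);
    Py_DECREF(t2);
    Py_XDECREF(res);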
- -:: - - PyArray_Descr * - PyArray_MinScalarType(PyArrayObject *arr) - -If arr is a scalar (has 0 dimensions) with a built-in number data type, -finds the smallest type size/kind which can still represent its data. -Otherwise, returns the array's data type. - - -:: - - PyArray_Descr * - PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, npy_intp - ndtypes, PyArray_Descr **dtypes) - -Produces the result type of a bunch of inputs, using the UFunc -type promotion rules. Use this function when you have a set of -input arrays, and need to determine an output array dtype. - -If all the inputs are scalars (have 0 dimensions) or the maximum "kind" -of the scalars is greater than the maximum "kind" of the arrays, does -a regular type promotion. - -Otherwise, does a type promotion on the MinScalarType -of all the inputs. Data types passed directly are treated as array -types. - - -:: - - npy_bool - PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr - *to, NPY_CASTING casting) - -Returns 1 if the array object may be cast to the given data type using -the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in -that it handles scalar arrays (0 dimensions) specially, by checking -their value. - -:: - - npy_bool - PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr - *to, NPY_CASTING casting) - -Returns true if data of type 'from' may be cast to data of type -'to' according to the rule 'casting'. - -:: - - PyArrayObject * - PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject - **op_in, PyArray_Descr *dtype, NPY_ORDER - order, NPY_CASTING casting, PyArrayObject *out) - -This function provides summation of array elements according to -the Einstein summation convention. For example: -- trace(a) -> einsum("ii", a) -- transpose(a) -> einsum("ji", a) -- multiply(a,b) -> einsum(",", a, b) -- inner(a,b) -> einsum("i,i", a, b) -- outer(a,b) -> einsum("i,j", a, b) -- matvec(a,b) -> einsum("ij,j", a, b) -- matmat(a,b) -> einsum("ij,jk", a, b) - -subscripts: The string of subscripts for einstein summation. -nop: The number of operands -op_in: The array of operands -dtype: Either NULL, or the data type to force the calculation as. -order: The order for the calculation/the output axes. -casting: What kind of casts should be permitted. -out: Either NULL, or an array into which the output should be placed. - -By default, the labels get placed in alphabetical order -at the end of the output. So, if c = einsum("i,j", a, b) -then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b) -then c[i,j] = a[j]*b[i]. - -Alternatively, you can control the output order or prevent -an axis from being summed/force an axis to be summed by providing -indices for the output. This allows us to turn 'trace' into -'diag', for example. -- diag(a) -> einsum("ii->i", a) -- sum(a, axis=0) -> einsum("i...->", a) - -Subscripts at the beginning and end may be specified by -putting an ellipsis "..." in the middle. For example, -the function einsum("i...i", a) takes the diagonal of -the first and last dimensions of the operand, and -einsum("ij...,jk...->ik...") takes the matrix product using -the first two indices of each operand instead of the last two. - -When there is only one operand, no axes being summed, and -no output parameter, this function returns a view -into the operand instead of making a copy. 
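A hedged usage sketch (not from the original file): a plain matrix product
through this entry point, where `a` and `b` are assumed to be 2-D arrays
with matching inner dimensions.

::

    /* c = matmat(a, b), i.e. einsum("ij,jk->ik", a, b). */
    PyArrayObject *operands[2] = {a, b};
    PyArrayObject *c = PyArray_EinsteinSum("ij,jk->ik", 2, operands,
                                           NULL, NPY_KEEPORDER,
                                           NPY_SAFE_CASTING, NULL);
    if (c == NULL) {
        /* an error has been set by NumPy */
    }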
- -:: - - PyObject * - PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER - order, PyArray_Descr *dtype, int subok) - -Creates a new array with the same shape as the provided one, -with possible memory layout order and data type changes. - -prototype - The array the new one should be like. -order - NPY_CORDER - C-contiguous result. -NPY_FORTRANORDER - Fortran-contiguous result. -NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise. -NPY_KEEPORDER - Keeps the axis ordering of prototype. -dtype - If not NULL, overrides the data type of the result. -subok - If 1, use the prototype's array subtype, otherwise -always create a base-class array. - -NOTE: If dtype is not NULL, steals the dtype reference. On failure or when -dtype->subarray is true, dtype will be decrefed. - -:: - - int - PyArray_GetArrayParamsFromObject(PyObject *op, PyArray_Descr - *requested_dtype, npy_bool - writeable, PyArray_Descr - **out_dtype, int *out_ndim, npy_intp - *out_dims, PyArrayObject - **out_arr, PyObject *context) - -Retrieves the array parameters for viewing/converting an arbitrary -PyObject* to a NumPy array. This allows the "innate type and shape" -of Python list-of-lists to be discovered without -actually converting to an array. - -In some cases, such as structured arrays and the __array__ interface, -a data type needs to be used to make sense of the object. When -this is needed, provide a Descr for 'requested_dtype', otherwise -provide NULL. This reference is not stolen. Also, if the requested -dtype doesn't modify the interpretation of the input, out_dtype will -still get the "innate" dtype of the object, not the dtype passed -in 'requested_dtype'. - -If writing to the value in 'op' is desired, set the boolean -'writeable' to 1. This raises an error when 'op' is a scalar, list -of lists, or other non-writeable 'op'. - -Result: When success (0 return value) is returned, either out_arr -is filled with a non-NULL PyArrayObject and -the rest of the parameters are untouched, or out_arr is -filled with NULL, and the rest of the parameters are -filled. - -Typical usage: - -PyArrayObject *arr = NULL; -PyArray_Descr *dtype = NULL; -int ndim = 0; -npy_intp dims[NPY_MAXDIMS]; - -if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype, -&ndim, dims, &arr, NULL) < 0) { -return NULL; -} -if (arr == NULL) { -... validate/change dtype, validate flags, ndim, etc ... -// Could make custom strides here too -arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, -dims, NULL, -is_f_order ? NPY_ARRAY_F_CONTIGUOUS : 0, -NULL); -if (arr == NULL) { -return NULL; -} -if (PyArray_CopyObject(arr, op) < 0) { -Py_DECREF(arr); -return NULL; -} -} -else { -... in this case the other parameters weren't filled, just -validate and possibly copy arr itself ... -} -... use arr ... - -:: - - int - PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE - *modes, int n) - -Convert an object to an array of n NPY_CLIPMODE values. -This is intended to be used in functions where a different mode -could be applied to each axis, like in ravel_multi_index. - -:: - - PyObject * - PyArray_MatrixProduct2(PyObject *op1, PyObject - *op2, PyArrayObject*out) - -Numeric.matrixproduct2(a,v,out) -just like inner product but does the swapaxes stuff on the fly - -:: - - npy_bool - NpyIter_IsFirstVisit(NpyIter *iter, int iop) - -Checks to see whether this is the first time the elements -of the specified reduction operand which the iterator points at are -being seen for the first time. 
The function returns
-a reasonable answer for reduction operands and when buffering is
-disabled. The answer may be incorrect for buffered non-reduction
-operands.
-
-This function is intended to be used in EXTERNAL_LOOP mode only,
-and will produce some wrong answers when that mode is not enabled.
-
-If this function returns true, the caller should also
-check the inner loop stride of the operand, because if
-that stride is 0, then only the first element of the innermost
-external loop is being visited for the first time.
-
-WARNING: For performance reasons, 'iop' is not bounds-checked;
-it is not confirmed that 'iop' is actually a reduction
-operand, and it is not confirmed that EXTERNAL_LOOP
-mode is enabled. These checks are the responsibility of
-the caller, and should be done outside of any inner loops.
-
-::
-
-  int
-  PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
-
-Sets the 'base' attribute of the array. This steals a reference
-to 'obj'.
-
-Returns 0 on success, -1 on failure.
-
-::
-
-  void
-  PyArray_CreateSortedStridePerm(int ndim, npy_intp const
-                                 *strides, npy_stride_sort_item
-                                 *out_strideperm)
-
-
-This function populates the first ndim elements of out_strideperm
-with the strides sorted in descending order by their absolute values.
-For example, the stride array (4, -2, 12) becomes
-[(2, 12), (0, 4), (1, -2)].
-
-::
-
-  void
-  PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags)
-
-
-Removes the axes flagged as True from the array,
-modifying it in place. If an axis flagged for removal
-has a shape entry bigger than one, this effectively selects
-index zero for that axis.
-
-WARNING: If an axis flagged for removal has a shape equal to zero,
-the array will point to invalid memory. The caller must
-validate this!
-If an axis flagged for removal has a shape larger than one,
-the aligned flag (and in the future the contiguous flags)
-may need an explicit update.
-(check also NPY_RELAXED_STRIDES_CHECKING)
-
-For example, this can be used to remove the reduction axes
-from a reduction result once its computation is complete.
-
-::
-
-  void
-  PyArray_DebugPrint(PyArrayObject *obj)
-
-Prints the raw data of the ndarray in a form useful for debugging
-low-level C issues.
-
-::
-
-  int
-  PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
-
-
-This function does nothing if obj is writeable, and raises an exception
-(and returns -1) if obj is not writeable. It may also do other
-housekeeping, such as issuing warnings on arrays which are transitioning
-to become views. Always call this function at some point before writing to
-an array.
-
-'name' is a name for the array, used to give better error
-messages. Something like "assignment destination", "output array", or even
-just "array".
-
-::
-
-  int
-  PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
-
-
-Precondition: 'arr' is a copy of 'base' (though possibly with different
-strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the
-->base pointer on 'arr', so that when 'arr' is destructed, it will copy any
-changes back to 'base'. DEPRECATED; use PyArray_SetWritebackIfCopyBase.
-
-Steals a reference to 'base'.
-
-Returns 0 on success, -1 on failure.
-
-::
-
-  void *
-  PyDataMem_NEW(size_t size)
-
-Allocates memory for array data.
-
-::
-
-  void
-  PyDataMem_FREE(void *ptr)
-
-Frees memory for array data.
-
-::
-
-  void *
-  PyDataMem_RENEW(void *ptr, size_t size)
-
-Reallocates/resizes memory for array data.
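A small illustrative sequence for the three allocation calls above; the
buffer sizes are arbitrary.

::

    /* Allocate, grow, and release a raw data buffer. */
    void *buf = PyDataMem_NEW(100 * sizeof(double));
    if (buf != NULL) {
        void *grown = PyDataMem_RENEW(buf, 200 * sizeof(double));
        if (grown != NULL) {
            buf = grown;   /* on failure, the original block stays valid */
        }
        PyDataMem_FREE(buf);
    }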
-
-::
-
-  PyDataMem_EventHookFunc *
-  PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void
-                         *user_data, void **old_data)
-
-Sets the allocation event hook for numpy array data.
-Takes a PyDataMem_EventHookFunc *, which has the signature:
-void hook(void *old, void *new, size_t size, void *user_data).
-Also takes a void *user_data, and void **old_data.
-
-Returns a pointer to the previous hook or NULL. If old_data is
-non-NULL, the previous user_data pointer will be copied to it.
-
-If not NULL, hook will be called at the end of each PyDataMem_NEW/FREE/RENEW:
-result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data)
-PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data)
-result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data)
-
-When the hook is called, the GIL will be held by the calling
-thread. The hook should be written to be reentrant if it performs
-operations that might cause new allocation events (such as the
-creation/destruction of numpy objects, or creating/destroying Python
-objects which might cause a gc).
-
-::
-
-  void
-  PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject
-                          **ret, int getmap)
-
-
-::
-
-  PyObject *
-  PyArray_MapIterArray(PyArrayObject *a, PyObject *index)
-
-
-Use advanced indexing to iterate over an array.
-
-::
-
-  void
-  PyArray_MapIterNext(PyArrayMapIterObject *mit)
-
-This function needs to update the state of the map iterator
-and point mit->dataptr to the memory location of the next object.
-
-Note that this function never handles an extra operand but provides
-compatibility for an old (exposed) API.
-
-::
-
-  int
-  PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int
-                    axis, NPY_SELECTKIND which)
-
-Partition an array in-place
-
-::
-
-  PyObject *
-  PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int
-                       axis, NPY_SELECTKIND which)
-
-ArgPartition an array
-
-::
-
-  int
-  PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind)
-
-Convert object to select kind
-
-::
-
-  void *
-  PyDataMem_NEW_ZEROED(size_t size, size_t elsize)
-
-Allocates zeroed memory for array data.
-
-::
-
-  int
-  PyArray_CheckAnyScalarExact(PyObject *obj)
-
-Return true if the object is exactly a numpy scalar.
-
-::
-
-  PyObject *
-  PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, PyObject
-                                    *index, int
-                                    copy_if_overlap, PyArrayObject
-                                    *extra_op)
-
-
-Same as PyArray_MapIterArray, but:
-
-If copy_if_overlap != 0, check if `a` has memory overlap with any of the
-arrays in `index` and with `extra_op`. If yes, make copies as appropriate
-to avoid problems if `a` is modified during the iteration.
-`iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set).
-
-::
-
-  int
-  PyArray_ResolveWritebackIfCopy(PyArrayObject *self)
-
-
-If WRITEBACKIFCOPY is set and self has data, reset the base WRITEABLE flag,
-copy the local data to base, release the local data, and set flags
-appropriately. Returns 0 if not relevant, 1 on success, < 0 on failure.
-
-::
-
-  int
-  PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject
-                                 *base)
-
-
-Precondition: 'arr' is a copy of 'base' (though possibly with different
-strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the
-->base pointer on 'arr'; call PyArray_ResolveWritebackIfCopy to copy any
-changes back to 'base' before deallocating the array.
-
-Steals a reference to 'base'.
-
-Returns 0 on success, -1 on failure.
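A sketch of the teardown sequence this implies for an operand prepared with
PyArray_SetWritebackIfCopyBase (`arr` is assumed to be such an array):

::

    /* Push any pending changes back to the base array, then drop arr. */
    if (PyArray_ResolveWritebackIfCopy(arr) < 0) {
        /* an exception is set; propagate the failure */
    }
    Py_DECREF(arr);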
-
diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarrayobject.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarrayobject.h
deleted file mode 100644
index 95e9cb0..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarrayobject.h
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * DON'T INCLUDE THIS DIRECTLY.
- */
-
-#ifndef NPY_NDARRAYOBJECT_H
-#define NPY_NDARRAYOBJECT_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <Python.h>
-#include "ndarraytypes.h"
-
-/* Includes the "function" C-API -- these are all stored in a
-   list of pointers --- one for each file
-   The two lists are concatenated into one in multiarray.
-
-   They are available as import_array()
-*/
-
-#include "__multiarray_api.h"
-
-
-/* C-API that requires previous API to be defined */
-
-#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
-
-#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
-#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
-
-#define PyArray_HasArrayInterfaceType(op, type, context, out)              \
-        ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \
-         (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) ||       \
-         (((out)=PyArray_FromArrayAttr(op, type, context)) !=              \
-          Py_NotImplemented))
-
-#define PyArray_HasArrayInterface(op, out)                                 \
-        PyArray_HasArrayInterfaceType(op, NULL, NULL, out)
-
-#define PyArray_IsZeroDim(op) (PyArray_Check(op) &&                        \
-                               (PyArray_NDIM((PyArrayObject *)op) == 0))
-
-#define PyArray_IsScalar(obj, cls)                                         \
-        (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))
-
-#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) ||            \
-                                PyArray_IsZeroDim(m))
-#if PY_MAJOR_VERSION >= 3
-#define PyArray_IsPythonNumber(obj)                                        \
-        (PyFloat_Check(obj) || PyComplex_Check(obj) ||                     \
-         PyLong_Check(obj) || PyBool_Check(obj))
-#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj)                    \
-              || PyArray_IsScalar((obj), Integer))
-#define PyArray_IsPythonScalar(obj)                                        \
-        (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) ||              \
-         PyUnicode_Check(obj))
-#else
-#define PyArray_IsPythonNumber(obj)                                        \
-        (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \
-         PyLong_Check(obj) || PyBool_Check(obj))
-#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj)                     \
-              || PyLong_Check(obj)                                         \
-              || PyArray_IsScalar((obj), Integer))
-#define PyArray_IsPythonScalar(obj)                                        \
-        (PyArray_IsPythonNumber(obj) || PyString_Check(obj) ||             \
-         PyUnicode_Check(obj))
-#endif
-
-#define PyArray_IsAnyScalar(obj)                                           \
-        (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
-
-#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) ||        \
-                                     PyArray_CheckScalar(obj))
-
-
-#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ?                \
-                                  Py_INCREF(m), (m) :                      \
-                                  (PyArrayObject *)(PyArray_Copy(m)))
-
-#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \
-                                  PyArray_CompareLists(PyArray_DIMS(a1),    \
-                                                       PyArray_DIMS(a2),    \
-                                                       PyArray_NDIM(a1)))
-
-#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
-#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
-#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)
-
-#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \
-                                                      NULL)
-
-#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
-                                PyArray_DescrFromType(type), 0, 0, 0, NULL)
-
-#define PyArray_FROM_OTF(m, type, flags) \
-        PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
-                        (((flags) & NPY_ARRAY_ENSURECOPY) ?
\ - ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) - -#define PyArray_FROMANY(m, type, min, max, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ - (((flags) & NPY_ARRAY_ENSURECOPY) ? \ - (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) - -#define PyArray_ZEROS(m, dims, type, is_f_order) \ - PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) - -#define PyArray_EMPTY(m, dims, type, is_f_order) \ - PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) - -#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ - PyArray_NBYTES(obj)) -#ifndef PYPY_VERSION -#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) -#define NPY_REFCOUNT PyArray_REFCOUNT -#endif -#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) - -#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_DEFAULT, NULL) - -#define PyArray_EquivArrTypes(a1, a2) \ - PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) - -#define PyArray_EquivByteorders(b1, b2) \ - (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) - -#define PyArray_SimpleNew(nd, dims, typenum) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) - -#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ - data, 0, NPY_ARRAY_CARRAY, NULL) - -#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ - PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ - NULL, NULL, 0, NULL) - -#define PyArray_ToScalar(data, arr) \ - PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) - - -/* These might be faster without the dereferencing of obj - going on inside -- of course an optimizing compiler should - inline the constants inside a for loop making it a moot point -*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0])) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1])) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2])) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2] + \ - (l)*PyArray_STRIDES(obj)[3])) - -/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */ -static NPY_INLINE void -PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) -{ - PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; - if (fa && fa->base) { - if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || - (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { - PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); - Py_DECREF(fa->base); - fa->base = NULL; - PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); - PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); - } - } -} - -#define PyArray_DESCR_REPLACE(descr) do { \ - PyArray_Descr *_new_; \ - _new_ = PyArray_DescrNew(descr); \ - Py_XDECREF(descr); \ - descr = _new_; \ - } while(0) - -/* Copy should always return contiguous array */ -#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) - -#define PyArray_FromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_BEHAVED | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_ContiguousFromObject(op, 
type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_DEFAULT | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_ENSURECOPY | \ - NPY_ARRAY_DEFAULT | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_Cast(mp, type_num) \ - PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) - -#define PyArray_Take(ap, items, axis) \ - PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) - -#define PyArray_Put(ap, items, values) \ - PyArray_PutTo(ap, items, values, NPY_RAISE) - -/* Compatibility with old Numeric stuff -- don't use in new code */ - -#define PyArray_FromDimsAndData(nd, d, type, data) \ - PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ - data) - - -/* - Check to see if this key in the dictionary is the "title" - entry of the tuple (i.e. a duplicate dictionary entry in the fields - dict. -*/ - -static NPY_INLINE int -NPY_TITLE_KEY_check(PyObject *key, PyObject *value) -{ - PyObject *title; - if (PyTuple_Size(value) != 3) { - return 0; - } - title = PyTuple_GetItem(value, 2); - if (key == title) { - return 1; - } -#ifdef PYPY_VERSION - /* - * On PyPy, dictionary keys do not always preserve object identity. - * Fall back to comparison by value. - */ - if (PyUnicode_Check(title) && PyUnicode_Check(key)) { - return PyUnicode_Compare(title, key) == 0 ? 1 : 0; - } -#if PY_VERSION_HEX < 0x03000000 - if (PyString_Check(title) && PyString_Check(key)) { - return PyObject_Compare(title, key) == 0 ? 1 : 0; - } -#endif -#endif - return 0; -} - -/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */ -#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) - -#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) -#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) - -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION) -static NPY_INLINE void -PyArray_XDECREF_ERR(PyArrayObject *arr) -{ - /* 2017-Nov-10 1.14 */ - DEPRECATE("PyArray_XDECREF_ERR is deprecated, call " - "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead"); - PyArray_DiscardWritebackIfCopy(arr); - Py_XDECREF(arr); -} -#endif - - -#ifdef __cplusplus -} -#endif - - -#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarraytypes.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarraytypes.h deleted file mode 100644 index ad98d56..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ndarraytypes.h +++ /dev/null @@ -1,1848 +0,0 @@ -#ifndef NDARRAYTYPES_H -#define NDARRAYTYPES_H - -#include "npy_common.h" -#include "npy_endian.h" -#include "npy_cpu.h" -#include "utils.h" - -#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - -#ifndef __has_extension -#define __has_extension(x) 0 -#endif - -#if !defined(_NPY_NO_DEPRECATIONS) && \ - ((defined(__GNUC__)&& __GNUC__ >= 6) || \ - __has_extension(attribute_deprecated_with_message)) -#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text))) -#else -#define NPY_ATTR_DEPRECATE(text) -#endif - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. 
This is the size of that static - * allocation. - * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. - */ -#define NPY_FEATURE_VERSION NPY_API_VERSION - -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. - */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"), - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; -#ifdef _MSC_VER -#pragma deprecated(NPY_CHAR) -#endif - -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - -/* - * These characters correspond to the array type and the struct - * module - */ - -enum NPY_TYPECHAR { - NPY_BOOLLTR = '?', - NPY_BYTELTR = 'b', - NPY_UBYTELTR = 'B', - NPY_SHORTLTR = 'h', - NPY_USHORTLTR = 'H', - NPY_INTLTR = 'i', - NPY_UINTLTR = 'I', - NPY_LONGLTR = 'l', - NPY_ULONGLTR = 'L', - NPY_LONGLONGLTR = 'q', - NPY_ULONGLONGLTR = 'Q', - NPY_HALFLTR = 'e', - NPY_FLOATLTR = 'f', - NPY_DOUBLELTR = 'd', - NPY_LONGDOUBLELTR = 'g', - NPY_CFLOATLTR = 'F', - NPY_CDOUBLELTR = 'D', - NPY_CLONGDOUBLELTR = 'G', - NPY_OBJECTLTR = 'O', - NPY_STRINGLTR = 'S', - NPY_STRINGLTR2 = 'a', - NPY_UNICODELTR = 'U', - NPY_VOIDLTR = 'V', - NPY_DATETIMELTR = 'M', - NPY_TIMEDELTALTR = 'm', - NPY_CHARLTR = 'c', - - /* - * No Descriptor, just a define -- this let's - * Python users specify an array of integers - * large enough to hold a pointer on the - * platform - */ - NPY_INTPLTR = 'p', - NPY_UINTPLTR = 'P', - - /* - * These are for dtype 'kinds', not dtype 'typecodes' - * as the above are for. - */ - NPY_GENBOOLLTR ='b', - NPY_SIGNEDLTR = 'i', - NPY_UNSIGNEDLTR = 'u', - NPY_FLOATINGLTR = 'f', - NPY_COMPLEXLTR = 'c' -}; - -/* - * Changing this may break Numpy API compatibility - * due to changing offsets in PyArray_ArrFuncs, so be - * careful. 
Here we have reused the mergesort slot for - * any kind of stable sort, the actual implementation will - * depend on the data type. - */ -typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, - NPY_STABLESORT=2, -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_STABLESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0 -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { - NPY_NOSCALAR=-1, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR -} NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) - -/* For specifying array memory layout or iteration order */ -typedef enum { - /* Fortran order if inputs are all Fortran, C otherwise */ - NPY_ANYORDER=-1, - /* C order */ - NPY_CORDER=0, - /* Fortran order */ - NPY_FORTRANORDER=1, - /* An order as close to the inputs as possible */ - NPY_KEEPORDER=2 -} NPY_ORDER; - -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4 -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) - -/* The FR in the unit names stands for frequency */ -typedef enum { - /* Force signed enum type, must be -1 for code compatibility */ - NPY_FR_ERROR = -1, /* error or undetermined */ - - /* Start of valid units */ - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10, /* nanoseconds */ - NPY_FR_ps = 11, /* picoseconds */ - NPY_FR_fs = 12, /* femtoseconds */ - NPY_FR_as = 13, /* attoseconds */ - NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. 
*/ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. - */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. */ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. - ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. - */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 - /* numpy sometimes calls PyArray_malloc() with the GIL released. On Python - 3.3 and older, it was safe to call PyMem_Malloc() with the GIL released. - On Python 3.4 and newer, it's better to use PyMem_RawMalloc() to be able - to use tracemalloc. On Python 3.6, calling PyMem_Malloc() with the GIL - released is now a fatal error in debug mode. 
*/ -# if PY_VERSION_HEX >= 0x03040000 -# define PyArray_malloc PyMem_RawMalloc -# define PyArray_free PyMem_RawFree -# define PyArray_realloc PyMem_RawRealloc -# else -# define PyArray_malloc PyMem_Malloc -# define PyArray_free PyMem_Free -# define PyArray_realloc PyMem_Realloc -# endif -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. - */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. Should return 0 on success - * and -1 on failure. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. */ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; - /* Cached hash value (-1 if not yet computed). - * This was added for NumPy 2.0.0. - */ - npy_hash_t hash; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. - * - * It has been recommended to use the inline functions defined below - * (PyArray_DATA and friends) to access fields here for a number of - * releases. Direct access to the members themselves is deprecated. - * To ensure that your code does not use deprecated access, - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - * (or NPY_1_8_API_VERSION or higher as required). - */ -/* This struct will be moved to a private header in a future release */ -typedef struct tagPyArrayObject_fields { - PyObject_HEAD - /* Pointer to the raw data buffer */ - char *data; - /* The number of dimensions, also called 'ndim' */ - int nd; - /* The size in each dimension, also called 'shape' */ - npy_intp *dimensions; - /* - * Number of bytes to jump to get to the - * next element in each dimension - */ - npy_intp *strides; - /* - * This object is decref'd upon - * deletion of array. Except in the - * case of WRITEBACKIFCOPY which has - * special handling. - * - * For views it points to the original - * array, collapsed so no chains of - * views occur. - * - * For creation from buffer object it - * points to an object that should be - * decref'd on deletion - * - * For WRITEBACKIFCOPY flag this is an - * array to-be-updated upon calling - * PyArray_ResolveWritebackIfCopy - */ - PyObject *base; - /* Pointer to type structure */ - PyArray_Descr *descr; - /* Flags describing array -- see below */ - int flags; - /* For weak references */ - PyObject *weakreflist; -} PyArrayObject_fields; - -/* - * To hide the implementation details, we only expose - * the Python struct HEAD. - */ -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -/* - * Can't put this in npy_deprecated_api.h like the others. - * PyArrayObject field access is deprecated as of NumPy 1.7. 
- */ -typedef PyArrayObject_fields PyArrayObject; -#else -typedef struct tagPyArrayObject { - PyObject_HEAD -} PyArrayObject; -#endif - -#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef struct { - NPY_DATETIMEUNIT base; - int num; -} PyArray_DatetimeMetaData; - -typedef struct { - NpyAuxData base; - PyArray_DatetimeMetaData meta; -} PyArray_DatetimeDTypeMetaData; - -/* - * This structure contains an exploded view of a date-time value. - * NaT is represented by year == NPY_DATETIME_NAT. - */ -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -/* This is not used internally. */ -typedef struct { - npy_int64 day; - npy_int32 sec, us, ps, as; -} npy_timedeltastruct; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 - -/* - * Set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 - -/* - * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a - * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with - * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS - * at the same time if they have either zero or one element. - * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional - * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements - * and the array is contiguous if ndarray.squeeze() is contiguous. - * I.e. dimensions for which `ndarray.shape[dimension] == 1` are - * ignored. - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - * - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the various FromAny functions - * - * This flag may be requested in constructor functions. - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_ARRAY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSURECOPY 0x0020 - -/* - * Make sure the returned array is a base-class ndarray - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. - * - * This flag may be requested in constructor functions. 
- */ -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropriate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_ALIGNED 0x0100 - -/* - * Array data has the native endianness - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_NOTSWAPPED 0x0200 - -/* - * Array data is writeable - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when PyArray_ResolveWritebackIfCopy is called. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */ -#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 - -/* - * NOTE: there are also internal flags defined in multiarray/arrayobject.h, - * which start at bit 31 and work down. - */ - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_WRITEBACKIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_WRITEBACKIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -/* This flag is for the array interface, not PyArrayObject */ -#define NPY_ARR_HAS_DESCR 0x0800 - - - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) - -/* - * C API: consists of Macros and functions. The MACROS are defined - * here. - */ - - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS) - -/* the variable is used in some places, so always define it */ -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); -#define NPY_END_THREADS do { if (_save) \ - { PyEval_RestoreThread(_save); _save = NULL;} } while (0); -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \ - { _save = PyEval_SaveThread();} } while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); -#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/********************************** - * The nditer object, added in 1.6 - **********************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly NpyIter; - -/* Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ 
-#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 -/* - * If output operands overlap with other operands (based on heuristics that - * has false positives but no false negatives), make temporary copies to - * eliminate overlap. - */ -#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 - -/*** Per-operand flags that may be passed to the iterator constructors ***/ - -/* The operand will be read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* This is a virtual array slot, operand is NULL but temporary data is there */ -#define NPY_ITER_VIRTUAL 0x04000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 -/* A mask is being used on this array, affects buffer -> array copy */ -#define NPY_ITER_WRITEMASKED 0x10000000 -/* This array is the mask for all WRITEMASKED operands */ -#define NPY_ITER_ARRAYMASK 0x20000000 -/* Assume iterator order data access for COPY_IF_OVERLAP */ -#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* (*npy_iter_get_dataptr_t)( - PyArrayIterObject* iter, const npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define 
_PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp)(ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to PyArray_Broadcast must be binary compatible - * with this structure. 
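 *
 * A broadcasting loop over two arrays is then a sketch along these
 * lines (here `a` and `b` are assumed to be NPY_DOUBLE arrays, and all
 * error checks are omitted):
 *
 *     double dot = 0.0;
 *     PyArrayMultiIterObject *mit =
 *         (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
 *     while (PyArray_MultiIter_NOTDONE(mit)) {
 *         dot += *(double *)PyArray_MultiIter_DATA(mit, 0) *
 *                *(double *)PyArray_MultiIter_DATA(mit, 1);
 *         PyArray_MultiIter_NEXT(mit);
 *     }
 *     Py_DECREF(mit);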
- */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_GOTO(multi, dest) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - - -/* - * Store the information needed for fancy-indexing over an array. The - * fields are slightly unordered to keep consec, dataptr and subspace - * where they were originally. - */ -typedef struct { - PyObject_HEAD - /* - * Multi-iterator portion --- needs to be present in this - * order to work with PyArray_Broadcast - */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of broadcasted - result */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - NpyIter *outer; /* index objects - iterator */ - void *unused[NPY_MAXDIMS - 2]; - PyArrayObject *array; - /* Flat iterator for the indexed array. For compatibility solely. */ - PyArrayIterObject *ait; - - /* - * Subspace array. For binary compatibility (was an iterator, - * but only the check for NULL should be used). - */ - PyArrayObject *subspace; - - /* - * if subspace iteration, then this is the array of axes in - * the underlying array represented by the index objects - */ - int iteraxes[NPY_MAXDIMS]; - npy_intp fancy_strides[NPY_MAXDIMS]; - - /* pointer when all fancy indices are 0 */ - char *baseoffset; - - /* - * after binding consec denotes at which axis the fancy axes - * are inserted. - */ - int consec; - char *dataptr; - - int nd_fancy; - npy_intp fancy_dims[NPY_MAXDIMS]; - - /* Whether the iterator (any of the iterators) requires API */ - int needs_api; - - /* - * Extra op information. - */ - PyArrayObject *extra_op; - PyArray_Descr *extra_op_dtype; /* desired dtype */ - npy_uint32 *extra_op_flags; /* Iterator flags */ - - NpyIter *extra_op_iter; - NpyIter_IterNextFunc *extra_op_next; - char **extra_op_ptrs; - - /* - * Information about the iteration state. 
- */ - NpyIter_IterNextFunc *outer_next; - char **outer_ptrs; - npy_intp *outer_strides; - - /* - * Information about the subspace iterator. - */ - NpyIter *subspace_iter; - NpyIter_IterNextFunc *subspace_next; - char **subspace_ptrs; - npy_intp *subspace_strides; - - /* Count for the external loop (which ever it is) for API iteration */ - npy_intp iter_count; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp dimensions[NPY_MAXDIMS]; - - /* - * Neighborhood points coordinates are computed relatively to the - * point pointed by _internal_iter - */ - PyArrayIterObject* _internal_iter; - /* - * To keep a reference to the representation of the constant value - * for constant padding - */ - char* constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* - * Include inline implementations - functions defined there are not - * considered public API - */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE - -/* - * All sorts of useful ways to look into a PyArrayObject. It is recommended - * to use PyArrayObject * objects instead of always casting from PyObject *, - * for improved type checking. - * - * In many cases here the macro versions of the accessors are deprecated, - * but can't be immediately changed to inline functions because the - * preexisting macros accept PyObject * and do automatic casts. Inline - * functions accepting PyArrayObject * provides for some compile-time - * checking of correctness when working with these objects in C. - */ - -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ - (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ - NPY_ARRAY_F_CONTIGUOUS : 0)) - -#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) -/* - * Changing access macros into functions, to allow for future hiding - * of the internal memory layout. This later hiding will allow the 2.x series - * to change the internal representation of arrays without affecting - * ABI compatibility. - */ - -static NPY_INLINE int -PyArray_NDIM(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->nd; -} - -static NPY_INLINE void * -PyArray_DATA(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE char * -PyArray_BYTES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE npy_intp * -PyArray_DIMS(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -static NPY_INLINE npy_intp * -PyArray_STRIDES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->strides; -} - -static NPY_INLINE npy_intp -PyArray_DIM(const PyArrayObject *arr, int idim) -{ - return ((PyArrayObject_fields *)arr)->dimensions[idim]; -} - -static NPY_INLINE npy_intp -PyArray_STRIDE(const PyArrayObject *arr, int istride) -{ - return ((PyArrayObject_fields *)arr)->strides[istride]; -} - -static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject * -PyArray_BASE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->base; -} - -static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr * -PyArray_DESCR(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE int -PyArray_FLAGS(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->flags; -} - -static NPY_INLINE npy_intp -PyArray_ITEMSIZE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->elsize; -} - -static NPY_INLINE int -PyArray_TYPE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->type_num; -} - -static NPY_INLINE int -PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) -{ - return (PyArray_FLAGS(arr) & flags) == flags; -} - -static NPY_INLINE PyObject * -PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) -{ - return ((PyArrayObject_fields *)arr)->descr->f->getitem( - (void *)itemptr, (PyArrayObject *)arr); -} - -static NPY_INLINE int -PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) -{ - return ((PyArrayObject_fields *)arr)->descr->f->setitem( - v, itemptr, arr); -} - -#else - -/* These macros are deprecated as of NumPy 1.7. 
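 *
 * Either spelling supports the same call sites. A minimal sketch,
 * assuming `arr` is a PyArrayObject* known to be one-dimensional,
 * C-contiguous, and of type NPY_DOUBLE:
 *
 *     npy_intp i, n = PyArray_DIM(arr, 0);
 *     double *data = (double *)PyArray_DATA(arr);
 *     for (i = 0; i < n; i++) {
 *         data[i] *= 2.0;
 *     }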
*/ -#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) -#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) -#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) -#define PyArray_ITEMSIZE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->type_num) -#define PyArray_GETITEM(obj,itemptr) \ - PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)) - -#define PyArray_SETITEM(obj,itemptr,v) \ - PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)) -#endif - -static NPY_INLINE PyArray_Descr * -PyArray_DTYPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE npy_intp * -PyArray_SHAPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -/* - * Enables the specified array flags. Does no checking, - * assumes you know what you're doing. - */ -static NPY_INLINE void -PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags |= flags; -} - -/* - * Clears the specified array flags. Does no checking, - * assumes you know what you're doing. 
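 *
 * For example, to mark an array read-only, or to assert an alignment
 * that has been verified by hand (a sketch; nothing is checked):
 *
 *     PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEABLE);
 *     PyArray_ENABLEFLAGS(arr, NPY_ARRAY_ALIGNED);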
- */ -static NPY_INLINE void -PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags &= ~flags; -} - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ - ((type) == NPY_UNICODE)) - -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ - ((type) == NPY_DOUBLE) || \ - ((type) == NPY_CDOUBLE) || \ - ((type) == NPY_BOOL) || \ - ((type) == NPY_OBJECT )) - -#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ - ((type) <=NPY_VOID)) - -#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ - ((type) <=NPY_TIMEDELTA)) - -#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ - ((type) < NPY_USERDEF+ \ - NPY_NUMUSERTYPES)) - -#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ - PyTypeNum_ISUSERDEF(type)) - -#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) - - -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) -#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) -#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ - !PyDataType_HASFIELDS(dtype)) -#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) - -#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) -#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) -#define PyArray_ISFLOAT(obj) 
PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) -#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) -#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) -#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) -#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) -#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) -#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) -#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) -#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) -#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) -#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - - /* - * FIXME: This should check for a flag on the data-type that - * states whether or not it is variable length. Because the - * ISFLEXIBLE check is hard-coded to the built-in data-types. - */ -#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) - -#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) - - -#define NPY_LITTLE '<' -#define NPY_BIG '>' -#define NPY_NATIVE '=' -#define NPY_SWAP 's' -#define NPY_IGNORE '|' - -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN -#define NPY_NATBYTE NPY_BIG -#define NPY_OPPBYTE NPY_LITTLE -#else -#define NPY_NATBYTE NPY_LITTLE -#define NPY_OPPBYTE NPY_BIG -#endif - -#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) -#define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) -#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - - -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) -#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) - -/************************************************************ - * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. - ************************************************************/ - -typedef struct { - npy_intp perm, stride; -} npy_stride_sort_item; - -/************************************************************ - * This is the form of the struct that's returned pointed by the - * PyCObject attribute of an array __array_struct__. See - * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full - * documentation. - ************************************************************/ -typedef struct { - int two; /* - * contains the integer 2 as a sanity - * check - */ - - int nd; /* number of dimensions */ - - char typekind; /* - * kind in array --- character code of - * typestr - */ - - int itemsize; /* size of each element */ - - int flags; /* - * how should be data interpreted. Valid - * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), - * ALIGNED (0x100), NOTSWAPPED (0x200), and - * WRITEABLE (0x400). 
ARR_HAS_DESCR (0x800) - * states that arrdescr field is present in - * structure - */ - - npy_intp *shape; /* - * A length-nd array of shape - * information - */ - - npy_intp *strides; /* A length-nd array of stride information */ - - void *data; /* A pointer to the first element of the array */ - - PyObject *descr; /* - * A list of fields or NULL (ignored if flags - * does not have ARR_HAS_DESCR flag set) - */ -} PyArrayInterface; - -/* - * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. - * See the documentation for PyDataMem_SetEventHook. - */ -typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, - void *user_data); - -/* - * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files - * npy_*_*_deprecated_api.h are only included from here and nowhere else. - */ -#ifdef NPY_DEPRECATED_INCLUDES -#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." -#endif -#define NPY_DEPRECATED_INCLUDES -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -#include "npy_1_7_deprecated_api.h" -#endif -/* - * There is no file npy_1_8_deprecated_api.h since there are no additional - * deprecated API features in NumPy 1.8. - * - * Note to maintainers: insert code like the following in future NumPy - * versions. - * - * #if !defined(NPY_NO_DEPRECATED_API) || \ - * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) - * #include "npy_1_9_deprecated_api.h" - * #endif - */ -#undef NPY_DEPRECATED_INCLUDES - -#endif /* NPY_ARRAYTYPES_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/noprefix.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/noprefix.h deleted file mode 100644 index 041f301..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/noprefix.h +++ /dev/null @@ -1,212 +0,0 @@ -#ifndef NPY_NOPREFIX_H -#define NPY_NOPREFIX_H - -/* - * You can directly include noprefix.h as a backward - * compatibility measure - */ -#ifndef NPY_NO_PREFIX -#include "ndarrayobject.h" -#include "npy_interrupt.h" -#endif - -#define SIGSETJMP NPY_SIGSETJMP -#define SIGLONGJMP NPY_SIGLONGJMP -#define SIGJMP_BUF NPY_SIGJMP_BUF - -#define MAX_DIMS NPY_MAXDIMS - -#define longlong npy_longlong -#define ulonglong npy_ulonglong -#define Bool npy_bool -#define longdouble npy_longdouble -#define byte npy_byte - -#ifndef _BSD_SOURCE -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#endif - -#define ubyte npy_ubyte -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#define cfloat npy_cfloat -#define cdouble npy_cdouble -#define clongdouble npy_clongdouble -#define Int8 npy_int8 -#define UInt8 npy_uint8 -#define Int16 npy_int16 -#define UInt16 npy_uint16 -#define Int32 npy_int32 -#define UInt32 npy_uint32 -#define Int64 npy_int64 -#define UInt64 npy_uint64 -#define Int128 npy_int128 -#define UInt128 npy_uint128 -#define Int256 npy_int256 -#define UInt256 npy_uint256 -#define Float16 npy_float16 -#define Complex32 npy_complex32 -#define Float32 npy_float32 -#define Complex64 npy_complex64 -#define Float64 npy_float64 -#define Complex128 npy_complex128 -#define Float80 npy_float80 -#define Complex160 npy_complex160 -#define Float96 npy_float96 -#define Complex192 npy_complex192 -#define Float128 npy_float128 -#define Complex256 npy_complex256 -#define intp npy_intp -#define uintp npy_uintp -#define datetime npy_datetime -#define timedelta npy_timedelta - -#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG -#define SIZEOF_INTP 
NPY_SIZEOF_INTP -#define SIZEOF_UINTP NPY_SIZEOF_UINTP -#define SIZEOF_HALF NPY_SIZEOF_HALF -#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE -#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME -#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA - -#define LONGLONG_FMT NPY_LONGLONG_FMT -#define ULONGLONG_FMT NPY_ULONGLONG_FMT -#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX -#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX - -#define MAX_INT8 127 -#define MIN_INT8 -128 -#define MAX_UINT8 255 -#define MAX_INT16 32767 -#define MIN_INT16 -32768 -#define MAX_UINT16 65535 -#define MAX_INT32 2147483647 -#define MIN_INT32 (-MAX_INT32 - 1) -#define MAX_UINT32 4294967295U -#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) -#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) -#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) -#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) -#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) -#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) - -#define MAX_BYTE NPY_MAX_BYTE -#define MIN_BYTE NPY_MIN_BYTE -#define MAX_UBYTE NPY_MAX_UBYTE -#define MAX_SHORT NPY_MAX_SHORT -#define MIN_SHORT NPY_MIN_SHORT -#define MAX_USHORT NPY_MAX_USHORT -#define MAX_INT NPY_MAX_INT -#define MIN_INT NPY_MIN_INT -#define MAX_UINT NPY_MAX_UINT -#define MAX_LONG NPY_MAX_LONG -#define MIN_LONG NPY_MIN_LONG -#define MAX_ULONG NPY_MAX_ULONG -#define MAX_LONGLONG NPY_MAX_LONGLONG -#define MIN_LONGLONG NPY_MIN_LONGLONG -#define MAX_ULONGLONG NPY_MAX_ULONGLONG -#define MIN_DATETIME NPY_MIN_DATETIME -#define MAX_DATETIME NPY_MAX_DATETIME -#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA -#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA - -#define BITSOF_BOOL NPY_BITSOF_BOOL -#define BITSOF_CHAR NPY_BITSOF_CHAR -#define BITSOF_SHORT NPY_BITSOF_SHORT -#define BITSOF_INT NPY_BITSOF_INT -#define BITSOF_LONG NPY_BITSOF_LONG -#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG -#define BITSOF_HALF NPY_BITSOF_HALF -#define BITSOF_FLOAT NPY_BITSOF_FLOAT -#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE -#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE -#define BITSOF_DATETIME NPY_BITSOF_DATETIME -#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA - -#define _pya_malloc PyArray_malloc -#define _pya_free PyArray_free -#define _pya_realloc PyArray_realloc - -#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF -#define BEGIN_THREADS NPY_BEGIN_THREADS -#define END_THREADS NPY_END_THREADS -#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF -#define ALLOW_C_API NPY_ALLOW_C_API -#define DISABLE_C_API NPY_DISABLE_C_API - -#define PY_FAIL NPY_FAIL -#define PY_SUCCEED NPY_SUCCEED - -#ifndef TRUE -#define TRUE NPY_TRUE -#endif - -#ifndef FALSE -#define FALSE NPY_FALSE -#endif - -#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT - -#define CONTIGUOUS NPY_CONTIGUOUS -#define C_CONTIGUOUS NPY_C_CONTIGUOUS -#define FORTRAN NPY_FORTRAN -#define F_CONTIGUOUS NPY_F_CONTIGUOUS -#define OWNDATA NPY_OWNDATA -#define FORCECAST NPY_FORCECAST -#define ENSURECOPY NPY_ENSURECOPY -#define ENSUREARRAY NPY_ENSUREARRAY -#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES -#define ALIGNED NPY_ALIGNED -#define NOTSWAPPED NPY_NOTSWAPPED -#define WRITEABLE NPY_WRITEABLE -#define UPDATEIFCOPY NPY_UPDATEIFCOPY -#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY -#define 
ARR_HAS_DESCR NPY_ARR_HAS_DESCR -#define BEHAVED NPY_BEHAVED -#define BEHAVED_NS NPY_BEHAVED_NS -#define CARRAY NPY_CARRAY -#define CARRAY_RO NPY_CARRAY_RO -#define FARRAY NPY_FARRAY -#define FARRAY_RO NPY_FARRAY_RO -#define DEFAULT NPY_DEFAULT -#define IN_ARRAY NPY_IN_ARRAY -#define OUT_ARRAY NPY_OUT_ARRAY -#define INOUT_ARRAY NPY_INOUT_ARRAY -#define IN_FARRAY NPY_IN_FARRAY -#define OUT_FARRAY NPY_OUT_FARRAY -#define INOUT_FARRAY NPY_INOUT_FARRAY -#define UPDATE_ALL NPY_UPDATE_ALL - -#define OWN_DATA NPY_OWNDATA -#define BEHAVED_FLAGS NPY_BEHAVED -#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS -#define CARRAY_FLAGS_RO NPY_CARRAY_RO -#define CARRAY_FLAGS NPY_CARRAY -#define FARRAY_FLAGS NPY_FARRAY -#define FARRAY_FLAGS_RO NPY_FARRAY_RO -#define DEFAULT_FLAGS NPY_DEFAULT -#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS - -#ifndef MIN -#define MIN PyArray_MIN -#endif -#ifndef MAX -#define MAX PyArray_MAX -#endif -#define MAX_INTP NPY_MAX_INTP -#define MIN_INTP NPY_MIN_INTP -#define MAX_UINTP NPY_MAX_UINTP -#define INTP_FMT NPY_INTP_FMT - -#ifndef PYPY_VERSION -#define REFCOUNT PyArray_REFCOUNT -#define MAX_ELSIZE NPY_MAX_ELSIZE -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h deleted file mode 100644 index a6ee212..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h +++ /dev/null @@ -1,133 +0,0 @@ -#ifndef _NPY_1_7_DEPRECATED_API_H -#define _NPY_1_7_DEPRECATED_API_H - -#ifndef NPY_DEPRECATED_INCLUDES -#error "Should never include npy_*_*_deprecated_api directly." -#endif - -/* Emit a warning if the user did not specifically request the old API */ -#ifndef NPY_NO_DEPRECATED_API -#if defined(_WIN32) -#define _WARN___STR2__(x) #x -#define _WARN___STR1__(x) _WARN___STR2__(x) -#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " -#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") -#elif defined(__GNUC__) -#warning "Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" -#endif -/* TODO: How to do this warning message for other compilers? */ -#endif - -/* - * This header exists to collect all dangerous/deprecated NumPy API - * as of NumPy 1.7. - * - * This is an attempt to remove bad API, the proliferation of macros, - * and namespace pollution currently produced by the NumPy headers. - */ - -/* These array flags are deprecated as of NumPy 1.7 */ -#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS - -/* - * The consistent NPY_ARRAY_* names which don't pollute the NPY_* - * namespace were added in NumPy 1.7. - * - * These versions of the carray flags are deprecated, but - * probably should only be removed after two releases instead of one. 
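 *
 * In practice this keeps old call sites compiling; for example the old
 * spelling
 *
 *     PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_IN_ARRAY)
 *
 * still resolves, although new code should write NPY_ARRAY_IN_ARRAY.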
- */ -#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS -#define NPY_OWNDATA NPY_ARRAY_OWNDATA -#define NPY_FORCECAST NPY_ARRAY_FORCECAST -#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY -#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY -#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES -#define NPY_ALIGNED NPY_ARRAY_ALIGNED -#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED -#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE -#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY -#define NPY_BEHAVED NPY_ARRAY_BEHAVED -#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS -#define NPY_CARRAY NPY_ARRAY_CARRAY -#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO -#define NPY_DEFAULT NPY_ARRAY_DEFAULT -#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY -#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY -#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY -#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY -#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY -#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY -#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL - -/* This way of accessing the default type is deprecated as of NumPy 1.7 */ -#define PyArray_DEFAULT NPY_DEFAULT_TYPE - -/* These DATETIME bits aren't used internally */ -#if PY_VERSION_HEX >= 0x03000000 -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ - PyDict_GetItemString( \ - descr->metadata, NPY_METADATA_DTSTR), NULL)))) -#else -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ - PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) -#endif - -/* - * Deprecated as of NumPy 1.7, this kind of shortcut doesn't - * belong in the public API. - */ -#define NPY_AO PyArrayObject - -/* - * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't - * belong in the public API. - */ -#define fortran fortran_ - -/* - * Deprecated as of NumPy 1.7, as it is a namespace-polluting - * macro. - */ -#define FORTRAN_IF PyArray_FORTRAN_IF - -/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ -#define NPY_METADATA_DTSTR "__timeunit__" - -/* - * Deprecated as of NumPy 1.7. - * The reasoning: - * - These are for datetime, but there's no datetime "namespace". - * - They just turn NPY_STR_ into "", which is just - * making something simple be indirected. - */ -#define NPY_STR_Y "Y" -#define NPY_STR_M "M" -#define NPY_STR_W "W" -#define NPY_STR_D "D" -#define NPY_STR_h "h" -#define NPY_STR_m "m" -#define NPY_STR_s "s" -#define NPY_STR_ms "ms" -#define NPY_STR_us "us" -#define NPY_STR_ns "ns" -#define NPY_STR_ps "ps" -#define NPY_STR_fs "fs" -#define NPY_STR_as "as" - -/* - * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be - * removed in the next major release. - */ -#include "old_defines.h" - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h deleted file mode 100644 index 832bc05..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h +++ /dev/null @@ -1,577 +0,0 @@ -/* - * This is a convenience header file providing compatibility utilities - * for supporting Python 2 and Python 3 in the same code base. - * - * If you want to use this for your own projects, it's recommended to make a - * copy of it. 
Although the stuff below is unlikely to change, we don't provide - * strong backwards compatibility guarantees at the moment. - */ - -#ifndef _NPY_3KCOMPAT_H_ -#define _NPY_3KCOMPAT_H_ - -#include -#include - -#if PY_VERSION_HEX >= 0x03000000 -#ifndef NPY_PY3K -#define NPY_PY3K 1 -#endif -#endif - -#include "numpy/npy_common.h" -#include "numpy/ndarrayobject.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * PyInt -> PyLong - */ - -#if defined(NPY_PY3K) -/* Return True only if the long fits in a C long */ -static NPY_INLINE int PyInt_Check(PyObject *op) { - int overflow = 0; - if (!PyLong_Check(op)) { - return 0; - } - PyLong_AsLongAndOverflow(op, &overflow); - return (overflow == 0); -} - -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsSsize_t PyLong_AsSsize_t - -/* NOTE: - * - * Since the PyLong type is very different from the fixed-range PyInt, - * we don't define PyInt_Type -> PyLong_Type. - */ -#endif /* NPY_PY3K */ - -/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ -#ifdef NPY_PY3K -# define NpySlice_GetIndicesEx PySlice_GetIndicesEx -#else -# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ - PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) -#endif - -/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */ -#if (PY_VERSION_HEX < 0x02070B00) || \ - ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400)) - #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x)) -#else - #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) -#endif - -/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */ -#if PY_VERSION_HEX < 0x03050200 - #define Py_SETREF(op, op2) \ - do { \ - PyObject *_py_tmp = (PyObject *)(op); \ - (op) = (op2); \ - Py_DECREF(_py_tmp); \ - } while (0) -#endif - -/* - * PyString -> PyBytes - */ - -#if defined(NPY_PY3K) - -#define PyString_Type PyBytes_Type -#define PyString_Check PyBytes_Check -#define PyStringObject PyBytesObject -#define PyString_FromString PyBytes_FromString -#define PyString_FromStringAndSize PyBytes_FromStringAndSize -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsStringAndSize PyBytes_AsStringAndSize -#define PyString_FromFormat PyBytes_FromFormat -#define PyString_Concat PyBytes_Concat -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_Size PyBytes_Size - -#define PyUString_Type PyUnicode_Type -#define PyUString_Check PyUnicode_Check -#define PyUStringObject PyUnicodeObject -#define PyUString_FromString PyUnicode_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyUString_FromFormat PyUnicode_FromFormat -#define PyUString_Concat PyUnicode_Concat2 -#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel -#define PyUString_GET_SIZE PyUnicode_GET_SIZE -#define PyUString_Size PyUnicode_Size -#define PyUString_InternFromString PyUnicode_InternFromString -#define PyUString_Format PyUnicode_Format - -#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) - -#else - -#define PyBytes_Type PyString_Type -#define PyBytes_Check PyString_Check -#define PyBytesObject PyStringObject -#define PyBytes_FromString PyString_FromString -#define PyBytes_FromStringAndSize PyString_FromStringAndSize -#define PyBytes_AS_STRING PyString_AS_STRING -#define PyBytes_AsStringAndSize 
PyString_AsStringAndSize -#define PyBytes_FromFormat PyString_FromFormat -#define PyBytes_Concat PyString_Concat -#define PyBytes_ConcatAndDel PyString_ConcatAndDel -#define PyBytes_AsString PyString_AsString -#define PyBytes_GET_SIZE PyString_GET_SIZE -#define PyBytes_Size PyString_Size - -#define PyUString_Type PyString_Type -#define PyUString_Check PyString_Check -#define PyUStringObject PyStringObject -#define PyUString_FromString PyString_FromString -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#define PyUString_FromFormat PyString_FromFormat -#define PyUString_Concat PyString_Concat -#define PyUString_ConcatAndDel PyString_ConcatAndDel -#define PyUString_GET_SIZE PyString_GET_SIZE -#define PyUString_Size PyString_Size -#define PyUString_InternFromString PyString_InternFromString -#define PyUString_Format PyString_Format - -#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) - -#endif /* NPY_PY3K */ - - -static NPY_INLINE void -PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); - Py_DECREF(right); -} - -static NPY_INLINE void -PyUnicode_Concat2(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); -} - -/* - * PyFile_* compatibility - */ - -/* - * Get a FILE* handle to the file represented by the Python object - */ -static NPY_INLINE FILE* -npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) -{ - int fd, fd2, unbuf; - PyObject *ret, *os, *io, *io_raw; - npy_off_t pos; - FILE *handle; - - /* For Python 2 PyFileObject, use PyFile_AsFile */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return PyFile_AsFile(file); - } -#endif - - /* Flush first to ensure things end up in the file in the correct order */ - ret = PyObject_CallMethod(file, "flush", ""); - if (ret == NULL) { - return NULL; - } - Py_DECREF(ret); - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return NULL; - } - - /* - * The handle needs to be dup'd because we have to call fclose - * at the end - */ - os = PyImport_ImportModule("os"); - if (os == NULL) { - return NULL; - } - ret = PyObject_CallMethod(os, "dup", "i", fd); - Py_DECREF(os); - if (ret == NULL) { - return NULL; - } - fd2 = PyNumber_AsSsize_t(ret, NULL); - Py_DECREF(ret); - - /* Convert to FILE* handle */ -#ifdef _WIN32 - handle = _fdopen(fd2, mode); -#else - handle = fdopen(fd2, mode); -#endif - if (handle == NULL) { - PyErr_SetString(PyExc_IOError, - "Getting a FILE* from a Python file object failed"); - return NULL; - } - - /* Record the original raw file handle position */ - *orig_pos = npy_ftell(handle); - if (*orig_pos == -1) { - /* The io module is needed to determine if buffering is used */ - io = PyImport_ImportModule("io"); - if (io == NULL) { - fclose(handle); - return NULL; - } - /* File object instances of RawIOBase are unbuffered */ - io_raw = PyObject_GetAttrString(io, "RawIOBase"); - Py_DECREF(io); - if (io_raw == NULL) { - fclose(handle); - return NULL; - } - unbuf = PyObject_IsInstance(file, io_raw); - Py_DECREF(io_raw); - if (unbuf == 1) { - /* Succeed if the IO is unbuffered */ - return handle; - } - else { - PyErr_SetString(PyExc_IOError, "obtaining file position failed"); - fclose(handle); - return NULL; - } - } - - /* Seek raw handle to the Python-side position */ - ret = PyObject_CallMethod(file, "tell", ""); - if (ret == NULL) { - fclose(handle); - return NULL; - } - pos = PyLong_AsLongLong(ret); - Py_DECREF(ret); - if (PyErr_Occurred()) { - fclose(handle); - return NULL; 
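/*
 * Typical pairing with npy_PyFile_DupClose2, as seen from a
 * hypothetical caller (`file` is the Python file object; sketch only):
 *
 *     npy_off_t orig_pos;
 *     FILE *fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
 *     if (fp == NULL) {
 *         return NULL;
 *     }
 *     ... read from fp with fread ...
 *     if (npy_PyFile_DupClose2(file, fp, orig_pos) < 0) {
 *         return NULL;
 *     }
 */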
- } - if (npy_fseek(handle, pos, SEEK_SET) == -1) { - PyErr_SetString(PyExc_IOError, "seeking file failed"); - fclose(handle); - return NULL; - } - return handle; -} - -/* - * Close the dup-ed file handle, and seek the Python one to the current position - */ -static NPY_INLINE int -npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) -{ - int fd, unbuf; - PyObject *ret, *io, *io_raw; - npy_off_t position; - - /* For Python 2 PyFileObject, do nothing */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 0; - } -#endif - - position = npy_ftell(handle); - - /* Close the FILE* handle */ - fclose(handle); - - /* - * Restore original file handle position, in order to not confuse - * Python-side data structures - */ - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return -1; - } - - if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { - - /* The io module is needed to determine if buffering is used */ - io = PyImport_ImportModule("io"); - if (io == NULL) { - return -1; - } - /* File object instances of RawIOBase are unbuffered */ - io_raw = PyObject_GetAttrString(io, "RawIOBase"); - Py_DECREF(io); - if (io_raw == NULL) { - return -1; - } - unbuf = PyObject_IsInstance(file, io_raw); - Py_DECREF(io_raw); - if (unbuf == 1) { - /* Succeed if the IO is unbuffered */ - return 0; - } - else { - PyErr_SetString(PyExc_IOError, "seeking file failed"); - return -1; - } - } - - if (position == -1) { - PyErr_SetString(PyExc_IOError, "obtaining file position failed"); - return -1; - } - - /* Seek Python-side handle to the FILE* handle position */ - ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - -static NPY_INLINE int -npy_PyFile_Check(PyObject *file) -{ - int fd; - /* For Python 2, check if it is a PyFileObject */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 1; - } -#endif - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - -static NPY_INLINE PyObject* -npy_PyFile_OpenFile(PyObject *filename, const char *mode) -{ - PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); - if (open == NULL) { - return NULL; - } - return PyObject_CallFunction(open, "Os", filename, mode); -} - -static NPY_INLINE int -npy_PyFile_CloseFile(PyObject *file) -{ - PyObject *ret; - - ret = PyObject_CallMethod(file, "close", NULL); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - - -/* This is a copy of _PyErr_ChainExceptions - */ -static NPY_INLINE void -npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) -{ - if (exc == NULL) - return; - - if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetContext(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif - } - else { - PyErr_Restore(exc, val, tb); - } -} - - -/* This is a copy of _PyErr_ChainExceptions, with: - * - a minimal implementation for python 2 - * - __cause__ used instead of __context__ - */ -static NPY_INLINE void -npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) -{ - if (exc == NULL) - return; - - if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, 
*val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetCause(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif - } - else { - PyErr_Restore(exc, val, tb); - } -} - -/* - * PyObject_Cmp - */ -#if defined(NPY_PY3K) -static NPY_INLINE int -PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) -{ - int v; - v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 1) { - *cmp = -1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 1) { - *cmp = 1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 1) { - *cmp = 0; - return 1; - } - else { - *cmp = 0; - return -1; - } -} -#endif - -/* - * PyCObject functions adapted to PyCapsules. - * - * The main job here is to get rid of the improved error handling - * of PyCapsules. It's a shame... - */ -#if PY_VERSION_HEX >= 0x03000000 - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) -{ - PyObject *ret = PyCapsule_New(ptr, NULL, dtor); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) -{ - PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); - if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { - PyErr_Clear(); - Py_DECREF(ret); - ret = NULL; - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *obj) -{ - void *ret = PyCapsule_GetPointer(obj, NULL); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCapsule_GetContext(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCapsule_CheckExact(ptr); -} - -#else - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) -{ - return PyCObject_FromVoidPtr(ptr, dtor); -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, - void (*dtor)(void *, void *)) -{ - return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor); -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *ptr) -{ - return PyCObject_AsVoidPtr(ptr); -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCObject_GetDesc(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCObject_Check(ptr); -} - -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* _NPY_3KCOMPAT_H_ */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_common.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_common.h deleted file mode 100644 index 27b83f7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_common.h +++ /dev/null @@ -1,1104 +0,0 @@ -#ifndef _NPY_COMMON_H_ -#define _NPY_COMMON_H_ - -/* numpconfig.h is auto-generated */ -#include "numpyconfig.h" -#ifdef HAVE_NPY_CONFIG_H -#include -#endif - -/* need Python.h for npy_intp, npy_uintp */ -#include - -/* - * using static inline modifiers when defining npy_math functions - * allows the compiler to make optimizations when possible - */ -#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD -#ifndef NPY_INLINE_MATH -#define NPY_INLINE_MATH 1 -#endif -#endif - -/* - * gcc does not unroll even with -O3 - * use 
with care, unrolling on modern cpus rarely speeds things up - */ -#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS -#define NPY_GCC_UNROLL_LOOPS \ - __attribute__((optimize("unroll-loops"))) -#else -#define NPY_GCC_UNROLL_LOOPS -#endif - -/* highest gcc optimization level, enabled autovectorizer */ -#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 -#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) -#else -#define NPY_GCC_OPT_3 -#endif - -/* compile target attributes */ -#if defined HAVE_ATTRIBUTE_TARGET_AVX && defined HAVE_LINK_AVX -#define NPY_GCC_TARGET_AVX __attribute__((target("avx"))) -#else -#define NPY_GCC_TARGET_AVX -#endif - -#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS -#define HAVE_ATTRIBUTE_TARGET_FMA -#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma"))) -#endif - -#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2 -#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2"))) -#else -#define NPY_GCC_TARGET_AVX2 -#endif - -#if defined HAVE_ATTRIBUTE_TARGET_AVX512F && defined HAVE_LINK_AVX512F -#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f"))) -#elif defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS -#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f"))) -#else -#define NPY_GCC_TARGET_AVX512F -#endif - -/* - * mark an argument (starting from 1) that must not be NULL and is not checked - * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check - */ -#ifdef HAVE_ATTRIBUTE_NONNULL -#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) -#else -#define NPY_GCC_NONNULL(n) -#endif - -#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS -#define NPY_HAVE_SSE_INTRINSICS -#endif - -#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD -#define NPY_HAVE_SSE2_INTRINSICS -#endif - -#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX2 -#define NPY_HAVE_AVX2_INTRINSICS -#endif - -#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX512F -#define NPY_HAVE_AVX512F_INTRINSICS -#endif -/* - * give a hint to the compiler which branch is more likely or unlikely - * to occur, e.g. rare error cases: - * - * if (NPY_UNLIKELY(failure == 0)) - * return NULL; - * - * the double !! is to cast the expression (e.g. NULL) to a boolean required by - * the intrinsic - */ -#ifdef HAVE___BUILTIN_EXPECT -#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) -#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define NPY_LIKELY(x) (x) -#define NPY_UNLIKELY(x) (x) -#endif - -#ifdef HAVE___BUILTIN_PREFETCH -/* unlike _mm_prefetch also works on non-x86 */ -#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) -#else -#ifdef HAVE__MM_PREFETCH -/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ -#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ - (loc == 1 ? _MM_HINT_T2 : \ - (loc == 2 ? _MM_HINT_T1 : \ - (loc == 3 ? 
_MM_HINT_T0 : -1)))) -#else -#define NPY_PREFETCH(x, rw,loc) -#endif -#endif - -#if defined(_MSC_VER) - #define NPY_INLINE __inline -#elif defined(__GNUC__) - #if defined(__STRICT_ANSI__) - #define NPY_INLINE __inline__ - #else - #define NPY_INLINE inline - #endif -#else - #define NPY_INLINE -#endif - -#ifdef HAVE___THREAD - #define NPY_TLS __thread -#else - #ifdef HAVE___DECLSPEC_THREAD_ - #define NPY_TLS __declspec(thread) - #else - #define NPY_TLS - #endif -#endif - -#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE - #define NPY_RETURNS_BORROWED_REF \ - __attribute__((cpychecker_returns_borrowed_ref)) -#else - #define NPY_RETURNS_BORROWED_REF -#endif - -#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE - #define NPY_STEALS_REF_TO_ARG(n) \ - __attribute__((cpychecker_steals_reference_to_arg(n))) -#else - #define NPY_STEALS_REF_TO_ARG(n) -#endif - -/* 64 bit file position support, also on win-amd64. Ticket #1660 */ -#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ - defined(__MINGW32__) || defined(__MINGW64__) - #include - -/* mingw based on 3.4.5 has lseek but not ftell/fseek */ -#if defined(__MINGW32__) || defined(__MINGW64__) -extern int __cdecl _fseeki64(FILE *, long long, int); -extern long long __cdecl _ftelli64(FILE *); -#endif - - #define npy_fseek _fseeki64 - #define npy_ftell _ftelli64 - #define npy_lseek _lseeki64 - #define npy_off_t npy_int64 - - #if NPY_SIZEOF_INT == 8 - #define NPY_OFF_T_PYFMT "i" - #elif NPY_SIZEOF_LONG == 8 - #define NPY_OFF_T_PYFMT "l" - #elif NPY_SIZEOF_LONGLONG == 8 - #define NPY_OFF_T_PYFMT "L" - #else - #error Unsupported size for type off_t - #endif -#else -#ifdef HAVE_FSEEKO - #define npy_fseek fseeko -#else - #define npy_fseek fseek -#endif -#ifdef HAVE_FTELLO - #define npy_ftell ftello -#else - #define npy_ftell ftell -#endif - #include - #define npy_lseek lseek - #define npy_off_t off_t - - #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT - #define NPY_OFF_T_PYFMT "h" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT - #define NPY_OFF_T_PYFMT "i" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG - #define NPY_OFF_T_PYFMT "l" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG - #define NPY_OFF_T_PYFMT "L" - #else - #error Unsupported size for type off_t - #endif -#endif - -/* enums for detected endianness */ -enum { - NPY_CPU_UNKNOWN_ENDIAN, - NPY_CPU_LITTLE, - NPY_CPU_BIG -}; - -/* - * This is to typedef npy_intp to the appropriate pointer size for this - * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. - */ -typedef Py_intptr_t npy_intp; -typedef Py_uintptr_t npy_uintp; - -/* - * Define sizes that were not defined in numpyconfig.h. - */ -#define NPY_SIZEOF_CHAR 1 -#define NPY_SIZEOF_BYTE 1 -#define NPY_SIZEOF_DATETIME 8 -#define NPY_SIZEOF_TIMEDELTA 8 -#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T -#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T -#define NPY_SIZEOF_HALF 2 -#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT -#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE -#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE - -#ifdef constchar -#undef constchar -#endif - -#define NPY_SSIZE_T_PYFMT "n" -#define constchar char - -/* NPY_INTP_FMT Note: - * Unlike the other NPY_*_FMT macros which are used with - * PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and - * PyString_Format. These functions use different formatting - * codes which are portably specified according to the Python - * documentation. See ticket #1795. 
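 *
 * For example (a sketch; `i` is an out-of-bounds npy_intp index):
 *
 *     PyErr_Format(PyExc_IndexError,
 *                  "index %" NPY_INTP_FMT " is out of bounds", i);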
- */ -#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT - #define NPY_INTP NPY_INT - #define NPY_UINTP NPY_UINT - #define PyIntpArrType_Type PyIntArrType_Type - #define PyUIntpArrType_Type PyUIntArrType_Type - #define NPY_MAX_INTP NPY_MAX_INT - #define NPY_MIN_INTP NPY_MIN_INT - #define NPY_MAX_UINTP NPY_MAX_UINT - #define NPY_INTP_FMT "d" -#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG - #define NPY_INTP NPY_LONG - #define NPY_UINTP NPY_ULONG - #define PyIntpArrType_Type PyLongArrType_Type - #define PyUIntpArrType_Type PyULongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONG - #define NPY_MIN_INTP NPY_MIN_LONG - #define NPY_MAX_UINTP NPY_MAX_ULONG - #define NPY_INTP_FMT "ld" -#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) - #define NPY_INTP NPY_LONGLONG - #define NPY_UINTP NPY_ULONGLONG - #define PyIntpArrType_Type PyLongLongArrType_Type - #define PyUIntpArrType_Type PyULongLongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONGLONG - #define NPY_MIN_INTP NPY_MIN_LONGLONG - #define NPY_MAX_UINTP NPY_MAX_ULONGLONG - #define NPY_INTP_FMT "lld" -#endif - -/* - * We can only use C99 formats for npy_int_p if it is the same as - * intp_t, hence the condition on HAVE_UNITPTR_T - */ -#if (NPY_USE_C99_FORMATS) == 1 \ - && (defined HAVE_UINTPTR_T) \ - && (defined HAVE_INTTYPES_H) - #include - #undef NPY_INTP_FMT - #define NPY_INTP_FMT PRIdPTR -#endif - - -/* - * Some platforms don't define bool, long long, or long double. - * Handle that here. - */ -#define NPY_BYTE_FMT "hhd" -#define NPY_UBYTE_FMT "hhu" -#define NPY_SHORT_FMT "hd" -#define NPY_USHORT_FMT "hu" -#define NPY_INT_FMT "d" -#define NPY_UINT_FMT "u" -#define NPY_LONG_FMT "ld" -#define NPY_ULONG_FMT "lu" -#define NPY_HALF_FMT "g" -#define NPY_FLOAT_FMT "g" -#define NPY_DOUBLE_FMT "g" - - -#ifdef PY_LONG_LONG -typedef PY_LONG_LONG npy_longlong; -typedef unsigned PY_LONG_LONG npy_ulonglong; -# ifdef _MSC_VER -# define NPY_LONGLONG_FMT "I64d" -# define NPY_ULONGLONG_FMT "I64u" -# else -# define NPY_LONGLONG_FMT "lld" -# define NPY_ULONGLONG_FMT "llu" -# endif -# ifdef _MSC_VER -# define NPY_LONGLONG_SUFFIX(x) (x##i64) -# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) -# else -# define NPY_LONGLONG_SUFFIX(x) (x##LL) -# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) -# endif -#else -typedef long npy_longlong; -typedef unsigned long npy_ulonglong; -# define NPY_LONGLONG_SUFFIX(x) (x##L) -# define NPY_ULONGLONG_SUFFIX(x) (x##UL) -#endif - - -typedef unsigned char npy_bool; -#define NPY_FALSE 0 -#define NPY_TRUE 1 - - -#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE - typedef double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "g" -#else - typedef long double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "Lg" -#endif - -#ifndef Py_USING_UNICODE -#error Must use Python with unicode enabled. -#endif - - -typedef signed char npy_byte; -typedef unsigned char npy_ubyte; -typedef unsigned short npy_ushort; -typedef unsigned int npy_uint; -typedef unsigned long npy_ulong; - -/* These are for completeness */ -typedef char npy_char; -typedef short npy_short; -typedef int npy_int; -typedef long npy_long; -typedef float npy_float; -typedef double npy_double; - -/* - * Hash value compatibility. - * As of Python 3.2 hash values are of type Py_hash_t. - * Previous versions use C long. 
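As an aside on the NPY_LONGLONG_SUFFIX and *_FMT macros defined above, a
hedged sketch of how they are meant to compose; printf and <stdio.h> are
assumed here for illustration only:

    npy_longlong big = NPY_LONGLONG_SUFFIX(1) << 62;   // portable 64-bit literal
    printf("%" NPY_LONGLONG_FMT "\n", big);            // format string concatenation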
- */ -#if PY_VERSION_HEX < 0x03020000 -typedef long npy_hash_t; -#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG -#else -typedef Py_hash_t npy_hash_t; -#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP -#endif - -/* - * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being - * able to do .real/.imag. Will have to convert code first. - */ -#if 0 -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) -typedef complex npy_cdouble; -#else -typedef struct { double real, imag; } npy_cdouble; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) -typedef complex float npy_cfloat; -#else -typedef struct { float real, imag; } npy_cfloat; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) -typedef complex long double npy_clongdouble; -#else -typedef struct {npy_longdouble real, imag;} npy_clongdouble; -#endif -#endif -#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE -#error npy_cdouble definition is not compatible with C99 complex definition ! \ - Please contact NumPy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { double real, imag; } npy_cdouble; - -#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT -#error npy_cfloat definition is not compatible with C99 complex definition ! \ - Please contact NumPy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { float real, imag; } npy_cfloat; - -#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE -#error npy_clongdouble definition is not compatible with C99 complex definition ! \ - Please contact NumPy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { npy_longdouble real, imag; } npy_clongdouble; - -/* - * numarray-style bit-width typedefs - */ -#define NPY_MAX_INT8 127 -#define NPY_MIN_INT8 -128 -#define NPY_MAX_UINT8 255 -#define NPY_MAX_INT16 32767 -#define NPY_MIN_INT16 -32768 -#define NPY_MAX_UINT16 65535 -#define NPY_MAX_INT32 2147483647 -#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) -#define NPY_MAX_UINT32 4294967295U -#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) -#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) -#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) -#define NPY_MIN_DATETIME NPY_MIN_INT64 -#define NPY_MAX_DATETIME NPY_MAX_INT64 -#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 -#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 - - /* Need to find the number of bits for each type and - make definitions accordingly. - - C states that sizeof(char) == 1 by definition - - So, just using the sizeof keyword won't help. - - It also looks like Python itself uses sizeof(char) quite a - bit, which by definition should be 1 all the time. 
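Returning to the struct-based complex definitions above, a small sketch of
the .real/.imag access pattern that the "Disabling C99 complex usage"
comment refers to (values are arbitrary):

    npy_cdouble z = { 3.0, -4.0 };
    double mag2 = z.real * z.real + z.imag * z.imag;   // 25.0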
- - Idea: Make Use of CHAR_BIT which should tell us how many - BITS per CHARACTER - */ - - /* Include platform definitions -- These are in the C89/90 standard */ -#include -#define NPY_MAX_BYTE SCHAR_MAX -#define NPY_MIN_BYTE SCHAR_MIN -#define NPY_MAX_UBYTE UCHAR_MAX -#define NPY_MAX_SHORT SHRT_MAX -#define NPY_MIN_SHORT SHRT_MIN -#define NPY_MAX_USHORT USHRT_MAX -#define NPY_MAX_INT INT_MAX -#ifndef INT_MIN -#define INT_MIN (-INT_MAX - 1) -#endif -#define NPY_MIN_INT INT_MIN -#define NPY_MAX_UINT UINT_MAX -#define NPY_MAX_LONG LONG_MAX -#define NPY_MIN_LONG LONG_MIN -#define NPY_MAX_ULONG ULONG_MAX - -#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) -#define NPY_BITSOF_CHAR CHAR_BIT -#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) -#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) -#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) -#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) -#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) -#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) -#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) -#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) -#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) -#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) -#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) -#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) -#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) -#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) -#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) - -#if NPY_BITSOF_LONG == 8 -#define NPY_INT8 NPY_LONG -#define NPY_UINT8 NPY_ULONG - typedef long npy_int8; - typedef unsigned long npy_uint8; -#define PyInt8ScalarObject PyLongScalarObject -#define PyInt8ArrType_Type PyLongArrType_Type -#define PyUInt8ScalarObject PyULongScalarObject -#define PyUInt8ArrType_Type PyULongArrType_Type -#define NPY_INT8_FMT NPY_LONG_FMT -#define NPY_UINT8_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 16 -#define NPY_INT16 NPY_LONG -#define NPY_UINT16 NPY_ULONG - typedef long npy_int16; - typedef unsigned long npy_uint16; -#define PyInt16ScalarObject PyLongScalarObject -#define PyInt16ArrType_Type PyLongArrType_Type -#define PyUInt16ScalarObject PyULongScalarObject -#define PyUInt16ArrType_Type PyULongArrType_Type -#define NPY_INT16_FMT NPY_LONG_FMT -#define NPY_UINT16_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 32 -#define NPY_INT32 NPY_LONG -#define NPY_UINT32 NPY_ULONG - typedef long npy_int32; - typedef unsigned long npy_uint32; - typedef unsigned long npy_ucs4; -#define PyInt32ScalarObject PyLongScalarObject -#define PyInt32ArrType_Type PyLongArrType_Type -#define PyUInt32ScalarObject PyULongScalarObject -#define PyUInt32ArrType_Type PyULongArrType_Type -#define NPY_INT32_FMT NPY_LONG_FMT -#define NPY_UINT32_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 64 -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG - typedef long npy_int64; - typedef unsigned long npy_uint64; -#define PyInt64ScalarObject PyLongScalarObject -#define PyInt64ArrType_Type PyLongArrType_Type -#define PyUInt64ScalarObject PyULongScalarObject -#define PyUInt64ArrType_Type PyULongArrType_Type -#define NPY_INT64_FMT NPY_LONG_FMT -#define NPY_UINT64_FMT NPY_ULONG_FMT -#define MyPyLong_FromInt64 PyLong_FromLong -#define MyPyLong_AsInt64 PyLong_AsLong -#elif NPY_BITSOF_LONG == 128 -#define NPY_INT128 NPY_LONG -#define NPY_UINT128 NPY_ULONG - typedef long npy_int128; - typedef unsigned long npy_uint128; -#define PyInt128ScalarObject 
PyLongScalarObject -#define PyInt128ArrType_Type PyLongArrType_Type -#define PyUInt128ScalarObject PyULongScalarObject -#define PyUInt128ArrType_Type PyULongArrType_Type -#define NPY_INT128_FMT NPY_LONG_FMT -#define NPY_UINT128_FMT NPY_ULONG_FMT -#endif - -#if NPY_BITSOF_LONGLONG == 8 -# ifndef NPY_INT8 -# define NPY_INT8 NPY_LONGLONG -# define NPY_UINT8 NPY_ULONGLONG - typedef npy_longlong npy_int8; - typedef npy_ulonglong npy_uint8; -# define PyInt8ScalarObject PyLongLongScalarObject -# define PyInt8ArrType_Type PyLongLongArrType_Type -# define PyUInt8ScalarObject PyULongLongScalarObject -# define PyUInt8ArrType_Type PyULongLongArrType_Type -#define NPY_INT8_FMT NPY_LONGLONG_FMT -#define NPY_UINT8_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT8 -# define NPY_MIN_LONGLONG NPY_MIN_INT8 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 -#elif NPY_BITSOF_LONGLONG == 16 -# ifndef NPY_INT16 -# define NPY_INT16 NPY_LONGLONG -# define NPY_UINT16 NPY_ULONGLONG - typedef npy_longlong npy_int16; - typedef npy_ulonglong npy_uint16; -# define PyInt16ScalarObject PyLongLongScalarObject -# define PyInt16ArrType_Type PyLongLongArrType_Type -# define PyUInt16ScalarObject PyULongLongScalarObject -# define PyUInt16ArrType_Type PyULongLongArrType_Type -#define NPY_INT16_FMT NPY_LONGLONG_FMT -#define NPY_UINT16_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT16 -# define NPY_MIN_LONGLONG NPY_MIN_INT16 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 -#elif NPY_BITSOF_LONGLONG == 32 -# ifndef NPY_INT32 -# define NPY_INT32 NPY_LONGLONG -# define NPY_UINT32 NPY_ULONGLONG - typedef npy_longlong npy_int32; - typedef npy_ulonglong npy_uint32; - typedef npy_ulonglong npy_ucs4; -# define PyInt32ScalarObject PyLongLongScalarObject -# define PyInt32ArrType_Type PyLongLongArrType_Type -# define PyUInt32ScalarObject PyULongLongScalarObject -# define PyUInt32ArrType_Type PyULongLongArrType_Type -#define NPY_INT32_FMT NPY_LONGLONG_FMT -#define NPY_UINT32_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT32 -# define NPY_MIN_LONGLONG NPY_MIN_INT32 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 -#elif NPY_BITSOF_LONGLONG == 64 -# ifndef NPY_INT64 -# define NPY_INT64 NPY_LONGLONG -# define NPY_UINT64 NPY_ULONGLONG - typedef npy_longlong npy_int64; - typedef npy_ulonglong npy_uint64; -# define PyInt64ScalarObject PyLongLongScalarObject -# define PyInt64ArrType_Type PyLongLongArrType_Type -# define PyUInt64ScalarObject PyULongLongScalarObject -# define PyUInt64ArrType_Type PyULongLongArrType_Type -#define NPY_INT64_FMT NPY_LONGLONG_FMT -#define NPY_UINT64_FMT NPY_ULONGLONG_FMT -# define MyPyLong_FromInt64 PyLong_FromLongLong -# define MyPyLong_AsInt64 PyLong_AsLongLong -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT64 -# define NPY_MIN_LONGLONG NPY_MIN_INT64 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 -#elif NPY_BITSOF_LONGLONG == 128 -# ifndef NPY_INT128 -# define NPY_INT128 NPY_LONGLONG -# define NPY_UINT128 NPY_ULONGLONG - typedef npy_longlong npy_int128; - typedef npy_ulonglong npy_uint128; -# define PyInt128ScalarObject PyLongLongScalarObject -# define PyInt128ArrType_Type PyLongLongArrType_Type -# define PyUInt128ScalarObject PyULongLongScalarObject -# define PyUInt128ArrType_Type PyULongLongArrType_Type -#define NPY_INT128_FMT NPY_LONGLONG_FMT -#define NPY_UINT128_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT128 -# define NPY_MIN_LONGLONG NPY_MIN_INT128 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 -#elif NPY_BITSOF_LONGLONG == 256 -# define NPY_INT256 
NPY_LONGLONG -# define NPY_UINT256 NPY_ULONGLONG - typedef npy_longlong npy_int256; - typedef npy_ulonglong npy_uint256; -# define PyInt256ScalarObject PyLongLongScalarObject -# define PyInt256ArrType_Type PyLongLongArrType_Type -# define PyUInt256ScalarObject PyULongLongScalarObject -# define PyUInt256ArrType_Type PyULongLongArrType_Type -#define NPY_INT256_FMT NPY_LONGLONG_FMT -#define NPY_UINT256_FMT NPY_ULONGLONG_FMT -# define NPY_MAX_LONGLONG NPY_MAX_INT256 -# define NPY_MIN_LONGLONG NPY_MIN_INT256 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 -#endif - -#if NPY_BITSOF_INT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_INT -#define NPY_UINT8 NPY_UINT - typedef int npy_int8; - typedef unsigned int npy_uint8; -# define PyInt8ScalarObject PyIntScalarObject -# define PyInt8ArrType_Type PyIntArrType_Type -# define PyUInt8ScalarObject PyUIntScalarObject -# define PyUInt8ArrType_Type PyUIntArrType_Type -#define NPY_INT8_FMT NPY_INT_FMT -#define NPY_UINT8_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_INT -#define NPY_UINT16 NPY_UINT - typedef int npy_int16; - typedef unsigned int npy_uint16; -# define PyInt16ScalarObject PyIntScalarObject -# define PyInt16ArrType_Type PyIntArrType_Type -# define PyUInt16ScalarObject PyIntUScalarObject -# define PyUInt16ArrType_Type PyIntUArrType_Type -#define NPY_INT16_FMT NPY_INT_FMT -#define NPY_UINT16_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT - typedef int npy_int32; - typedef unsigned int npy_uint32; - typedef unsigned int npy_ucs4; -# define PyInt32ScalarObject PyIntScalarObject -# define PyInt32ArrType_Type PyIntArrType_Type -# define PyUInt32ScalarObject PyUIntScalarObject -# define PyUInt32ArrType_Type PyUIntArrType_Type -#define NPY_INT32_FMT NPY_INT_FMT -#define NPY_UINT32_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_INT -#define NPY_UINT64 NPY_UINT - typedef int npy_int64; - typedef unsigned int npy_uint64; -# define PyInt64ScalarObject PyIntScalarObject -# define PyInt64ArrType_Type PyIntArrType_Type -# define PyUInt64ScalarObject PyUIntScalarObject -# define PyUInt64ArrType_Type PyUIntArrType_Type -#define NPY_INT64_FMT NPY_INT_FMT -#define NPY_UINT64_FMT NPY_UINT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_INT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_INT -#define NPY_UINT128 NPY_UINT - typedef int npy_int128; - typedef unsigned int npy_uint128; -# define PyInt128ScalarObject PyIntScalarObject -# define PyInt128ArrType_Type PyIntArrType_Type -# define PyUInt128ScalarObject PyUIntScalarObject -# define PyUInt128ArrType_Type PyUIntArrType_Type -#define NPY_INT128_FMT NPY_INT_FMT -#define NPY_UINT128_FMT NPY_UINT_FMT -#endif -#endif - -#if NPY_BITSOF_SHORT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_SHORT -#define NPY_UINT8 NPY_USHORT - typedef short npy_int8; - typedef unsigned short npy_uint8; -# define PyInt8ScalarObject PyShortScalarObject -# define PyInt8ArrType_Type PyShortArrType_Type -# define PyUInt8ScalarObject PyUShortScalarObject -# define PyUInt8ArrType_Type PyUShortArrType_Type -#define NPY_INT8_FMT NPY_SHORT_FMT -#define NPY_UINT8_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT - typedef short npy_int16; - typedef unsigned short npy_uint16; -# define PyInt16ScalarObject PyShortScalarObject 
-# define PyInt16ArrType_Type PyShortArrType_Type -# define PyUInt16ScalarObject PyUShortScalarObject -# define PyUInt16ArrType_Type PyUShortArrType_Type -#define NPY_INT16_FMT NPY_SHORT_FMT -#define NPY_UINT16_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_SHORT -#define NPY_UINT32 NPY_USHORT - typedef short npy_int32; - typedef unsigned short npy_uint32; - typedef unsigned short npy_ucs4; -# define PyInt32ScalarObject PyShortScalarObject -# define PyInt32ArrType_Type PyShortArrType_Type -# define PyUInt32ScalarObject PyUShortScalarObject -# define PyUInt32ArrType_Type PyUShortArrType_Type -#define NPY_INT32_FMT NPY_SHORT_FMT -#define NPY_UINT32_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_SHORT -#define NPY_UINT64 NPY_USHORT - typedef short npy_int64; - typedef unsigned short npy_uint64; -# define PyInt64ScalarObject PyShortScalarObject -# define PyInt64ArrType_Type PyShortArrType_Type -# define PyUInt64ScalarObject PyUShortScalarObject -# define PyUInt64ArrType_Type PyUShortArrType_Type -#define NPY_INT64_FMT NPY_SHORT_FMT -#define NPY_UINT64_FMT NPY_USHORT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_SHORT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_SHORT -#define NPY_UINT128 NPY_USHORT - typedef short npy_int128; - typedef unsigned short npy_uint128; -# define PyInt128ScalarObject PyShortScalarObject -# define PyInt128ArrType_Type PyShortArrType_Type -# define PyUInt128ScalarObject PyUShortScalarObject -# define PyUInt128ArrType_Type PyUShortArrType_Type -#define NPY_INT128_FMT NPY_SHORT_FMT -#define NPY_UINT128_FMT NPY_USHORT_FMT -#endif -#endif - - -#if NPY_BITSOF_CHAR == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE - typedef signed char npy_int8; - typedef unsigned char npy_uint8; -# define PyInt8ScalarObject PyByteScalarObject -# define PyInt8ArrType_Type PyByteArrType_Type -# define PyUInt8ScalarObject PyUByteScalarObject -# define PyUInt8ArrType_Type PyUByteArrType_Type -#define NPY_INT8_FMT NPY_BYTE_FMT -#define NPY_UINT8_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_BYTE -#define NPY_UINT16 NPY_UBYTE - typedef signed char npy_int16; - typedef unsigned char npy_uint16; -# define PyInt16ScalarObject PyByteScalarObject -# define PyInt16ArrType_Type PyByteArrType_Type -# define PyUInt16ScalarObject PyUByteScalarObject -# define PyUInt16ArrType_Type PyUByteArrType_Type -#define NPY_INT16_FMT NPY_BYTE_FMT -#define NPY_UINT16_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_BYTE -#define NPY_UINT32 NPY_UBYTE - typedef signed char npy_int32; - typedef unsigned char npy_uint32; - typedef unsigned char npy_ucs4; -# define PyInt32ScalarObject PyByteScalarObject -# define PyInt32ArrType_Type PyByteArrType_Type -# define PyUInt32ScalarObject PyUByteScalarObject -# define PyUInt32ArrType_Type PyUByteArrType_Type -#define NPY_INT32_FMT NPY_BYTE_FMT -#define NPY_UINT32_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_BYTE -#define NPY_UINT64 NPY_UBYTE - typedef signed char npy_int64; - typedef unsigned char npy_uint64; -# define PyInt64ScalarObject PyByteScalarObject -# define PyInt64ArrType_Type PyByteArrType_Type -# define PyUInt64ScalarObject PyUByteScalarObject -# define PyUInt64ArrType_Type PyUByteArrType_Type -#define NPY_INT64_FMT 
NPY_BYTE_FMT -#define NPY_UINT64_FMT NPY_UBYTE_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_CHAR == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_BYTE -#define NPY_UINT128 NPY_UBYTE - typedef signed char npy_int128; - typedef unsigned char npy_uint128; -# define PyInt128ScalarObject PyByteScalarObject -# define PyInt128ArrType_Type PyByteArrType_Type -# define PyUInt128ScalarObject PyUByteScalarObject -# define PyUInt128ArrType_Type PyUByteArrType_Type -#define NPY_INT128_FMT NPY_BYTE_FMT -#define NPY_UINT128_FMT NPY_UBYTE_FMT -#endif -#endif - - - -#if NPY_BITSOF_DOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_DOUBLE -#define NPY_COMPLEX64 NPY_CDOUBLE - typedef double npy_float32; - typedef npy_cdouble npy_complex64; -# define PyFloat32ScalarObject PyDoubleScalarObject -# define PyComplex64ScalarObject PyCDoubleScalarObject -# define PyFloat32ArrType_Type PyDoubleArrType_Type -# define PyComplex64ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX128 NPY_CDOUBLE - typedef double npy_float64; - typedef npy_cdouble npy_complex128; -# define PyFloat64ScalarObject PyDoubleScalarObject -# define PyComplex128ScalarObject PyCDoubleScalarObject -# define PyFloat64ArrType_Type PyDoubleArrType_Type -# define PyComplex128ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_DOUBLE -#define NPY_COMPLEX160 NPY_CDOUBLE - typedef double npy_float80; - typedef npy_cdouble npy_complex160; -# define PyFloat80ScalarObject PyDoubleScalarObject -# define PyComplex160ScalarObject PyCDoubleScalarObject -# define PyFloat80ArrType_Type PyDoubleArrType_Type -# define PyComplex160ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_DOUBLE -#define NPY_COMPLEX192 NPY_CDOUBLE - typedef double npy_float96; - typedef npy_cdouble npy_complex192; -# define PyFloat96ScalarObject PyDoubleScalarObject -# define PyComplex192ScalarObject PyCDoubleScalarObject -# define PyFloat96ArrType_Type PyDoubleArrType_Type -# define PyComplex192ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_DOUBLE -#define NPY_COMPLEX256 NPY_CDOUBLE - typedef double npy_float128; - typedef npy_cdouble npy_complex256; -# define PyFloat128ScalarObject PyDoubleScalarObject -# define PyComplex256ScalarObject PyCDoubleScalarObject -# define PyFloat128ArrType_Type PyDoubleArrType_Type -# define PyComplex256ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT -#endif -#endif - - - -#if NPY_BITSOF_FLOAT == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_COMPLEX64 NPY_CFLOAT - typedef float npy_float32; - typedef npy_cfloat npy_complex64; -# define PyFloat32ScalarObject PyFloatScalarObject -# define PyComplex64ScalarObject PyCFloatScalarObject -# define PyFloat32ArrType_Type PyFloatArrType_Type -# define PyComplex64ArrType_Type PyCFloatArrType_Type -#define 
NPY_FLOAT32_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_FLOAT -#define NPY_COMPLEX128 NPY_CFLOAT - typedef float npy_float64; - typedef npy_cfloat npy_complex128; -# define PyFloat64ScalarObject PyFloatScalarObject -# define PyComplex128ScalarObject PyCFloatScalarObject -# define PyFloat64ArrType_Type PyFloatArrType_Type -# define PyComplex128ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT64_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_FLOAT -#define NPY_COMPLEX160 NPY_CFLOAT - typedef float npy_float80; - typedef npy_cfloat npy_complex160; -# define PyFloat80ScalarObject PyFloatScalarObject -# define PyComplex160ScalarObject PyCFloatScalarObject -# define PyFloat80ArrType_Type PyFloatArrType_Type -# define PyComplex160ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT80_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_FLOAT -#define NPY_COMPLEX192 NPY_CFLOAT - typedef float npy_float96; - typedef npy_cfloat npy_complex192; -# define PyFloat96ScalarObject PyFloatScalarObject -# define PyComplex192ScalarObject PyCFloatScalarObject -# define PyFloat96ArrType_Type PyFloatArrType_Type -# define PyComplex192ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT96_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_FLOAT -#define NPY_COMPLEX256 NPY_CFLOAT - typedef float npy_float128; - typedef npy_cfloat npy_complex256; -# define PyFloat128ScalarObject PyFloatScalarObject -# define PyComplex256ScalarObject PyCFloatScalarObject -# define PyFloat128ArrType_Type PyFloatArrType_Type -# define PyComplex256ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT128_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT -#endif -#endif - -/* half/float16 isn't a floating-point type in C */ -#define NPY_FLOAT16 NPY_HALF -typedef npy_uint16 npy_half; -typedef npy_half npy_float16; - -#if NPY_BITSOF_LONGDOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_LONGDOUBLE -#define NPY_COMPLEX64 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float32; - typedef npy_clongdouble npy_complex64; -# define PyFloat32ScalarObject PyLongDoubleScalarObject -# define PyComplex64ScalarObject PyCLongDoubleScalarObject -# define PyFloat32ArrType_Type PyLongDoubleArrType_Type -# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_LONGDOUBLE -#define NPY_COMPLEX128 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float64; - typedef npy_clongdouble npy_complex128; -# define PyFloat64ScalarObject PyLongDoubleScalarObject -# define PyComplex128ScalarObject PyCLongDoubleScalarObject -# define PyFloat64ArrType_Type PyLongDoubleArrType_Type -# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_LONGDOUBLE -#define NPY_COMPLEX160 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float80; - typedef npy_clongdouble npy_complex160; -# define PyFloat80ScalarObject 
PyLongDoubleScalarObject -# define PyComplex160ScalarObject PyCLongDoubleScalarObject -# define PyFloat80ArrType_Type PyLongDoubleArrType_Type -# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_LONGDOUBLE -#define NPY_COMPLEX192 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float96; - typedef npy_clongdouble npy_complex192; -# define PyFloat96ScalarObject PyLongDoubleScalarObject -# define PyComplex192ScalarObject PyCLongDoubleScalarObject -# define PyFloat96ArrType_Type PyLongDoubleArrType_Type -# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_LONGDOUBLE -#define NPY_COMPLEX256 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float128; - typedef npy_clongdouble npy_complex256; -# define PyFloat128ScalarObject PyLongDoubleScalarObject -# define PyComplex256ScalarObject PyCLongDoubleScalarObject -# define PyFloat128ArrType_Type PyLongDoubleArrType_Type -# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 256 -#define NPY_FLOAT256 NPY_LONGDOUBLE -#define NPY_COMPLEX512 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float256; - typedef npy_clongdouble npy_complex512; -# define PyFloat256ScalarObject PyLongDoubleScalarObject -# define PyComplex512ScalarObject PyCLongDoubleScalarObject -# define PyFloat256ArrType_Type PyLongDoubleArrType_Type -# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT -#endif - -/* datetime typedefs */ -typedef npy_int64 npy_timedelta; -typedef npy_int64 npy_datetime; -#define NPY_DATETIME_FMT NPY_INT64_FMT -#define NPY_TIMEDELTA_FMT NPY_INT64_FMT - -/* End of typedefs for numarray style bit-width names */ - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_cpu.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_cpu.h deleted file mode 100644 index 5edd8f4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_cpu.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * This set (target) cpu specific macros: - * - Possible values: - * NPY_CPU_X86 - * NPY_CPU_AMD64 - * NPY_CPU_PPC - * NPY_CPU_PPC64 - * NPY_CPU_PPC64LE - * NPY_CPU_SPARC - * NPY_CPU_S390 - * NPY_CPU_IA64 - * NPY_CPU_HPPA - * NPY_CPU_ALPHA - * NPY_CPU_ARMEL - * NPY_CPU_ARMEB - * NPY_CPU_SH_LE - * NPY_CPU_SH_BE - * NPY_CPU_ARCEL - * NPY_CPU_ARCEB - * NPY_CPU_RISCV64 - */ -#ifndef _NPY_CPUARCH_H_ -#define _NPY_CPUARCH_H_ - -#include "numpyconfig.h" -#include /* for memcpy */ - -#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) - /* - * __i386__ is defined by gcc and Intel compiler on Linux, - * _M_IX86 by VS compiler, - * i386 by Sun compilers on opensolaris at least - */ - #define NPY_CPU_X86 -#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) - /* - * both __x86_64__ and __amd64__ are defined by gcc - * __x86_64 defined by sun compiler on opensolaris at least - * _M_AMD64 defined by MS compiler - */ - #define NPY_CPU_AMD64 -#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) - #define 
NPY_CPU_PPC64LE -#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_PPC64 -#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) - /* - * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, - * but can't find it ATM - * _ARCH_PPC is used by at least gcc on AIX - * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check - * for those specifically first before defaulting to ppc - */ - #define NPY_CPU_PPC -#elif defined(__sparc__) || defined(__sparc) - /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ - #define NPY_CPU_SPARC -#elif defined(__s390__) - #define NPY_CPU_S390 -#elif defined(__ia64) - #define NPY_CPU_IA64 -#elif defined(__hppa) - #define NPY_CPU_HPPA -#elif defined(__alpha__) - #define NPY_CPU_ALPHA -#elif defined(__arm__) || defined(__aarch64__) - #if defined(__ARMEB__) || defined(__AARCH64EB__) - #if defined(__ARM_32BIT_STATE) - #define NPY_CPU_ARMEB_AARCH32 - #elif defined(__ARM_64BIT_STATE) - #define NPY_CPU_ARMEB_AARCH64 - #else - #define NPY_CPU_ARMEB - #endif - #elif defined(__ARMEL__) || defined(__AARCH64EL__) - #if defined(__ARM_32BIT_STATE) - #define NPY_CPU_ARMEL_AARCH32 - #elif defined(__ARM_64BIT_STATE) - #define NPY_CPU_ARMEL_AARCH64 - #else - #define NPY_CPU_ARMEL - #endif - #else - # error Unknown ARM CPU, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) - #endif -#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) - #define NPY_CPU_SH_LE -#elif defined(__sh__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_SH_BE -#elif defined(__MIPSEL__) - #define NPY_CPU_MIPSEL -#elif defined(__MIPSEB__) - #define NPY_CPU_MIPSEB -#elif defined(__or1k__) - #define NPY_CPU_OR1K -#elif defined(__mc68000__) - #define NPY_CPU_M68K -#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) - #define NPY_CPU_ARCEL -#elif defined(__arc__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_ARCEB -#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 - #define NPY_CPU_RISCV64 -#else - #error Unknown CPU, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) -#endif - -#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *)) - -#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)) -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1 -#else -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0 -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_endian.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_endian.h deleted file mode 100644 index 44cdffd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_endian.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef _NPY_ENDIAN_H_ -#define _NPY_ENDIAN_H_ - -/* - * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in - * endian.h - */ - -#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H) - /* Use endian.h if available */ - - #if defined(NPY_HAVE_ENDIAN_H) - #include - #elif defined(NPY_HAVE_SYS_ENDIAN_H) - #include - #endif - - #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN) - #define NPY_BYTE_ORDER BYTE_ORDER - #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN - #define NPY_BIG_ENDIAN BIG_ENDIAN - #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) - #define NPY_BYTE_ORDER _BYTE_ORDER - #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN - #define NPY_BIG_ENDIAN _BIG_ENDIAN - #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && 
defined(__LITTLE_ENDIAN) - #define NPY_BYTE_ORDER __BYTE_ORDER - #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN - #define NPY_BIG_ENDIAN __BIG_ENDIAN - #endif -#endif - -#ifndef NPY_BYTE_ORDER - /* Set endianness info using target CPU */ - #include "npy_cpu.h" - - #define NPY_LITTLE_ENDIAN 1234 - #define NPY_BIG_ENDIAN 4321 - - #if defined(NPY_CPU_X86) \ - || defined(NPY_CPU_AMD64) \ - || defined(NPY_CPU_IA64) \ - || defined(NPY_CPU_ALPHA) \ - || defined(NPY_CPU_ARMEL) \ - || defined(NPY_CPU_ARMEL_AARCH32) \ - || defined(NPY_CPU_ARMEL_AARCH64) \ - || defined(NPY_CPU_SH_LE) \ - || defined(NPY_CPU_MIPSEL) \ - || defined(NPY_CPU_PPC64LE) \ - || defined(NPY_CPU_ARCEL) \ - || defined(NPY_CPU_RISCV64) - #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN - #elif defined(NPY_CPU_PPC) \ - || defined(NPY_CPU_SPARC) \ - || defined(NPY_CPU_S390) \ - || defined(NPY_CPU_HPPA) \ - || defined(NPY_CPU_PPC64) \ - || defined(NPY_CPU_ARMEB) \ - || defined(NPY_CPU_ARMEB_AARCH32) \ - || defined(NPY_CPU_ARMEB_AARCH64) \ - || defined(NPY_CPU_SH_BE) \ - || defined(NPY_CPU_MIPSEB) \ - || defined(NPY_CPU_OR1K) \ - || defined(NPY_CPU_M68K) \ - || defined(NPY_CPU_ARCEB) - #define NPY_BYTE_ORDER NPY_BIG_ENDIAN - #else - #error Unknown CPU: can not set endianness - #endif -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_interrupt.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_interrupt.h deleted file mode 100644 index 40cb7ac..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_interrupt.h +++ /dev/null @@ -1,117 +0,0 @@ - -/* Signal handling: - -This header file defines macros that allow your code to handle -interrupts received during processing. Interrupts that -could reasonably be handled: - -SIGINT, SIGABRT, SIGALRM, SIGSEGV - -****Warning*************** - -Do not allow code that creates temporary memory or increases reference -counts of Python objects to be interrupted unless you handle it -differently. - -************************** - -The mechanism for handling interrupts is conceptually simple: - - - replace the signal handler with our own home-grown version - and store the old one. - - run the code to be interrupted -- if an interrupt occurs - the handler should basically just cause a return to the - calling function for finish work. - - restore the old signal handler - -Of course, every code that allows interrupts must account for -returning via the interrupt and handle clean-up correctly. But, -even still, the simple paradigm is complicated by at least three -factors. - - 1) platform portability (i.e. Microsoft says not to use longjmp - to return from signal handling. They have a __try and __except - extension to C instead but what about mingw?). - - 2) how to handle threads: apparently whether signals are delivered to - every thread of the process or the "invoking" thread is platform - dependent. --- we don't handle threads for now. - - 3) do we need to worry about re-entrance. For now, assume the - code will not call-back into itself. - -Ideas: - - 1) Start by implementing an approach that works on platforms that - can use setjmp and longjmp functionality and does nothing - on other platforms. - - 2) Ignore threads --- i.e. do not mix interrupt handling and threads - - 3) Add a default signal_handler function to the C-API but have the rest - use macros. 
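A compressed sketch of idea 1, assuming POSIX sigsetjmp/siglongjmp; the names
interrupt_buf, on_sigint and run_interruptible_work are placeholders rather
than part of the C-API, and the macros described below expand to essentially
this shape:

    static sigjmp_buf interrupt_buf;
    static void on_sigint(int sig) { siglongjmp(interrupt_buf, 1); }

    PyOS_sighandler_t old = PyOS_setsig(SIGINT, on_sigint);
    if (sigsetjmp(interrupt_buf, 1) == 0) {
        run_interruptible_work();  // must not allocate or touch refcounts
    }
    PyOS_setsig(SIGINT, old);      // always restore the previous handler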
- - -Simple Interface: - - -In your C-extension: around a block of code you want to be interruptible -with a SIGINT - -NPY_SIGINT_ON -[code] -NPY_SIGINT_OFF - -In order for this to work correctly, the -[code] block must not allocate any memory or alter the reference count of any -Python objects. In other words [code] must be interruptible so that continuation -after NPY_SIGINT_OFF will only be "missing some computations" - -Interrupt handling does not work well with threads. - -*/ - -/* Add signal handling macros - Make the global variable and signal handler part of the C-API -*/ - -#ifndef NPY_INTERRUPT_H -#define NPY_INTERRUPT_H - -#ifndef NPY_NO_SIGNAL - -#include -#include - -#ifndef sigsetjmp - -#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) -#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) -#define NPY_SIGJMP_BUF jmp_buf - -#else - -#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) -#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) -#define NPY_SIGJMP_BUF sigjmp_buf - -#endif - -# define NPY_SIGINT_ON { \ - PyOS_sighandler_t _npy_sig_save; \ - _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ - if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ - 1) == 0) { \ - -# define NPY_SIGINT_OFF } \ - PyOS_setsig(SIGINT, _npy_sig_save); \ - } - -#else /* NPY_NO_SIGNAL */ - -#define NPY_SIGINT_ON -#define NPY_SIGINT_OFF - -#endif /* HAVE_SIGSETJMP */ - -#endif /* NPY_INTERRUPT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_math.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_math.h deleted file mode 100644 index 69e690f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_math.h +++ /dev/null @@ -1,646 +0,0 @@ -#ifndef __NPY_MATH_C99_H_ -#define __NPY_MATH_C99_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#ifdef __SUNPRO_CC -#include -#endif -#ifdef HAVE_NPY_CONFIG_H -#include -#endif -#include - -/* By adding static inline specifiers to npy_math function definitions when - appropriate, compiler is given the opportunity to optimize */ -#if NPY_INLINE_MATH -#define NPY_INPLACE NPY_INLINE static -#else -#define NPY_INPLACE -#endif - - -/* - * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 - * for INFINITY) - * - * XXX: I should test whether INFINITY and NAN are available on the platform - */ -NPY_INLINE static float __npy_inff(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nanf(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_pzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; - return __bint.__f; -} - -#define NPY_INFINITYF __npy_inff() -#define NPY_NANF __npy_nanf() -#define NPY_PZEROF __npy_pzerof() -#define NPY_NZEROF __npy_nzerof() - -#define NPY_INFINITY ((npy_double)NPY_INFINITYF) -#define NPY_NAN ((npy_double)NPY_NANF) -#define NPY_PZERO ((npy_double)NPY_PZEROF) -#define NPY_NZERO ((npy_double)NPY_NZEROF) - -#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) -#define NPY_NANL ((npy_longdouble)NPY_NANF) -#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) -#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) - -/* - * Useful constants - */ -#define NPY_E 2.718281828459045235360287471352662498 
/* e */ -#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ -#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ -#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ -#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ -#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ -#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ -#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ -#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ -#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ -#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ -#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ -#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ - -#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ -#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ -#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ -#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ -#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ -#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ -#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ -#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ -#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ -#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ -#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ -#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ -#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ - -#define NPY_El 2.718281828459045235360287471352662498L /* e */ -#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ -#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ -#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ -#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ -#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ -#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ -#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ -#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ -#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ -#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ -#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ -#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ - -/* - * Constants used in vector implementation of exp(x) - */ -#define NPY_RINT_CVT_MAGICf 0x1.800000p+23f -#define NPY_CODY_WAITE_LOGE_2_HIGHf -6.93145752e-1f -#define NPY_CODY_WAITE_LOGE_2_LOWf -1.42860677e-6f -#define NPY_COEFF_P0_EXPf 9.999999999980870924916e-01f -#define NPY_COEFF_P1_EXPf 7.257664613233124478488e-01f -#define NPY_COEFF_P2_EXPf 2.473615434895520810817e-01f -#define NPY_COEFF_P3_EXPf 5.114512081637298353406e-02f -#define NPY_COEFF_P4_EXPf 6.757896990527504603057e-03f -#define NPY_COEFF_P5_EXPf 5.082762527590693718096e-04f -#define NPY_COEFF_Q0_EXPf 1.000000000000000000000e+00f -#define NPY_COEFF_Q1_EXPf -2.742335390411667452936e-01f -#define NPY_COEFF_Q2_EXPf 2.159509375685829852307e-02f - -/* - * Constants used in vector implementation of log(x) - */ -#define 
NPY_COEFF_P0_LOGf 0.000000000000000000000e+00f -#define NPY_COEFF_P1_LOGf 9.999999999999998702752e-01f -#define NPY_COEFF_P2_LOGf 2.112677543073053063722e+00f -#define NPY_COEFF_P3_LOGf 1.480000633576506585156e+00f -#define NPY_COEFF_P4_LOGf 3.808837741388407920751e-01f -#define NPY_COEFF_P5_LOGf 2.589979117907922693523e-02f -#define NPY_COEFF_Q0_LOGf 1.000000000000000000000e+00f -#define NPY_COEFF_Q1_LOGf 2.612677543073109236779e+00f -#define NPY_COEFF_Q2_LOGf 2.453006071784736363091e+00f -#define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f -#define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f -#define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f -/* - * Constants used in vector implementation of sinf/cosf(x) - */ -#define NPY_TWO_O_PIf 0x1.45f306p-1f -#define NPY_CODY_WAITE_PI_O_2_HIGHf -0x1.921fb0p+00f -#define NPY_CODY_WAITE_PI_O_2_MEDf -0x1.5110b4p-22f -#define NPY_CODY_WAITE_PI_O_2_LOWf -0x1.846988p-48f -#define NPY_COEFF_INVF0_COSINEf 0x1.000000p+00f -#define NPY_COEFF_INVF2_COSINEf -0x1.000000p-01f -#define NPY_COEFF_INVF4_COSINEf 0x1.55553cp-05f -#define NPY_COEFF_INVF6_COSINEf -0x1.6c06dcp-10f -#define NPY_COEFF_INVF8_COSINEf 0x1.98e616p-16f -#define NPY_COEFF_INVF3_SINEf -0x1.555556p-03f -#define NPY_COEFF_INVF5_SINEf 0x1.11119ap-07f -#define NPY_COEFF_INVF7_SINEf -0x1.a06bbap-13f -#define NPY_COEFF_INVF9_SINEf 0x1.7d3bbcp-19f -/* - * Integer functions. - */ -NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); -NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); -NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); -NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); - -NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); -NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); -NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); -NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); -NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); -NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); - -NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); -NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); -NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); -NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); -NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); -NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); -NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); -NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); -NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); - -NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); -NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); -NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); -NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); -NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); -NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); -NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); -NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); -NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); -NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); - -/* - * avx function has a common API for both sin & cos. 
This enum is used to - * distinguish between the two - */ -typedef enum { - npy_compute_sin, - npy_compute_cos -} NPY_TRIG_OP; - -/* - * C99 double math funcs - */ -NPY_INPLACE double npy_sin(double x); -NPY_INPLACE double npy_cos(double x); -NPY_INPLACE double npy_tan(double x); -NPY_INPLACE double npy_sinh(double x); -NPY_INPLACE double npy_cosh(double x); -NPY_INPLACE double npy_tanh(double x); - -NPY_INPLACE double npy_asin(double x); -NPY_INPLACE double npy_acos(double x); -NPY_INPLACE double npy_atan(double x); - -NPY_INPLACE double npy_log(double x); -NPY_INPLACE double npy_log10(double x); -NPY_INPLACE double npy_exp(double x); -NPY_INPLACE double npy_sqrt(double x); -NPY_INPLACE double npy_cbrt(double x); - -NPY_INPLACE double npy_fabs(double x); -NPY_INPLACE double npy_ceil(double x); -NPY_INPLACE double npy_fmod(double x, double y); -NPY_INPLACE double npy_floor(double x); - -NPY_INPLACE double npy_expm1(double x); -NPY_INPLACE double npy_log1p(double x); -NPY_INPLACE double npy_hypot(double x, double y); -NPY_INPLACE double npy_acosh(double x); -NPY_INPLACE double npy_asinh(double xx); -NPY_INPLACE double npy_atanh(double x); -NPY_INPLACE double npy_rint(double x); -NPY_INPLACE double npy_trunc(double x); -NPY_INPLACE double npy_exp2(double x); -NPY_INPLACE double npy_log2(double x); - -NPY_INPLACE double npy_atan2(double x, double y); -NPY_INPLACE double npy_pow(double x, double y); -NPY_INPLACE double npy_modf(double x, double* y); -NPY_INPLACE double npy_frexp(double x, int* y); -NPY_INPLACE double npy_ldexp(double n, int y); - -NPY_INPLACE double npy_copysign(double x, double y); -double npy_nextafter(double x, double y); -double npy_spacing(double x); - -/* - * IEEE 754 fpu handling. Those are guaranteed to be macros - */ - -/* use builtins to avoid function calls in tight loops - * only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISNAN - #define npy_isnan(x) __builtin_isnan(x) -#else - #ifndef NPY_HAVE_DECL_ISNAN - #define npy_isnan(x) ((x) != (x)) - #else - #if defined(_MSC_VER) && (_MSC_VER < 1900) - #define npy_isnan(x) _isnan((x)) - #else - #define npy_isnan(x) isnan(x) - #endif - #endif -#endif - - -/* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISFINITE - #define npy_isfinite(x) __builtin_isfinite(x) -#else - #ifndef NPY_HAVE_DECL_ISFINITE - #ifdef _MSC_VER - #define npy_isfinite(x) _finite((x)) - #else - #define npy_isfinite(x) !npy_isnan((x) + (-x)) - #endif - #else - #define npy_isfinite(x) isfinite((x)) - #endif -#endif - -/* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISINF - #define npy_isinf(x) __builtin_isinf(x) -#else - #ifndef NPY_HAVE_DECL_ISINF - #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) - #else - #if defined(_MSC_VER) && (_MSC_VER < 1900) - #define npy_isinf(x) (!_finite((x)) && !_isnan((x))) - #else - #define npy_isinf(x) isinf((x)) - #endif - #endif -#endif - -#ifndef NPY_HAVE_DECL_SIGNBIT - int _npy_signbit_f(float x); - int _npy_signbit_d(double x); - int _npy_signbit_ld(long double x); - #define npy_signbit(x) \ - (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? 
_npy_signbit_d (x) \ - : _npy_signbit_f (x)) -#else - #define npy_signbit(x) signbit((x)) -#endif - -/* - * float C99 math functions - */ -NPY_INPLACE float npy_sinf(float x); -NPY_INPLACE float npy_cosf(float x); -NPY_INPLACE float npy_tanf(float x); -NPY_INPLACE float npy_sinhf(float x); -NPY_INPLACE float npy_coshf(float x); -NPY_INPLACE float npy_tanhf(float x); -NPY_INPLACE float npy_fabsf(float x); -NPY_INPLACE float npy_floorf(float x); -NPY_INPLACE float npy_ceilf(float x); -NPY_INPLACE float npy_rintf(float x); -NPY_INPLACE float npy_truncf(float x); -NPY_INPLACE float npy_sqrtf(float x); -NPY_INPLACE float npy_cbrtf(float x); -NPY_INPLACE float npy_log10f(float x); -NPY_INPLACE float npy_logf(float x); -NPY_INPLACE float npy_expf(float x); -NPY_INPLACE float npy_expm1f(float x); -NPY_INPLACE float npy_asinf(float x); -NPY_INPLACE float npy_acosf(float x); -NPY_INPLACE float npy_atanf(float x); -NPY_INPLACE float npy_asinhf(float x); -NPY_INPLACE float npy_acoshf(float x); -NPY_INPLACE float npy_atanhf(float x); -NPY_INPLACE float npy_log1pf(float x); -NPY_INPLACE float npy_exp2f(float x); -NPY_INPLACE float npy_log2f(float x); - -NPY_INPLACE float npy_atan2f(float x, float y); -NPY_INPLACE float npy_hypotf(float x, float y); -NPY_INPLACE float npy_powf(float x, float y); -NPY_INPLACE float npy_fmodf(float x, float y); - -NPY_INPLACE float npy_modff(float x, float* y); -NPY_INPLACE float npy_frexpf(float x, int* y); -NPY_INPLACE float npy_ldexpf(float x, int y); - -NPY_INPLACE float npy_copysignf(float x, float y); -float npy_nextafterf(float x, float y); -float npy_spacingf(float x); - -/* - * long double C99 math functions - */ -NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_sinhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_coshl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_tanhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_fabsl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_floorl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_ceill(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_rintl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_truncl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_cbrtl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_log10l(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_logl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_expm1l(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_asinl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_acosl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_atanl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_asinhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_acoshl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_atanhl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_log1pl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_exp2l(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); - -NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); - -NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); -NPY_INPLACE 
npy_longdouble npy_frexpl(npy_longdouble x, int* y); -NPY_INPLACE npy_longdouble npy_ldexpl(npy_longdouble x, int y); - -NPY_INPLACE npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_spacingl(npy_longdouble x); - -/* - * Non standard functions - */ -NPY_INPLACE double npy_deg2rad(double x); -NPY_INPLACE double npy_rad2deg(double x); -NPY_INPLACE double npy_logaddexp(double x, double y); -NPY_INPLACE double npy_logaddexp2(double x, double y); -NPY_INPLACE double npy_divmod(double x, double y, double *modulus); -NPY_INPLACE double npy_heaviside(double x, double h0); - -NPY_INPLACE float npy_deg2radf(float x); -NPY_INPLACE float npy_rad2degf(float x); -NPY_INPLACE float npy_logaddexpf(float x, float y); -NPY_INPLACE float npy_logaddexp2f(float x, float y); -NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); -NPY_INPLACE float npy_heavisidef(float x, float h0); - -NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); -NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); -NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, - npy_longdouble *modulus); -NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); - -#define npy_degrees npy_rad2deg -#define npy_degreesf npy_rad2degf -#define npy_degreesl npy_rad2degl - -#define npy_radians npy_deg2rad -#define npy_radiansf npy_deg2radf -#define npy_radiansl npy_deg2radl - -/* - * Complex declarations - */ - -/* - * C99 specifies that complex numbers have the same representation as - * an array of two elements, where the first element is the real part - * and the second element is the imaginary part. 
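For instance, under a C99 compiler this representation guarantee lets the two
views below agree; the snippet is an illustration only and is not used by the
header itself:

    #include <complex.h>
    double _Complex c = 3.0 + 4.0 * I;
    double *parts = (double *)&c;  // parts[0] == creal(c), parts[1] == cimag(c)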
- */
-#define __NPY_CPACK_IMP(x, y, type, ctype) \
-    union { \
-        ctype z; \
-        type a[2]; \
-    } z1; \
-    \
-    z1.a[0] = (x); \
-    z1.a[1] = (y); \
-    \
-    return z1.z;
-
-static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
-{
-    __NPY_CPACK_IMP(x, y, double, npy_cdouble);
-}
-
-static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
-{
-    __NPY_CPACK_IMP(x, y, float, npy_cfloat);
-}
-
-static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
-{
-    __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
-}
-#undef __NPY_CPACK_IMP
-
-/*
- * Same remark as above, but in the other direction: extract the first/second
- * member of a complex number, assuming a C99-compatible representation.
- *
- * These are defined as static inline functions, so a reasonable compiler
- * will most likely compile them to one or two instructions (on CISC at least).
- */
-#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \
-    union { \
-        ctype z; \
-        type a[2]; \
-    } __z_repr; \
-    __z_repr.z = z; \
-    \
-    return __z_repr.a[index];
-
-static NPY_INLINE double npy_creal(npy_cdouble z)
-{
-    __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
-}
-
-static NPY_INLINE double npy_cimag(npy_cdouble z)
-{
-    __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
-}
-
-static NPY_INLINE float npy_crealf(npy_cfloat z)
-{
-    __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
-}
-
-static NPY_INLINE float npy_cimagf(npy_cfloat z)
-{
-    __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
-}
-
-static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
-{
-    __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
-}
-
-static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
-{
-    __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
-}
-#undef __NPY_CEXTRACT_IMP
-
-/*
- * Double precision complex functions
- */
-double npy_cabs(npy_cdouble z);
-double npy_carg(npy_cdouble z);
-
-npy_cdouble npy_cexp(npy_cdouble z);
-npy_cdouble npy_clog(npy_cdouble z);
-npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
-
-npy_cdouble npy_csqrt(npy_cdouble z);
-
-npy_cdouble npy_ccos(npy_cdouble z);
-npy_cdouble npy_csin(npy_cdouble z);
-npy_cdouble npy_ctan(npy_cdouble z);
-
-npy_cdouble npy_ccosh(npy_cdouble z);
-npy_cdouble npy_csinh(npy_cdouble z);
-npy_cdouble npy_ctanh(npy_cdouble z);
-
-npy_cdouble npy_cacos(npy_cdouble z);
-npy_cdouble npy_casin(npy_cdouble z);
-npy_cdouble npy_catan(npy_cdouble z);
-
-npy_cdouble npy_cacosh(npy_cdouble z);
-npy_cdouble npy_casinh(npy_cdouble z);
-npy_cdouble npy_catanh(npy_cdouble z);
-
-/*
- * Single precision complex functions
- */
-float npy_cabsf(npy_cfloat z);
-float npy_cargf(npy_cfloat z);
-
-npy_cfloat npy_cexpf(npy_cfloat z);
-npy_cfloat npy_clogf(npy_cfloat z);
-npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
-
-npy_cfloat npy_csqrtf(npy_cfloat z);
-
-npy_cfloat npy_ccosf(npy_cfloat z);
-npy_cfloat npy_csinf(npy_cfloat z);
-npy_cfloat npy_ctanf(npy_cfloat z);
-
-npy_cfloat npy_ccoshf(npy_cfloat z);
-npy_cfloat npy_csinhf(npy_cfloat z);
-npy_cfloat npy_ctanhf(npy_cfloat z);
-
-npy_cfloat npy_cacosf(npy_cfloat z);
-npy_cfloat npy_casinf(npy_cfloat z);
-npy_cfloat npy_catanf(npy_cfloat z);
-
-npy_cfloat npy_cacoshf(npy_cfloat z);
-npy_cfloat npy_casinhf(npy_cfloat z);
-npy_cfloat npy_catanhf(npy_cfloat z);
-
-
-/*
- * Extended precision complex functions
- */
-npy_longdouble npy_cabsl(npy_clongdouble z);
-npy_longdouble npy_cargl(npy_clongdouble z);
-
-npy_clongdouble npy_cexpl(npy_clongdouble z);
-npy_clongdouble npy_clogl(npy_clongdouble z);
-npy_clongdouble npy_cpowl(npy_clongdouble x,
npy_clongdouble y); - -npy_clongdouble npy_csqrtl(npy_clongdouble z); - -npy_clongdouble npy_ccosl(npy_clongdouble z); -npy_clongdouble npy_csinl(npy_clongdouble z); -npy_clongdouble npy_ctanl(npy_clongdouble z); - -npy_clongdouble npy_ccoshl(npy_clongdouble z); -npy_clongdouble npy_csinhl(npy_clongdouble z); -npy_clongdouble npy_ctanhl(npy_clongdouble z); - -npy_clongdouble npy_cacosl(npy_clongdouble z); -npy_clongdouble npy_casinl(npy_clongdouble z); -npy_clongdouble npy_catanl(npy_clongdouble z); - -npy_clongdouble npy_cacoshl(npy_clongdouble z); -npy_clongdouble npy_casinhl(npy_clongdouble z); -npy_clongdouble npy_catanhl(npy_clongdouble z); - - -/* - * Functions that set the floating point error - * status word. - */ - -/* - * platform-dependent code translates floating point - * status to an integer sum of these values - */ -#define NPY_FPE_DIVIDEBYZERO 1 -#define NPY_FPE_OVERFLOW 2 -#define NPY_FPE_UNDERFLOW 4 -#define NPY_FPE_INVALID 8 - -int npy_clear_floatstatus_barrier(char*); -int npy_get_floatstatus_barrier(char*); -/* - * use caution with these - clang and gcc8.1 are known to reorder calls - * to this form of the function which can defeat the check. The _barrier - * form of the call is preferable, where the argument is - * (char*)&local_variable - */ -int npy_clear_floatstatus(void); -int npy_get_floatstatus(void); - -void npy_set_floatstatus_divbyzero(void); -void npy_set_floatstatus_overflow(void); -void npy_set_floatstatus_underflow(void); -void npy_set_floatstatus_invalid(void); - -#ifdef __cplusplus -} -#endif - -#if NPY_INLINE_MATH -#include "npy_math_internal.h" -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h deleted file mode 100644 index 6183dc2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * This include file is provided for inclusion in Cython *.pyd files where - * one would like to define the NPY_NO_DEPRECATED_API macro. It can be - * included by - * - * cdef extern from "npy_no_deprecated_api.h": pass - * - */ -#ifndef NPY_NO_DEPRECATED_API - -/* put this check here since there may be multiple includes in C extensions. */ -#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ - defined(OLD_DEFINES_H) -#error "npy_no_deprecated_api.h" must be first among numpy includes. 
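-/*
- * If this check fires, the fix is to put this header (or the macro it
- * defines) before every other NumPy include. For illustration, a minimal
- * sketch of the manual equivalent in a C extension:
- *
- *     #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
- *     #include <numpy/arrayobject.h>
- */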
-#else -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_os.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_os.h deleted file mode 100644 index 9228c39..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/npy_os.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _NPY_OS_H_ -#define _NPY_OS_H_ - -#if defined(linux) || defined(__linux) || defined(__linux__) - #define NPY_OS_LINUX -#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__OpenBSD__) || defined(__DragonFly__) - #define NPY_OS_BSD - #ifdef __FreeBSD__ - #define NPY_OS_FREEBSD - #elif defined(__NetBSD__) - #define NPY_OS_NETBSD - #elif defined(__OpenBSD__) - #define NPY_OS_OPENBSD - #elif defined(__DragonFly__) - #define NPY_OS_DRAGONFLY - #endif -#elif defined(sun) || defined(__sun) - #define NPY_OS_SOLARIS -#elif defined(__CYGWIN__) - #define NPY_OS_CYGWIN -#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) - #define NPY_OS_WIN32 -#elif defined(__APPLE__) - #define NPY_OS_DARWIN -#else - #define NPY_OS_UNKNOWN -#endif - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/numpyconfig.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/numpyconfig.h deleted file mode 100644 index 4bca82f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/numpyconfig.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _NPY_NUMPYCONFIG_H_ -#define _NPY_NUMPYCONFIG_H_ - -#include "_numpyconfig.h" - -/* - * On Mac OS X, because there is only one configuration stage for all the archs - * in universal builds, any macro which depends on the arch needs to be - * hardcoded - */ -#ifdef __APPLE__ - #undef NPY_SIZEOF_LONG - #undef NPY_SIZEOF_PY_INTPTR_T - - #ifdef __LP64__ - #define NPY_SIZEOF_LONG 8 - #define NPY_SIZEOF_PY_INTPTR_T 8 - #else - #define NPY_SIZEOF_LONG 4 - #define NPY_SIZEOF_PY_INTPTR_T 4 - #endif -#endif - -/** - * To help with the NPY_NO_DEPRECATED_API macro, we include API version - * numbers for specific versions of NumPy. To exclude all API that was - * deprecated as of 1.7, add the following before #including any NumPy - * headers: - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - */ -#define NPY_1_7_API_VERSION 0x00000007 -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_1_9_API_VERSION 0x00000008 -#define NPY_1_10_API_VERSION 0x00000008 -#define NPY_1_11_API_VERSION 0x00000008 -#define NPY_1_12_API_VERSION 0x00000008 -#define NPY_1_13_API_VERSION 0x00000008 -#define NPY_1_14_API_VERSION 0x00000008 -#define NPY_1_15_API_VERSION 0x00000008 -#define NPY_1_16_API_VERSION 0x00000008 -#define NPY_1_17_API_VERSION 0x00000008 -#define NPY_1_18_API_VERSION 0x00000008 - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/old_defines.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/old_defines.h deleted file mode 100644 index abf8159..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/old_defines.h +++ /dev/null @@ -1,187 +0,0 @@ -/* This header is deprecated as of NumPy 1.7 */ -#ifndef OLD_DEFINES_H -#define OLD_DEFINES_H - -#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION -#error The header "old_defines.h" is deprecated as of NumPy 1.7. 
-#endif - -#define NDARRAY_VERSION NPY_VERSION - -#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE -#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE -#define PyArray_BUFSIZE NPY_BUFSIZE - -#define PyArray_PRIORITY NPY_PRIORITY -#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY -#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE - -#define NPY_MAX PyArray_MAX -#define NPY_MIN PyArray_MIN - -#define PyArray_TYPES NPY_TYPES -#define PyArray_BOOL NPY_BOOL -#define PyArray_BYTE NPY_BYTE -#define PyArray_UBYTE NPY_UBYTE -#define PyArray_SHORT NPY_SHORT -#define PyArray_USHORT NPY_USHORT -#define PyArray_INT NPY_INT -#define PyArray_UINT NPY_UINT -#define PyArray_LONG NPY_LONG -#define PyArray_ULONG NPY_ULONG -#define PyArray_LONGLONG NPY_LONGLONG -#define PyArray_ULONGLONG NPY_ULONGLONG -#define PyArray_HALF NPY_HALF -#define PyArray_FLOAT NPY_FLOAT -#define PyArray_DOUBLE NPY_DOUBLE -#define PyArray_LONGDOUBLE NPY_LONGDOUBLE -#define PyArray_CFLOAT NPY_CFLOAT -#define PyArray_CDOUBLE NPY_CDOUBLE -#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE -#define PyArray_OBJECT NPY_OBJECT -#define PyArray_STRING NPY_STRING -#define PyArray_UNICODE NPY_UNICODE -#define PyArray_VOID NPY_VOID -#define PyArray_DATETIME NPY_DATETIME -#define PyArray_TIMEDELTA NPY_TIMEDELTA -#define PyArray_NTYPES NPY_NTYPES -#define PyArray_NOTYPE NPY_NOTYPE -#define PyArray_CHAR NPY_CHAR -#define PyArray_USERDEF NPY_USERDEF -#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES - -#define PyArray_INTP NPY_INTP -#define PyArray_UINTP NPY_UINTP - -#define PyArray_INT8 NPY_INT8 -#define PyArray_UINT8 NPY_UINT8 -#define PyArray_INT16 NPY_INT16 -#define PyArray_UINT16 NPY_UINT16 -#define PyArray_INT32 NPY_INT32 -#define PyArray_UINT32 NPY_UINT32 - -#ifdef NPY_INT64 -#define PyArray_INT64 NPY_INT64 -#define PyArray_UINT64 NPY_UINT64 -#endif - -#ifdef NPY_INT128 -#define PyArray_INT128 NPY_INT128 -#define PyArray_UINT128 NPY_UINT128 -#endif - -#ifdef NPY_FLOAT16 -#define PyArray_FLOAT16 NPY_FLOAT16 -#define PyArray_COMPLEX32 NPY_COMPLEX32 -#endif - -#ifdef NPY_FLOAT80 -#define PyArray_FLOAT80 NPY_FLOAT80 -#define PyArray_COMPLEX160 NPY_COMPLEX160 -#endif - -#ifdef NPY_FLOAT96 -#define PyArray_FLOAT96 NPY_FLOAT96 -#define PyArray_COMPLEX192 NPY_COMPLEX192 -#endif - -#ifdef NPY_FLOAT128 -#define PyArray_FLOAT128 NPY_FLOAT128 -#define PyArray_COMPLEX256 NPY_COMPLEX256 -#endif - -#define PyArray_FLOAT32 NPY_FLOAT32 -#define PyArray_COMPLEX64 NPY_COMPLEX64 -#define PyArray_FLOAT64 NPY_FLOAT64 -#define PyArray_COMPLEX128 NPY_COMPLEX128 - - -#define PyArray_TYPECHAR NPY_TYPECHAR -#define PyArray_BOOLLTR NPY_BOOLLTR -#define PyArray_BYTELTR NPY_BYTELTR -#define PyArray_UBYTELTR NPY_UBYTELTR -#define PyArray_SHORTLTR NPY_SHORTLTR -#define PyArray_USHORTLTR NPY_USHORTLTR -#define PyArray_INTLTR NPY_INTLTR -#define PyArray_UINTLTR NPY_UINTLTR -#define PyArray_LONGLTR NPY_LONGLTR -#define PyArray_ULONGLTR NPY_ULONGLTR -#define PyArray_LONGLONGLTR NPY_LONGLONGLTR -#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR -#define PyArray_HALFLTR NPY_HALFLTR -#define PyArray_FLOATLTR NPY_FLOATLTR -#define PyArray_DOUBLELTR NPY_DOUBLELTR -#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR -#define PyArray_CFLOATLTR NPY_CFLOATLTR -#define PyArray_CDOUBLELTR NPY_CDOUBLELTR -#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR -#define PyArray_OBJECTLTR NPY_OBJECTLTR -#define PyArray_STRINGLTR NPY_STRINGLTR -#define PyArray_STRINGLTR2 NPY_STRINGLTR2 -#define PyArray_UNICODELTR NPY_UNICODELTR -#define PyArray_VOIDLTR NPY_VOIDLTR -#define PyArray_DATETIMELTR NPY_DATETIMELTR -#define 
PyArray_TIMEDELTALTR NPY_TIMEDELTALTR -#define PyArray_CHARLTR NPY_CHARLTR -#define PyArray_INTPLTR NPY_INTPLTR -#define PyArray_UINTPLTR NPY_UINTPLTR -#define PyArray_GENBOOLLTR NPY_GENBOOLLTR -#define PyArray_SIGNEDLTR NPY_SIGNEDLTR -#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR -#define PyArray_FLOATINGLTR NPY_FLOATINGLTR -#define PyArray_COMPLEXLTR NPY_COMPLEXLTR - -#define PyArray_QUICKSORT NPY_QUICKSORT -#define PyArray_HEAPSORT NPY_HEAPSORT -#define PyArray_MERGESORT NPY_MERGESORT -#define PyArray_SORTKIND NPY_SORTKIND -#define PyArray_NSORTS NPY_NSORTS - -#define PyArray_NOSCALAR NPY_NOSCALAR -#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR -#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR -#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR -#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR -#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR -#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR -#define PyArray_SCALARKIND NPY_SCALARKIND -#define PyArray_NSCALARKINDS NPY_NSCALARKINDS - -#define PyArray_ANYORDER NPY_ANYORDER -#define PyArray_CORDER NPY_CORDER -#define PyArray_FORTRANORDER NPY_FORTRANORDER -#define PyArray_ORDER NPY_ORDER - -#define PyDescr_ISBOOL PyDataType_ISBOOL -#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED -#define PyDescr_ISSIGNED PyDataType_ISSIGNED -#define PyDescr_ISINTEGER PyDataType_ISINTEGER -#define PyDescr_ISFLOAT PyDataType_ISFLOAT -#define PyDescr_ISNUMBER PyDataType_ISNUMBER -#define PyDescr_ISSTRING PyDataType_ISSTRING -#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX -#define PyDescr_ISPYTHON PyDataType_ISPYTHON -#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE -#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF -#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED -#define PyDescr_ISOBJECT PyDataType_ISOBJECT -#define PyDescr_HASFIELDS PyDataType_HASFIELDS - -#define PyArray_LITTLE NPY_LITTLE -#define PyArray_BIG NPY_BIG -#define PyArray_NATIVE NPY_NATIVE -#define PyArray_SWAP NPY_SWAP -#define PyArray_IGNORE NPY_IGNORE - -#define PyArray_NATBYTE NPY_NATBYTE -#define PyArray_OPPBYTE NPY_OPPBYTE - -#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE - -#define PyArray_USE_PYMEM NPY_USE_PYMEM - -#define PyArray_RemoveLargest PyArray_RemoveSmallest - -#define PyArray_UCS4 npy_ucs4 - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/oldnumeric.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/oldnumeric.h deleted file mode 100644 index 38530fa..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/oldnumeric.h +++ /dev/null @@ -1,25 +0,0 @@ -#include "arrayobject.h" - -#ifndef PYPY_VERSION -#ifndef REFCOUNT -# define REFCOUNT NPY_REFCOUNT -# define MAX_ELSIZE 16 -#endif -#endif - -#define PyArray_UNSIGNED_TYPES -#define PyArray_SBYTE NPY_BYTE -#define PyArray_CopyArray PyArray_CopyInto -#define _PyArray_multiply_list PyArray_MultiplyIntList -#define PyArray_ISSPACESAVER(m) NPY_FALSE -#define PyScalarArray_Check PyArray_CheckScalar - -#define CONTIGUOUS NPY_CONTIGUOUS -#define OWN_DIMENSIONS 0 -#define OWN_STRIDES 0 -#define OWN_DATA NPY_OWNDATA -#define SAVESPACE 0 -#define SAVESPACEBIT 0 - -#undef import_array -#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/bitgen.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/bitgen.h deleted file mode 100644 index 83c2858..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/bitgen.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _RANDOM_BITGEN_H
-#define _RANDOM_BITGEN_H
-
-#pragma once
-#include <stddef.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-/* Must match the declaration in numpy/random/.pxd */
-
-typedef struct bitgen {
-  void *state;
-  uint64_t (*next_uint64)(void *st);
-  uint32_t (*next_uint32)(void *st);
-  double (*next_double)(void *st);
-  uint64_t (*next_raw)(void *st);
-} bitgen_t;
-
-
-#endif
diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/distributions.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/distributions.h
deleted file mode 100644
index c474c4d..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/random/distributions.h
+++ /dev/null
@@ -1,200 +0,0 @@
-#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_
-#define _RANDOMDGEN__DISTRIBUTIONS_H_
-
-#include "Python.h"
-#include "numpy/npy_common.h"
-#include <stddef.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-#include "numpy/npy_math.h"
-#include "numpy/random/bitgen.h"
-
-/*
- * RAND_INT_TYPE is used to share integer generators with RandomState, which
- * used long in place of int64_t. If changing a distribution that uses
- * RAND_INT_TYPE, then the original unmodified copy must be retained for
- * use in RandomState by copying to the legacy distributions source file.
- */
-#ifdef NP_RANDOM_LEGACY
-#define RAND_INT_TYPE long
-#define RAND_INT_MAX LONG_MAX
-#else
-#define RAND_INT_TYPE int64_t
-#define RAND_INT_MAX INT64_MAX
-#endif
-
-#ifdef _MSC_VER
-#define DECLDIR __declspec(dllexport)
-#else
-#define DECLDIR extern
-#endif
-
-#ifndef MIN
-#define MIN(x, y) (((x) < (y)) ? x : y)
-#define MAX(x, y) (((x) > (y)) ? x : y)
-#endif
-
-#ifndef M_PI
-#define M_PI 3.14159265358979323846264338328
-#endif
-
-typedef struct s_binomial_t {
-  int has_binomial; /* !=0: following parameters initialized for binomial */
-  double psave;
-  RAND_INT_TYPE nsave;
-  double r;
-  double q;
-  double fm;
-  RAND_INT_TYPE m;
-  double p1;
-  double xm;
-  double xl;
-  double xr;
-  double c;
-  double laml;
-  double lamr;
-  double p2;
-  double p3;
-  double p4;
-} binomial_t;
-
-DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
-DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
-DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
-DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
-
-DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
-DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
-DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
-DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
-
-DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
-DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
-DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
-DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
-DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
-DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
-
-DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
-DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
-DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
-DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
-DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
-DECLDIR float random_standard_gamma_f(bitgen_t
*bitgen_state, float shape); - -DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale); - -DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale); -DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale); - -DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale); -DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range); -DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b); -DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df); -DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden); -DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state); -DECLDIR double random_pareto(bitgen_t *bitgen_state, double a); -DECLDIR double random_weibull(bitgen_t *bitgen_state, double a); -DECLDIR double random_power(bitgen_t *bitgen_state, double a); -DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale); -DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale); -DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale); -DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma); -DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode); -DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df); -DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, - double nonc); -DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, - double dfden, double nonc); -DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale); -DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa); -DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, - double right); - -DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); -DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, - double p); - -DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, - int64_t n, binomial_t *binomial); - -DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p); -DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p); -DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); -DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, - int64_t good, int64_t bad, int64_t sample); -DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); - -/* Generate random uint64 numbers in closed interval [off, off + rng]. */ -DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, - uint64_t rng, uint64_t mask, - bool use_masked); - -/* Generate random uint32 numbers in closed interval [off, off + rng]. 
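- *
- * For illustration, a minimal sketch of the mask-and-reject idea the
- * bounded generators use when use_masked is set (bg is an assumed
- * bitgen_t pointer; mask is the smallest 2**k - 1 that is >= rng):
- *
- *     uint32_t v;
- *     do {
- *         v = bg->next_uint32(bg->state) & mask;
- *     } while (v > rng);     reject draws outside [0, rng]
- *     return off + v;        shift into [off, off + rng]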
*/ -DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, - uint32_t off, uint32_t rng, - uint32_t mask, bool use_masked, - int *bcnt, uint32_t *buf); -DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, - uint16_t off, uint16_t rng, - uint16_t mask, bool use_masked, - int *bcnt, uint32_t *buf); -DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off, - uint8_t rng, uint8_t mask, - bool use_masked, int *bcnt, - uint32_t *buf); -DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off, - npy_bool rng, npy_bool mask, - bool use_masked, int *bcnt, - uint32_t *buf); - -DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, - uint64_t rng, npy_intp cnt, - bool use_masked, uint64_t *out); -DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off, - uint32_t rng, npy_intp cnt, - bool use_masked, uint32_t *out); -DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off, - uint16_t rng, npy_intp cnt, - bool use_masked, uint16_t *out); -DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, - uint8_t rng, npy_intp cnt, - bool use_masked, uint8_t *out); -DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, - npy_bool rng, npy_intp cnt, - bool use_masked, npy_bool *out); - -DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, - double *pix, npy_intp d, binomial_t *binomial); - -/* multivariate hypergeometric, "count" method */ -DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, - int64_t total, - size_t num_colors, int64_t *colors, - int64_t nsample, - size_t num_variates, int64_t *variates); - -/* multivariate hypergeometric, "marginals" method */ -DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, - int64_t total, - size_t num_colors, int64_t *colors, - int64_t nsample, - size_t num_variates, int64_t *variates); - -/* Common to legacy-distributions.c and distributions.c but not exported */ - -RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, - RAND_INT_TYPE n, - double p, - binomial_t *binomial); -RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, - RAND_INT_TYPE n, - double p, - binomial_t *binomial); -double random_loggam(double x); -static NPY_INLINE double next_double(bitgen_t *bitgen_state) { - return bitgen_state->next_double(bitgen_state->state); -} - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufunc_api.txt b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufunc_api.txt deleted file mode 100644 index 58a2689..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufunc_api.txt +++ /dev/null @@ -1,338 +0,0 @@ - -================= -NumPy Ufunc C-API -================= -:: - - PyObject * - PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void - **data, char *types, int ntypes, int nin, int - nout, int identity, const char *name, const - char *doc, int unused) - - -:: - - int - PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int - usertype, PyUFuncGenericFunction - function, const int *arg_types, void - *data) - - -:: - - int - PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject - *kwds, PyArrayObject **op) - - -This generic function is called with the ufunc object, the arguments to it, -and an array of (pointers to) PyArrayObjects which are NULL. 
- -'op' is an array of at least NPY_MAXARGS PyArrayObject *. - -:: - - void - PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - int - PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject - **errobj) - - -On return, if errobj is populated with a non-NULL value, the caller -owns a new reference to errobj. 
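-
-For illustration, a minimal sketch of what that ownership rule implies
-for a caller (error handling elided)::
-
-    char name[] = "add";
-    PyObject *errobj = NULL;
-    int bufsize, errmask;
-    if (PyUFunc_GetPyValues(name, &bufsize, &errmask, &errobj) >= 0) {
-        /* ... consult errmask, use errobj ... */
-        Py_XDECREF(errobj);   /* caller owns the new reference */
-    }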
- -:: - - int - PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) - - -:: - - void - PyUFunc_clearfperr() - - -:: - - int - PyUFunc_getfperr(void ) - - -:: - - int - PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int - *first) - - -:: - - int - PyUFunc_ReplaceLoopBySignature(PyUFuncObject - *func, PyUFuncGenericFunction - newfunc, const int - *signature, PyUFuncGenericFunction - *oldfunc) - - -:: - - PyObject * - PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void - **data, char *types, int - ntypes, int nin, int nout, int - identity, const char *name, const - char *doc, int unused, const char - *signature) - - -:: - - int - PyUFunc_SetUsesArraysAsData(void **data, size_t i) - - -:: - - void - PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - int - PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING - casting, PyArrayObject - **operands, PyObject - *type_tup, PyArray_Descr **out_dtypes) - - -This function applies the default type resolution rules -for the provided ufunc. - -Returns 0 on success, -1 on error. - -:: - - int - PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING - casting, PyArrayObject - **operands, PyArray_Descr **dtypes) - - -Validates that the input operands can be cast to -the input types, and the output types can be cast to -the output operands where provided. - -Returns 0 on success, -1 (with exception raised) on validation failure. - -:: - - int - PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr - *user_dtype, PyUFuncGenericFunction - function, PyArray_Descr - **arg_dtypes, void *data) - - -:: - - PyObject * - PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction - *func, void - **data, char - *types, int ntypes, int - nin, int nout, int - identity, const char - *name, const char - *doc, const int - unused, const char - *signature, PyObject - *identity_value) - - diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h deleted file mode 100644 index 5ff4a00..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h +++ /dev/null @@ -1,369 +0,0 @@ -#ifndef Py_UFUNCOBJECT_H -#define Py_UFUNCOBJECT_H - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * The legacy generic inner loop for a standard element-wise or - * generalized ufunc. - */ -typedef void (*PyUFuncGenericFunction) - (char **args, - npy_intp *dimensions, - npy_intp *strides, - void *innerloopdata); - -/* - * The most generic one-dimensional inner loop for - * a masked standard element-wise ufunc. "Masked" here means that it skips - * doing calculations on any items for which the maskptr array has a true - * value. 
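- *
- * For illustration, a minimal sketch of that contract as stated above,
- * for a unary double loop (op is an assumed scalar kernel): items whose
- * mask value is true are skipped and keep their previous value.
- *
- *     char *src = dataptrs[0], *dst = dataptrs[1];
- *     for (npy_intp i = 0; i < count; i++) {
- *         if (!*(npy_bool *)maskptr) {
- *             *(double *)dst = op(*(double *)src);
- *         }
- *         src += strides[0];
- *         dst += strides[1];
- *         maskptr += mask_stride;
- *     }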
- */
-typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
-                            char **dataptrs, npy_intp *strides,
-                            char *maskptr, npy_intp mask_stride,
-                            npy_intp count,
-                            NpyAuxData *innerloopdata);
-
-/* Forward declaration for the type resolver and loop selector typedefs */
-struct _tagPyUFuncObject;
-
-/*
- * Given the operands for calling a ufunc, this function should determine
- * the calculation input and output data types and return an inner loop
- * function. It should validate that the casting rule is being followed,
- * and fail if it is not.
- *
- * For backwards compatibility, the regular type resolution function does not
- * support auxiliary data with object semantics. The type resolution call
- * which returns a masked generic function returns a standard NpyAuxData
- * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
- * work.
- *
- * ufunc:             The ufunc object.
- * casting:           The 'casting' parameter provided to the ufunc.
- * operands:          An array of length (ufunc->nin + ufunc->nout),
- *                    with the output parameters possibly NULL.
- * type_tup:          Either NULL, or the type_tup passed to the ufunc.
- * out_dtypes:        An array which should be populated with new
- *                    references to (ufunc->nin + ufunc->nout) new
- *                    dtypes, one for each input and output. These
- *                    dtypes should all be in native-endian format.
- *
- * Should return 0 on success, -1 on failure (with exception set),
- * or -2 if Py_NotImplemented should be returned.
- */
-typedef int (PyUFunc_TypeResolutionFunc)(
-                                struct _tagPyUFuncObject *ufunc,
-                                NPY_CASTING casting,
-                                PyArrayObject **operands,
-                                PyObject *type_tup,
-                                PyArray_Descr **out_dtypes);
-
-/*
- * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc,
- * and an array of fixed strides (the array will contain NPY_MAX_INTP for
- * strides which are not necessarily fixed), returns an inner loop
- * with associated auxiliary data.
- *
- * For backwards compatibility, there is a variant of the inner loop
- * selection which returns an inner loop irrespective of the strides,
- * and with static void* auxiliary data instead of dynamically
- * allocatable NpyAuxData * auxiliary data.
- *
- * ufunc:             The ufunc object.
- * dtypes:            An array which has been populated with dtypes,
- *                    in most cases by the type resolution function
- *                    for the same ufunc.
- * fixed_strides:     For each input/output, either the stride that
- *                    will be used every time the function is called
- *                    or NPY_MAX_INTP if the stride might change or
- *                    is not known ahead of time. The loop selection
- *                    function may use this stride to pick inner loops
- *                    which are optimized for contiguous or 0-stride
- *                    cases.
- * out_innerloop:     Should be populated with the correct ufunc inner
- *                    loop for the given type.
- * out_innerloopdata: Should be populated with the void* data to
- *                    be passed into the out_innerloop function.
- * out_needs_api:     If the inner loop needs to use the Python API,
- *                    should set this to 1, otherwise should leave
- *                    this untouched.
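- *
- * For illustration, a minimal sketch of a selector that uses
- * fixed_strides (contig_loop and strided_loop are assumed kernels,
- * not part of this header):
- *
- *     if (fixed_strides[0] == sizeof(double) &&
- *         fixed_strides[1] == sizeof(double)) {
- *         *out_innerloop = contig_loop;
- *     } else {
- *         *out_innerloop = strided_loop;
- *     }
- *     *out_innerloopdata = NULL;
- *     return 0;      out_needs_api left untouched: no Python API used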
- */ -typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_needs_api); -typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyArray_Descr *mask_dtype, - npy_intp *fixed_strides, - npy_intp fixed_mask_stride, - PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop, - NpyAuxData **out_innerloopdata, - int *out_needs_api); - -typedef struct _tagPyUFuncObject { - PyObject_HEAD - /* - * nin: Number of inputs - * nout: Number of outputs - * nargs: Always nin + nout (Why is it stored?) - */ - int nin, nout, nargs; - - /* - * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero - * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone, - * PyUFunc_IdentityValue. - */ - int identity; - - /* Array of one-dimensional core loops */ - PyUFuncGenericFunction *functions; - /* Array of funcdata that gets passed into the functions */ - void **data; - /* The number of elements in 'functions' and 'data' */ - int ntypes; - - /* Used to be unused field 'check_return' */ - int reserved1; - - /* The name of the ufunc */ - const char *name; - - /* Array of type numbers, of size ('nargs' * 'ntypes') */ - char *types; - - /* Documentation string */ - const char *doc; - - void *ptr; - PyObject *obj; - PyObject *userloops; - - /* generalized ufunc parameters */ - - /* 0 for scalar ufunc; 1 for generalized ufunc */ - int core_enabled; - /* number of distinct dimension names in signature */ - int core_num_dim_ix; - - /* - * dimension indices of input/output argument k are stored in - * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] - */ - - /* numbers of core dimensions of each argument */ - int *core_num_dims; - /* - * dimension indices in a flatted form; indices - * are in the range of [0,core_num_dim_ix) - */ - int *core_dim_ixs; - /* - * positions of 1st core dimensions of each - * argument in core_dim_ixs, equivalent to cumsum(core_num_dims) - */ - int *core_offsets; - /* signature string for printing purpose */ - char *core_signature; - - /* - * A function which resolves the types and fills an array - * with the dtypes for the inputs and outputs. - */ - PyUFunc_TypeResolutionFunc *type_resolver; - /* - * A function which returns an inner loop written for - * NumPy 1.6 and earlier ufuncs. This is for backwards - * compatibility, and may be NULL if inner_loop_selector - * is specified. - */ - PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; - /* - * This was blocked off to be the "new" inner loop selector in 1.7, - * but this was never implemented. (This is also why the above - * selector is called the "legacy" selector.) - */ - void *reserved2; - /* - * A function which returns a masked inner loop for the ufunc. - */ - PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector; - - /* - * List of flags for each operand when ufunc is called by nditer object. - * These flags will be used in addition to the default flags for each - * operand set by nditer object. - */ - npy_uint32 *op_flags; - - /* - * List of global flags used when ufunc is called by nditer object. - * These flags will be used in addition to the default global flags - * set by nditer object. - */ - npy_uint32 iter_flags; - - /* New in NPY_API_VERSION 0x0000000D and above */ - - /* - * for each core_num_dim_ix distinct dimension names, - * the possible "frozen" size (-1 if not frozen). 
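- *
- * For illustration: with a signature such as "(3),(n)->(n)", the entry
- * for the dimension written as the literal 3 would hold 3 (frozen),
- * while the entry for the symbolic name n would hold -1 (not frozen).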
- */ - npy_intp *core_dim_sizes; - - /* - * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags - */ - npy_uint32 *core_dim_flags; - - /* Identity for reduction, when identity == PyUFunc_IdentityValue */ - PyObject *identity_value; - -} PyUFuncObject; - -#include "arrayobject.h" -/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ -/* the core dimension's size will be determined by the operands. */ -#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 -/* the core dimension may be absent */ -#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 -/* flags inferred during execution */ -#define UFUNC_CORE_DIM_MISSING 0x00040000 - -#define UFUNC_ERR_IGNORE 0 -#define UFUNC_ERR_WARN 1 -#define UFUNC_ERR_RAISE 2 -#define UFUNC_ERR_CALL 3 -#define UFUNC_ERR_PRINT 4 -#define UFUNC_ERR_LOG 5 - - /* Python side integer mask */ - -#define UFUNC_MASK_DIVIDEBYZERO 0x07 -#define UFUNC_MASK_OVERFLOW 0x3f -#define UFUNC_MASK_UNDERFLOW 0x1ff -#define UFUNC_MASK_INVALID 0xfff - -#define UFUNC_SHIFT_DIVIDEBYZERO 0 -#define UFUNC_SHIFT_OVERFLOW 3 -#define UFUNC_SHIFT_UNDERFLOW 6 -#define UFUNC_SHIFT_INVALID 9 - - -#define UFUNC_OBJ_ISOBJECT 1 -#define UFUNC_OBJ_NEEDS_API 2 - - /* Default user error mode */ -#define UFUNC_ERR_DEFAULT \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) - -#if NPY_ALLOW_THREADS -#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); -#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); -#else -#define NPY_LOOP_BEGIN_THREADS -#define NPY_LOOP_END_THREADS -#endif - -/* - * UFunc has unit of 0, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_Zero 0 -/* - * UFunc has unit of 1, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_One 1 -/* - * UFunc has unit of -1, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. Intended for - * bitwise_and reduction. - */ -#define PyUFunc_MinusOne 2 -/* - * UFunc has no unit, and the order of operations cannot be reordered. - * This case does not allow reduction with multiple axes at once. - */ -#define PyUFunc_None -1 -/* - * UFunc has no unit, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_ReorderableNone -2 -/* - * UFunc unit is an identity_value, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_IdentityValue -3 - - -#define UFUNC_REDUCE 0 -#define UFUNC_ACCUMULATE 1 -#define UFUNC_REDUCEAT 2 -#define UFUNC_OUTER 3 - - -typedef struct { - int nin; - int nout; - PyObject *callable; -} PyUFunc_PyFuncData; - -/* A linked-list of function information for - user-defined 1-d loops. - */ -typedef struct _loop1d_info { - PyUFuncGenericFunction func; - void *data; - int *arg_types; - struct _loop1d_info *next; - int nargs; - PyArray_Descr **arg_dtypes; -} PyUFunc_Loop1d; - - -#include "__ufunc_api.h" - -#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" - -/* - * THESE MACROS ARE DEPRECATED. - * Use npy_set_floatstatus_* in the npymath library. 
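- *
- * For illustration, the migration is a direct rename; code that called
- *
- *     generate_divbyzero_error();
- *
- * should now call
- *
- *     npy_set_floatstatus_divbyzero();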
- */ -#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO -#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW -#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW -#define UFUNC_FPE_INVALID NPY_FPE_INVALID - -#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() -#define generate_overflow_error() npy_set_floatstatus_overflow() - - /* Make sure it gets defined if it isn't already */ -#ifndef UFUNC_NOFPE -/* Clear the floating point exception default of Borland C++ */ -#if defined(__BORLANDC__) -#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); -#else -#define UFUNC_NOFPE -#endif -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_UFUNCOBJECT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/utils.h b/venv/lib/python3.7/site-packages/numpy/core/include/numpy/utils.h deleted file mode 100644 index 32218b8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/include/numpy/utils.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef __NUMPY_UTILS_HEADER__ -#define __NUMPY_UTILS_HEADER__ - -#ifndef __COMP_NPY_UNUSED - #if defined(__GNUC__) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - # elif defined(__ICC) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - # elif defined(__clang__) - #define __COMP_NPY_UNUSED __attribute__ ((unused)) - #else - #define __COMP_NPY_UNUSED - #endif -#endif - -/* Use this to tag a variable as not used. It will remove unused variable - * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable - * to avoid accidental use */ -#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED - -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/core/lib/libnpymath.a b/venv/lib/python3.7/site-packages/numpy/core/lib/libnpymath.a deleted file mode 100644 index 1273173..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/lib/libnpymath.a and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini deleted file mode 100644 index 5840f5e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini +++ /dev/null @@ -1,12 +0,0 @@ -[meta] -Name = mlib -Description = Math library used with this version of numpy -Version = 1.0 - -[default] -Libs=-lm -Cflags= - -[msvc] -Libs=m.lib -Cflags= diff --git a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini deleted file mode 100644 index 3e465ad..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini +++ /dev/null @@ -1,20 +0,0 @@ -[meta] -Name=npymath -Description=Portable, core math library implementing C99 standard -Version=0.1 - -[variables] -pkgname=numpy.core -prefix=${pkgdir} -libdir=${prefix}/lib -includedir=${prefix}/include - -[default] -Libs=-L${libdir} -lnpymath -Cflags=-I${includedir} -Requires=mlib - -[msvc] -Libs=/LIBPATH:${libdir} npymath.lib -Cflags=/INCLUDE:${includedir} -Requires=mlib diff --git a/venv/lib/python3.7/site-packages/numpy/core/machar.py b/venv/lib/python3.7/site-packages/numpy/core/machar.py deleted file mode 100644 index 202580b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/machar.py +++ /dev/null @@ -1,344 +0,0 @@ -""" -Machine arithmetics - determine the parameters of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -from __future__ import division, absolute_import, print_function - 
-__all__ = ['MachAr'] - -from numpy.core.fromnumeric import any -from numpy.core._ufunc_config import errstate -from numpy.core.overrides import set_module - -# Need to speed this up...especially for longfloat - -@set_module('numpy') -class MachAr(object): - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating point number ``beta**minexp`` (the smallest [in - magnitude] usable floating value). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - Same as `xmin`. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. - - """ - - def __init__(self, float_conv=float,int_conv=int, - float_to_float=float, - float_to_str=lambda v:'%24.16e' % v, - title='Python floating point number'): - """ - - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the runninng arch. 
- with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? - # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp-a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp-a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd == 0 and any(temp-tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp-one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp-one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd == 0 and any(temp*one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in range(max_iterN): - y = z - z = y*y - a = z*one # Check here for underflow - temp = z*t - if any(a+a == zero) or any(abs(z) >= y): - break - temp1 = temp * betain - if any(temp1*beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any((a + a) != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1*beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax*one != xmax): - xmax = one - beta*epsneg - xmax = xmax / 
(xmin*beta*beta*beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta == 2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - fmt = ( - 'Machine parameters for %(title)s\n' - '---------------------------------------------------------------------\n' - 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' - 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' - 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' - 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' - 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' - '---------------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/venv/lib/python3.7/site-packages/numpy/core/memmap.py b/venv/lib/python3.7/site-packages/numpy/core/memmap.py deleted file mode 100644 index 0626455..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/memmap.py +++ /dev/null @@ -1,334 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from .numeric import uint8, ndarray, dtype -from numpy.compat import ( - long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path -) -from numpy.core.overrides import set_module - -__all__ = ['memmap'] - -dtypedescr = dtype -valid_filemodes = ["r", "c", "r+", "w+"] -writeable_filemodes = ["r+", "w+"] - -mode_equivalents = { - "readonly":"r", - "copyonwrite":"c", - "readwrite":"r+", - "write":"w+" - } - - -@set_module('numpy') -class memmap(ndarray): - """Create a memory-map to an array stored in a *binary* file on disk. - - Memory-mapped files are used for accessing small segments of large files - on disk, without reading the entire file into memory. NumPy's - memmap's are array-like objects. This differs from Python's ``mmap`` - module, which uses file-like objects. - - This subclass of ndarray has some unpleasant interactions with - some operations, because it doesn't quite fit properly as a subclass. - An alternative to using this subclass is to create the ``mmap`` - object yourself, then create an ndarray with ndarray.__new__ directly, - passing the object created in its 'buffer=' parameter. - - This class may at some point be turned into a factory function - which returns a view into an mmap buffer. - - Delete the memmap instance to close the memmap file. - - - Parameters - ---------- - filename : str, file-like object, or pathlib.Path instance - The file name or file object to be used as the array data buffer. - dtype : data-type, optional - The data-type used to interpret the file contents. - Default is `uint8`. 
- mode : {'r+', 'r', 'w+', 'c'}, optional - The file is opened in this mode: - - +------+-------------------------------------------------------------+ - | 'r' | Open existing file for reading only. | - +------+-------------------------------------------------------------+ - | 'r+' | Open existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'w+' | Create or overwrite existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'c' | Copy-on-write: assignments affect data in memory, but | - | | changes are not saved to disk. The file on disk is | - | | read-only. | - +------+-------------------------------------------------------------+ - - Default is 'r+'. - offset : int, optional - In the file, array data starts at this offset. Since `offset` is - measured in bytes, it should normally be a multiple of the byte-size - of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of - file are valid; The file will be extended to accommodate the - additional data. By default, ``memmap`` will start at the beginning of - the file, even if ``filename`` is a file pointer ``fp`` and - ``fp.tell() != 0``. - shape : tuple, optional - The desired shape of the array. If ``mode == 'r'`` and the number - of remaining bytes after `offset` is not a multiple of the byte-size - of `dtype`, you must specify `shape`. By default, the returned array - will be 1-D with the number of elements determined by file size - and data-type. - order : {'C', 'F'}, optional - Specify the order of the ndarray memory layout: - :term:`row-major`, C-style or :term:`column-major`, - Fortran-style. This only has an effect if the shape is - greater than 1-D. The default order is 'C'. - - Attributes - ---------- - filename : str or pathlib.Path instance - Path to the mapped file. - offset : int - Offset position in the file. - mode : str - File mode. - - Methods - ------- - flush - Flush any changes in memory to file on disk. - When you delete a memmap object, flush is called first to write - changes to disk before removing the object. - - - See also - -------- - lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. - - Notes - ----- - The memmap object can be used anywhere an ndarray is accepted. - Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns - ``True``. - - Memory-mapped files cannot be larger than 2GB on 32-bit systems. - - When a memmap causes a file to be created or extended beyond its - current size in the filesystem, the contents of the new part are - unspecified. On systems with POSIX filesystem semantics, the extended - part will be filled with zero bytes. - - Examples - -------- - >>> data = np.arange(12, dtype='float32') - >>> data.resize((3,4)) - - This example uses a temporary file so that doctest doesn't write - files to your directory. You would use a 'normal' filename. 
- - >>> from tempfile import mkdtemp - >>> import os.path as path - >>> filename = path.join(mkdtemp(), 'newfile.dat') - - Create a memmap with dtype and shape that matches our data: - - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) - >>> fp - memmap([[0., 0., 0., 0.], - [0., 0., 0., 0.], - [0., 0., 0., 0.]], dtype=float32) - - Write data to memmap array: - - >>> fp[:] = data[:] - >>> fp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - >>> fp.filename == path.abspath(filename) - True - - Deletion flushes memory changes to disk before removing the object: - - >>> del fp - - Load the memmap and verify data was stored: - - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> newfp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Read-only memmap: - - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> fpr.flags.writeable - False - - Copy-on-write memmap: - - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) - >>> fpc.flags.writeable - True - - It's possible to assign to copy-on-write array, but values are only - written into the memory copy of the array, and not written to disk: - - >>> fpc - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - >>> fpc[0,:] = 0 - >>> fpc - memmap([[ 0., 0., 0., 0.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - File on disk is unchanged: - - >>> fpr - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Offset into a memmap: - - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) - >>> fpo - memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) - - """ - - __array_priority__ = -100.0 - - def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, - shape=None, order='C'): - # Import here to minimize 'import numpy' overhead - import mmap - import os.path - try: - mode = mode_equivalents[mode] - except KeyError: - if mode not in valid_filemodes: - raise ValueError("mode must be one of %s" % - (valid_filemodes + list(mode_equivalents.keys()))) - - if mode == 'w+' and shape is None: - raise ValueError("shape must be given") - - if hasattr(filename, 'read'): - f_ctx = contextlib_nullcontext(filename) - else: - f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b') - - with f_ctx as fid: - fid.seek(0, 2) - flen = fid.tell() - descr = dtypedescr(dtype) - _dbytes = descr.itemsize - - if shape is None: - bytes = flen - offset - if bytes % _dbytes: - raise ValueError("Size of available data is not a " - "multiple of the data-type size.") - size = bytes // _dbytes - shape = (size,) - else: - if not isinstance(shape, tuple): - shape = (shape,) - size = np.intp(1) # avoid default choice of np.int_, which might overflow - for k in shape: - size *= k - - bytes = long(offset + size*_dbytes) - - if mode in ('w+', 'r+') and flen < bytes: - fid.seek(bytes - 1, 0) - fid.write(b'\0') - fid.flush() - - if mode == 'c': - acc = mmap.ACCESS_COPY - elif mode == 'r': - acc = mmap.ACCESS_READ - else: - acc = mmap.ACCESS_WRITE - - start = offset - offset % mmap.ALLOCATIONGRANULARITY - bytes -= start - array_offset = offset - start - mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) - - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, - offset=array_offset, order=order) - self._mmap = mm - self.offset = offset - self.mode = mode - - if 
is_pathlib_path(filename):
-            # special case - if we were constructed with a pathlib.path,
-            # then filename is a path object, not a string
-            self.filename = filename.resolve()
-        elif hasattr(fid, "name") and isinstance(fid.name, basestring):
-            # py3 returns int for TemporaryFile().name
-            self.filename = os.path.abspath(fid.name)
-        # same as memmap copies (e.g. memmap + 1)
-        else:
-            self.filename = None
-
-        return self
-
-    def __array_finalize__(self, obj):
-        if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
-            self._mmap = obj._mmap
-            self.filename = obj.filename
-            self.offset = obj.offset
-            self.mode = obj.mode
-        else:
-            self._mmap = None
-            self.filename = None
-            self.offset = None
-            self.mode = None
-
-    def flush(self):
-        """
-        Write any changes in the array to the file on disk.
-
-        For further information, see `memmap`.
-
-        Parameters
-        ----------
-        None
-
-        See Also
-        --------
-        memmap
-
-        """
-        if self.base is not None and hasattr(self.base, 'flush'):
-            self.base.flush()
-
-    def __array_wrap__(self, arr, context=None):
-        arr = super(memmap, self).__array_wrap__(arr, context)
-
-        # Return a memmap if a memmap was given as the output of the
-        # ufunc. Leave the arr class unchanged if self is not a memmap
-        # to keep original memmap subclasses behavior
-        if self is arr or type(self) is not memmap:
-            return arr
-        # Return scalar instead of 0d memmap, e.g. for np.sum with
-        # axis=None
-        if arr.shape == ():
-            return arr[()]
-        # Return ndarray otherwise
-        return arr.view(np.ndarray)
-
-    def __getitem__(self, index):
-        res = super(memmap, self).__getitem__(index)
-        if type(res) is memmap and res._mmap is None:
-            return res.view(type=ndarray)
-        return res
diff --git a/venv/lib/python3.7/site-packages/numpy/core/multiarray.py b/venv/lib/python3.7/site-packages/numpy/core/multiarray.py
deleted file mode 100644
index c0fcc10..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/multiarray.py
+++ /dev/null
@@ -1,1631 +0,0 @@
-"""
-Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
-the multiarray and umath c-extension modules were merged into a single
-_multiarray_umath extension module. So we replicate the old namespace
-by importing from the extension module.
-
-"""
-
-import functools
-import sys
-import warnings
-
-from . import overrides
-from . 
import _multiarray_umath -import numpy as np -from numpy.core._multiarray_umath import * -from numpy.core._multiarray_umath import ( - _fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string, - _ARRAY_API, _monotonicity, _get_ndarray_c_version - ) - -__all__ = [ - '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', - 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', - 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', - 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose', - '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity', - 'add_docstring', 'arange', 'array', 'bincount', 'broadcast', - 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', - 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', - 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', - 'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', - 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', - 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner', - 'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort', - 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', - 'nested_iters', 'normalize_axis_index', 'packbits', - 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', - 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops', - 'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt', - 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', - 'where', 'zeros'] -if sys.version_info.major < 3: - __all__ += ['newbuffer', 'getbuffer'] - -# For backward compatibility, make sure pickle imports these functions from here -_reconstruct.__module__ = 'numpy.core.multiarray' -scalar.__module__ = 'numpy.core.multiarray' - - -arange.__module__ = 'numpy' -array.__module__ = 'numpy' -datetime_data.__module__ = 'numpy' -empty.__module__ = 'numpy' -frombuffer.__module__ = 'numpy' -fromfile.__module__ = 'numpy' -fromiter.__module__ = 'numpy' -frompyfunc.__module__ = 'numpy' -fromstring.__module__ = 'numpy' -geterrobj.__module__ = 'numpy' -may_share_memory.__module__ = 'numpy' -nested_iters.__module__ = 'numpy' -promote_types.__module__ = 'numpy' -set_numeric_ops.__module__ = 'numpy' -seterrobj.__module__ = 'numpy' -zeros.__module__ = 'numpy' - - -# We can't verify dispatcher signatures because NumPy's C functions don't -# support introspection. -array_function_from_c_func_and_dispatcher = functools.partial( - overrides.array_function_from_dispatcher, - module='numpy', docs_from_dispatcher=True, verify=False) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) -def empty_like(prototype, dtype=None, order=None, subok=None, shape=None): - """ - empty_like(prototype, dtype=None, order='K', subok=True, shape=None) - - Return a new array with the same shape and type as a given array. - - Parameters - ---------- - prototype : array_like - The shape and data-type of `prototype` define these same attributes - of the returned array. - dtype : data-type, optional - Overrides the data type of the result. - - .. versionadded:: 1.6.0 - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran - contiguous, 'C' otherwise. 'K' means match the layout of ``prototype`` - as closely as possible. - - .. 
versionadded:: 1.6.0 - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of uninitialized (arbitrary) data with the same - shape and type as `prototype`. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full_like : Return a new array with shape of input filled with value. - empty : Return a new uninitialized array. - - Notes - ----- - This function does *not* initialize the returned array; to do that use - `zeros_like` or `ones_like` instead. It may be marginally faster than - the functions that do set the array values. - - Examples - -------- - >>> a = ([1,2,3], [4,5,6]) # a is array-like - >>> np.empty_like(a) - array([[-1073741821, -1073741821, 3], # uninitialized - [ 0, 0, -1073741821]]) - >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) - >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized - [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) - - """ - return (prototype,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) -def concatenate(arrays, axis=None, out=None): - """ - concatenate((a1, a2, ...), axis=0, out=None) - - Join a sequence of arrays along an existing axis. - - Parameters - ---------- - a1, a2, ... : sequence of array_like - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - The axis along which the arrays will be joined. If axis is None, - arrays are flattened before use. Default is 0. - out : ndarray, optional - If provided, the destination to place the result. The shape must be - correct, matching that of what concatenate would have returned if no - out argument were specified. - - Returns - ------- - res : ndarray - The concatenated array. - - See Also - -------- - ma.concatenate : Concatenate function that preserves input masks. - array_split : Split an array into multiple sub-arrays of equal or - near-equal size. - split : Split array into a list of multiple sub-arrays of equal size. - hsplit : Split array into multiple sub-arrays horizontally (column wise) - vsplit : Split array into multiple sub-arrays vertically (row wise) - dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). - stack : Stack a sequence of arrays along a new axis. - hstack : Stack arrays in sequence horizontally (column wise) - vstack : Stack arrays in sequence vertically (row wise) - dstack : Stack arrays in sequence depth wise (along third dimension) - block : Assemble arrays from blocks. - - Notes - ----- - When one or more of the arrays to be concatenated is a MaskedArray, - this function will return a MaskedArray object instead of an ndarray, - but the input masks are *not* preserved. In cases where a MaskedArray - is expected as input, use the ma.concatenate function from the masked - array module instead. 
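The ``out`` parameter documented above must be preallocated with exactly the shape concatenate would produce; a brief usage sketch (array values arbitrary):

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
out = np.empty((3, 2), dtype=int)        # shape must match the result exactly
np.concatenate((a, b), axis=0, out=out)  # result is written into out
# out -> array([[1, 2], [3, 4], [5, 6]])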
- - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> b = np.array([[5, 6]]) - >>> np.concatenate((a, b), axis=0) - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.concatenate((a, b.T), axis=1) - array([[1, 2, 5], - [3, 4, 6]]) - >>> np.concatenate((a, b), axis=None) - array([1, 2, 3, 4, 5, 6]) - - This function will not preserve masking of MaskedArray inputs. - - >>> a = np.ma.arange(3) - >>> a[1] = np.ma.masked - >>> b = np.arange(2, 5) - >>> a - masked_array(data=[0, --, 2], - mask=[False, True, False], - fill_value=999999) - >>> b - array([2, 3, 4]) - >>> np.concatenate([a, b]) - masked_array(data=[0, 1, 2, 2, 3, 4], - mask=False, - fill_value=999999) - >>> np.ma.concatenate([a, b]) - masked_array(data=[0, --, 2, 2, 3, 4], - mask=[False, True, False, False, False, False], - fill_value=999999) - - """ - if out is not None: - # optimize for the typical case where only arrays is provided - arrays = list(arrays) - arrays.append(out) - return arrays - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) -def inner(a, b): - """ - inner(a, b) - - Inner product of two arrays. - - Ordinary inner product of vectors for 1-D arrays (without complex - conjugation), in higher dimensions a sum product over the last axes. - - Parameters - ---------- - a, b : array_like - If `a` and `b` are nonscalar, their last dimensions must match. - - Returns - ------- - out : ndarray - `out.shape = a.shape[:-1] + b.shape[:-1]` - - Raises - ------ - ValueError - If the last dimension of `a` and `b` has different size. - - See Also - -------- - tensordot : Sum products over arbitrary axes. - dot : Generalised matrix product, using second last dimension of `b`. - einsum : Einstein summation convention. - - Notes - ----- - For vectors (1-D arrays) it computes the ordinary inner-product:: - - np.inner(a, b) = sum(a[:]*b[:]) - - More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: - - np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) - - or explicitly:: - - np.inner(a, b)[i0,...,ir-1,j0,...,js-1] - = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) - - In addition `a` or `b` may be scalars, in which case:: - - np.inner(a,b) = a*b - - Examples - -------- - Ordinary inner product for vectors: - - >>> a = np.array([1,2,3]) - >>> b = np.array([0,1,0]) - >>> np.inner(a, b) - 2 - - A multidimensional example: - - >>> a = np.arange(24).reshape((2,3,4)) - >>> b = np.arange(4) - >>> np.inner(a, b) - array([[ 14, 38, 62], - [ 86, 110, 134]]) - - An example where `b` is a scalar: - - >>> np.inner(np.eye(2), 7) - array([[7., 0.], - [0., 7.]]) - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) -def where(condition, x=None, y=None): - """ - where(condition, [x, y]) - - Return elements chosen from `x` or `y` depending on `condition`. - - .. note:: - When only `condition` is provided, this function is a shorthand for - ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be - preferred, as it behaves correctly for subclasses. The rest of this - documentation covers only the case where all three arguments are - provided. - - Parameters - ---------- - condition : array_like, bool - Where True, yield `x`, otherwise yield `y`. - x, y : array_like - Values from which to choose. `x`, `y` and `condition` need to be - broadcastable to some shape. - - Returns - ------- - out : ndarray - An array with elements from `x` where `condition` is True, and elements - from `y` elsewhere. 
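As a side note, the ``tensordot`` identity quoted in the ``inner`` notes above is easy to sanity-check; the shapes below are arbitrary:

import numpy as np

# inner() sums products over the last axis of each operand, which is
# exactly tensordot with axes=(-1, -1).
a = np.arange(24.0).reshape(2, 3, 4)
b = np.arange(8.0).reshape(2, 4)
assert np.allclose(np.inner(a, b), np.tensordot(a, b, axes=(-1, -1)))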
- - See Also - -------- - choose - nonzero : The function that is called when x and y are omitted - - Notes - ----- - If all the arrays are 1-D, `where` is equivalent to:: - - [xv if c else yv - for c, xv, yv in zip(condition, x, y)] - - Examples - -------- - >>> a = np.arange(10) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.where(a < 5, a, 10*a) - array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) - - This can be used on multidimensional arrays too: - - >>> np.where([[True, False], [True, True]], - ... [[1, 2], [3, 4]], - ... [[9, 8], [7, 6]]) - array([[1, 8], - [3, 4]]) - - The shapes of x, y, and the condition are broadcast together: - - >>> x, y = np.ogrid[:3, :4] - >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast - array([[10, 0, 0, 0], - [10, 11, 1, 1], - [10, 11, 12, 2]]) - - >>> a = np.array([[0, 1, 2], - ... [0, 2, 4], - ... [0, 3, 6]]) - >>> np.where(a < 4, a, -1) # -1 is broadcast - array([[ 0, 1, 2], - [ 0, 2, -1], - [ 0, 3, -1]]) - """ - return (condition, x, y) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) -def lexsort(keys, axis=None): - """ - lexsort(keys, axis=-1) - - Perform an indirect stable sort using a sequence of keys. - - Given multiple sorting keys, which can be interpreted as columns in a - spreadsheet, lexsort returns an array of integer indices that describes - the sort order by multiple columns. The last key in the sequence is used - for the primary sort order, the second-to-last key for the secondary sort - order, and so on. The keys argument must be a sequence of objects that - can be converted to arrays of the same shape. If a 2D array is provided - for the keys argument, it's rows are interpreted as the sorting keys and - sorting is according to the last row, second last row etc. - - Parameters - ---------- - keys : (k, N) array or tuple containing k (N,)-shaped sequences - The `k` different "columns" to be sorted. The last column (or row if - `keys` is a 2D array) is the primary sort key. - axis : int, optional - Axis to be indirectly sorted. By default, sort over the last axis. - - Returns - ------- - indices : (N,) ndarray of ints - Array of indices that sort the keys along the specified axis. - - See Also - -------- - argsort : Indirect sort. - ndarray.sort : In-place sort. - sort : Return a sorted copy of an array. - - Examples - -------- - Sort names: first by surname, then by name. - - >>> surnames = ('Hertz', 'Galilei', 'Hertz') - >>> first_names = ('Heinrich', 'Galileo', 'Gustav') - >>> ind = np.lexsort((first_names, surnames)) - >>> ind - array([1, 2, 0]) - - >>> [surnames[i] + ", " + first_names[i] for i in ind] - ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] - - Sort two columns of numbers: - - >>> a = [1,5,1,4,3,4,4] # First column - >>> b = [9,4,0,4,0,2,1] # Second column - >>> ind = np.lexsort((b,a)) # Sort by a, then by b - >>> ind - array([2, 0, 4, 6, 5, 3, 1]) - - >>> [(a[i],b[i]) for i in ind] - [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] - - Note that sorting is first according to the elements of ``a``. - Secondary sorting is according to the elements of ``b``. - - A normal ``argsort`` would have yielded: - - >>> [(a[i],b[i]) for i in np.argsort(a)] - [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] - - Structured arrays are sorted lexically by ``argsort``: - - >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], - ... 
dtype=np.dtype([('x', int), ('y', int)]))
-
-    >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
-    array([2, 0, 4, 6, 5, 3, 1])
-
-    """
-    if isinstance(keys, tuple):
-        return keys
-    else:
-        return (keys,)
-
-
-@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
-def can_cast(from_, to, casting=None):
-    """
-    can_cast(from_, to, casting='safe')
-
-    Returns True if cast between data types can occur according to the
-    casting rule. If from is a scalar or array scalar, also returns
-    True if the scalar value can be cast without overflow or truncation
-    to an integer.
-
-    Parameters
-    ----------
-    from_ : dtype, dtype specifier, scalar, or array
-        Data type, scalar, or array to cast from.
-    to : dtype or dtype specifier
-        Data type to cast to.
-    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
-        Controls what kind of data casting may occur.
-
-          * 'no' means the data types should not be cast at all.
-          * 'equiv' means only byte-order changes are allowed.
-          * 'safe' means only casts which can preserve values are allowed.
-          * 'same_kind' means only safe casts or casts within a kind,
-            like float64 to float32, are allowed.
-          * 'unsafe' means any data conversions may be done.
-
-    Returns
-    -------
-    out : bool
-        True if cast can occur according to the casting rule.
-
-    Notes
-    -----
-    .. versionchanged:: 1.17.0
-       Casting between a simple data type and a structured one is possible only
-       for "unsafe" casting. Casting to multiple fields is allowed, but
-       casting from multiple fields is not.
-
-    .. versionchanged:: 1.9.0
-       Casting from numeric to string types in 'safe' casting mode requires
-       that the string dtype length is long enough to store the maximum
-       integer/float value converted.
-
-    See also
-    --------
-    dtype, result_type
-
-    Examples
-    --------
-    Basic examples
-
-    >>> np.can_cast(np.int32, np.int64)
-    True
-    >>> np.can_cast(np.float64, complex)
-    True
-    >>> np.can_cast(complex, float)
-    False
-
-    >>> np.can_cast('i8', 'f8')
-    True
-    >>> np.can_cast('i8', 'f4')
-    False
-    >>> np.can_cast('i4', 'S4')
-    False
-
-    Casting scalars
-
-    >>> np.can_cast(100, 'i1')
-    True
-    >>> np.can_cast(150, 'i1')
-    False
-    >>> np.can_cast(150, 'u1')
-    True
-
-    >>> np.can_cast(3.5e100, np.float32)
-    False
-    >>> np.can_cast(1000.0, np.float32)
-    True
-
-    Array scalar checks the value, array does not
-
-    >>> np.can_cast(np.array(1000.0), np.float32)
-    True
-    >>> np.can_cast(np.array([1000.0]), np.float32)
-    False
-
-    Using the casting rules
-
-    >>> np.can_cast('i8', 'i8', 'no')
-    True
-    >>> np.can_cast('<i8', '>i8', 'no')
-    False
-
-    >>> np.can_cast('<i8', '>i8', 'equiv')
-    True
-    >>> np.can_cast('<i4', '>i8', 'equiv')
-    False
-
-    >>> np.can_cast('<i4', '>i8', 'safe')
-    True
-    >>> np.can_cast('<i8', '>i4', 'safe')
-    False
-
-    >>> np.can_cast('<i8', '>i4', 'same_kind')
-    True
-    >>> np.can_cast('<i8', '>u4', 'same_kind')
-    False
-
-    >>> np.can_cast('<i8', '>u4', 'unsafe')
-    True
-
-    """
-    return (from_,)
-
-
-@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
-def min_scalar_type(a):
-    """
-    min_scalar_type(a)
-
-    For scalar ``a``, returns the data type with the smallest size
-    and smallest scalar kind which can hold its value. For non-scalar
-    array ``a``, returns the vector's dtype unmodified.
-
-    Floating point values are not demoted to integers,
-    and complex values are not demoted to floats.
-
-    Parameters
-    ----------
-    a : scalar or array_like
-        The value whose minimal data type is to be found.
-
-    Returns
-    -------
-    out : dtype
-        The minimal data type.
-
-    Notes
-    -----
-    .. 
versionadded:: 1.6.0 - - See Also - -------- - result_type, promote_types, dtype, can_cast - - Examples - -------- - >>> np.min_scalar_type(10) - dtype('uint8') - - >>> np.min_scalar_type(-260) - dtype('int16') - - >>> np.min_scalar_type(3.1) - dtype('float16') - - >>> np.min_scalar_type(1e50) - dtype('float64') - - >>> np.min_scalar_type(np.arange(4,dtype='f8')) - dtype('float64') - - """ - return (a,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) -def result_type(*arrays_and_dtypes): - """ - result_type(*arrays_and_dtypes) - - Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. - - Parameters - ---------- - arrays_and_dtypes : list of arrays and dtypes - The operands of some operation whose result type is needed. - - Returns - ------- - out : dtype - The result type. - - See also - -------- - dtype, promote_types, min_scalar_type, can_cast - - Notes - ----- - .. versionadded:: 1.6.0 - - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each array, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. - - Examples - -------- - >>> np.result_type(3, np.arange(7, dtype='i1')) - dtype('int8') - - >>> np.result_type('i4', 'c8') - dtype('complex128') - - >>> np.result_type(3.0, -2) - dtype('float64') - - """ - return arrays_and_dtypes - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) -def dot(a, b, out=None): - """ - dot(a, b, out=None) - - Dot product of two arrays. Specifically, - - - If both `a` and `b` are 1-D arrays, it is inner product of vectors - (without complex conjugation). - - - If both `a` and `b` are 2-D arrays, it is matrix multiplication, - but using :func:`matmul` or ``a @ b`` is preferred. - - - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` - and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred. - - - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over - the last axis of `a` and `b`. 
- - - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a - sum product over the last axis of `a` and the second-to-last axis of `b`:: - - dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) - - Parameters - ---------- - a : array_like - First argument. - b : array_like - Second argument. - out : ndarray, optional - Output argument. This must have the exact kind that would be returned - if it was not used. In particular, it must have the right type, must be - C-contiguous, and its dtype must be the dtype that would be returned - for `dot(a,b)`. This is a performance feature. Therefore, if these - conditions are not met, an exception is raised, instead of attempting - to be flexible. - - Returns - ------- - output : ndarray - Returns the dot product of `a` and `b`. If `a` and `b` are both - scalars or both 1-D arrays then a scalar is returned; otherwise - an array is returned. - If `out` is given, then it is returned. - - Raises - ------ - ValueError - If the last dimension of `a` is not the same size as - the second-to-last dimension of `b`. - - See Also - -------- - vdot : Complex-conjugating dot product. - tensordot : Sum products over arbitrary axes. - einsum : Einstein summation convention. - matmul : '@' operator as method with out parameter. - - Examples - -------- - >>> np.dot(3, 4) - 12 - - Neither argument is complex-conjugated: - - >>> np.dot([2j, 3j], [2j, 3j]) - (-13+0j) - - For 2-D arrays it is the matrix product: - - >>> a = [[1, 0], [0, 1]] - >>> b = [[4, 1], [2, 2]] - >>> np.dot(a, b) - array([[4, 1], - [2, 2]]) - - >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) - >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) - >>> np.dot(a, b)[2,3,2,1,2,2] - 499128 - >>> sum(a[2,3,2,:] * b[1,2,:,2]) - 499128 - - """ - return (a, b, out) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) -def vdot(a, b): - """ - vdot(a, b) - - Return the dot product of two vectors. - - The vdot(`a`, `b`) function handles complex numbers differently than - dot(`a`, `b`). If the first argument is complex the complex conjugate - of the first argument is used for the calculation of the dot product. - - Note that `vdot` handles multidimensional arrays differently than `dot`: - it does *not* perform a matrix product, but flattens input arguments - to 1-D vectors first. Consequently, it should only be used for vectors. - - Parameters - ---------- - a : array_like - If `a` is complex the complex conjugate is taken before calculation - of the dot product. - b : array_like - Second argument to the dot product. - - Returns - ------- - output : ndarray - Dot product of `a` and `b`. Can be an int, float, or - complex depending on the types of `a` and `b`. - - See Also - -------- - dot : Return the dot product without using the complex conjugate of the - first argument. - - Examples - -------- - >>> a = np.array([1+2j,3+4j]) - >>> b = np.array([5+6j,7+8j]) - >>> np.vdot(a, b) - (70-8j) - >>> np.vdot(b, a) - (70+8j) - - Note that higher-dimensional arrays are flattened! - - >>> a = np.array([[1, 4], [5, 6]]) - >>> b = np.array([[4, 1], [2, 2]]) - >>> np.vdot(a, b) - 30 - >>> np.vdot(b, a) - 30 - >>> 1*4 + 4*1 + 5*2 + 6*2 - 30 - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) -def bincount(x, weights=None, minlength=None): - """ - bincount(x, weights=None, minlength=0) - - Count number of occurrences of each value in array of non-negative ints. - - The number of bins (of size 1) is one larger than the largest value in - `x`. 
If `minlength` is specified, there will be at least this number
-    of bins in the output array (though it will be longer if necessary,
-    depending on the contents of `x`).
-    Each bin gives the number of occurrences of its index value in `x`.
-    If `weights` is specified the input array is weighted by it, i.e. if a
-    value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
-    of ``out[n] += 1``.
-
-    Parameters
-    ----------
-    x : array_like, 1 dimension, nonnegative ints
-        Input array.
-    weights : array_like, optional
-        Weights, array of the same shape as `x`.
-    minlength : int, optional
-        A minimum number of bins for the output array.
-
-        .. versionadded:: 1.6.0
-
-    Returns
-    -------
-    out : ndarray of ints
-        The result of binning the input array.
-        The length of `out` is equal to ``np.amax(x)+1``.
-
-    Raises
-    ------
-    ValueError
-        If the input is not 1-dimensional, or contains elements with negative
-        values, or if `minlength` is negative.
-    TypeError
-        If the type of the input is float or complex.
-
-    See Also
-    --------
-    histogram, digitize, unique
-
-    Examples
-    --------
-    >>> np.bincount(np.arange(5))
-    array([1, 1, 1, 1, 1])
-    >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
-    array([1, 3, 1, 1, 0, 0, 0, 1])
-
-    >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
-    >>> np.bincount(x).size == np.amax(x)+1
-    True
-
-    The input array needs to be of integer dtype, otherwise a
-    TypeError is raised:
-
-    >>> np.bincount(np.arange(5, dtype=float))
-    Traceback (most recent call last):
-      File "<stdin>", line 1, in <module>
-    TypeError: array cannot be safely cast to required type
-
-    A possible use of ``bincount`` is to perform sums over
-    variable-size chunks of an array, using the ``weights`` keyword.
-
-    >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
-    >>> x = np.array([0, 1, 1, 2, 2, 2])
-    >>> np.bincount(x, weights=w)
-    array([ 0.3, 0.7, 1.1])
-
-    """
-    return (x, weights)
-
-
-@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
-def ravel_multi_index(multi_index, dims, mode=None, order=None):
-    """
-    ravel_multi_index(multi_index, dims, mode='raise', order='C')
-
-    Converts a tuple of index arrays into an array of flat
-    indices, applying boundary modes to the multi-index.
-
-    Parameters
-    ----------
-    multi_index : tuple of array_like
-        A tuple of integer arrays, one array for each dimension.
-    dims : tuple of ints
-        The shape of array into which the indices from ``multi_index`` apply.
-    mode : {'raise', 'wrap', 'clip'}, optional
-        Specifies how out-of-bounds indices are handled. Can specify
-        either one mode or a tuple of modes, one mode per index.
-
-        * 'raise' -- raise an error (default)
-        * 'wrap' -- wrap around
-        * 'clip' -- clip to the range
-
-        In 'clip' mode, a negative index which would normally
-        wrap will clip to 0 instead.
-    order : {'C', 'F'}, optional
-        Determines whether the multi-index should be viewed as
-        indexing in row-major (C-style) or column-major
-        (Fortran-style) order.
-
-    Returns
-    -------
-    raveled_indices : ndarray
-        An array of indices into the flattened version of an array
-        of dimensions ``dims``.
-
-    See Also
-    --------
-    unravel_index
-
-    Notes
-    -----
-    .. 
versionadded:: 1.6.0 - - Examples - -------- - >>> arr = np.array([[3,6,6],[4,5,1]]) - >>> np.ravel_multi_index(arr, (7,6)) - array([22, 41, 37]) - >>> np.ravel_multi_index(arr, (7,6), order='F') - array([31, 41, 13]) - >>> np.ravel_multi_index(arr, (4,6), mode='clip') - array([22, 23, 19]) - >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) - array([12, 13, 13]) - - >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) - 1621 - """ - return multi_index - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None, dims=None): - """ - unravel_index(indices, shape, order='C') - - Converts a flat index or array of flat indices into a tuple - of coordinate arrays. - - Parameters - ---------- - indices : array_like - An integer array whose elements are indices into the flattened - version of an array of dimensions ``shape``. Before version 1.6.0, - this function accepted just one index value. - shape : tuple of ints - The shape of the array to use for unraveling ``indices``. - - .. versionchanged:: 1.16.0 - Renamed from ``dims`` to ``shape``. - - order : {'C', 'F'}, optional - Determines whether the indices should be viewed as indexing in - row-major (C-style) or column-major (Fortran-style) order. - - .. versionadded:: 1.6.0 - - Returns - ------- - unraveled_coords : tuple of ndarray - Each array in the tuple has the same shape as the ``indices`` - array. - - See Also - -------- - ravel_multi_index - - Examples - -------- - >>> np.unravel_index([22, 41, 37], (7,6)) - (array([3, 6, 6]), array([4, 5, 1])) - >>> np.unravel_index([31, 41, 13], (7,6), order='F') - (array([3, 6, 6]), array([4, 5, 1])) - - >>> np.unravel_index(1621, (6,7,8,9)) - (3, 1, 4, 1) - - """ - if dims is not None: - warnings.warn("'shape' argument should be used instead of 'dims'", - DeprecationWarning, stacklevel=3) - return (indices,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) -def copyto(dst, src, casting=None, where=None): - """ - copyto(dst, src, casting='same_kind', where=True) - - Copies values from one array to another, broadcasting as necessary. - - Raises a TypeError if the `casting` rule is violated, and if - `where` is provided, it selects which elements to copy. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dst : ndarray - The array into which values are copied. - src : array_like - The array from which values are copied. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur when copying. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - where : array_like of bool, optional - A boolean array which is broadcasted to match the dimensions - of `dst`, and selects elements to copy from `src` to `dst` - wherever it contains the value True. - """ - return (dst, src, where) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) -def putmask(a, mask, values): - """ - putmask(a, mask, values) - - Changes elements of an array based on conditional and input values. - - Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. - - If `values` is not the same size as `a` and `mask` then it will repeat. 
- This gives behavior different from ``a[mask] = values``. - - Parameters - ---------- - a : array_like - Target array. - mask : array_like - Boolean mask array. It has to be the same shape as `a`. - values : array_like - Values to put into `a` where `mask` is True. If `values` is smaller - than `a` it will be repeated. - - See Also - -------- - place, put, take, copyto - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> np.putmask(x, x>2, x**2) - >>> x - array([[ 0, 1, 2], - [ 9, 16, 25]]) - - If `values` is smaller than `a` it is repeated: - - >>> x = np.arange(5) - >>> np.putmask(x, x>1, [-33, -44]) - >>> x - array([ 0, 1, -33, -44, -33]) - - """ - return (a, mask, values) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) -def packbits(a, axis=None, bitorder='big'): - """ - packbits(a, axis=None, bitorder='big') - - Packs the elements of a binary-valued array into bits in a uint8 array. - - The result is padded to full bytes by inserting zero bits at the end. - - Parameters - ---------- - a : array_like - An array of integers or booleans whose elements should be packed to - bits. - axis : int, optional - The dimension over which bit-packing is done. - ``None`` implies packing the flattened array. - bitorder : {'big', 'little'}, optional - The order of the input bits. 'big' will mimic bin(val), - ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011 => ``, 'little' will - reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. - Defaults to 'big'. - - .. versionadded:: 1.17.0 - - Returns - ------- - packed : ndarray - Array of type uint8 whose elements represent bits corresponding to the - logical (0 or nonzero) value of the input elements. The shape of - `packed` has the same number of dimensions as the input (unless `axis` - is None, in which case the output is 1-D). - - See Also - -------- - unpackbits: Unpacks elements of a uint8 array into a binary-valued output - array. - - Examples - -------- - >>> a = np.array([[[1,0,1], - ... [0,1,0]], - ... [[1,1,0], - ... [0,0,1]]]) - >>> b = np.packbits(a, axis=-1) - >>> b - array([[[160], - [ 64]], - [[192], - [ 32]]], dtype=uint8) - - Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, - and 32 = 0010 0000. - - """ - return (a,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) -def unpackbits(a, axis=None, count=None, bitorder='big'): - """ - unpackbits(a, axis=None, count=None, bitorder='big') - - Unpacks elements of a uint8 array into a binary-valued output array. - - Each element of `a` represents a bit-field that should be unpacked - into a binary-valued output array. The shape of the output array is - either 1-D (if `axis` is ``None``) or the same shape as the input - array with unpacking done along the axis specified. - - Parameters - ---------- - a : ndarray, uint8 type - Input array. - axis : int, optional - The dimension over which bit-unpacking is done. - ``None`` implies unpacking the flattened array. - count : int or None, optional - The number of elements to unpack along `axis`, provided as a way - of undoing the effect of packing a size that is not a multiple - of eight. A non-negative number means to only unpack `count` - bits. A negative number means to trim off that many bits from - the end. ``None`` means to unpack the entire array (the - default). Counts larger than the available number of bits will - add zero padding to the output. Negative counts must not - exceed the available number of bits. - - .. 
versionadded:: 1.17.0 - - bitorder : {'big', 'little'}, optional - The order of the returned bits. 'big' will mimic bin(val), - ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse - the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. - Defaults to 'big'. - - .. versionadded:: 1.17.0 - - Returns - ------- - unpacked : ndarray, uint8 type - The elements are binary-valued (0 or 1). - - See Also - -------- - packbits : Packs the elements of a binary-valued array into bits in - a uint8 array. - - Examples - -------- - >>> a = np.array([[2], [7], [23]], dtype=np.uint8) - >>> a - array([[ 2], - [ 7], - [23]], dtype=uint8) - >>> b = np.unpackbits(a, axis=1) - >>> b - array([[0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) - >>> c = np.unpackbits(a, axis=1, count=-3) - >>> c - array([[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 1, 0]], dtype=uint8) - - >>> p = np.packbits(b, axis=0) - >>> np.unpackbits(p, axis=0) - array([[0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 0, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) - >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) - True - - """ - return (a,) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) -def shares_memory(a, b, max_work=None): - """ - shares_memory(a, b, max_work=None) - - Determine if two arrays share memory - - Parameters - ---------- - a, b : ndarray - Input arrays - max_work : int, optional - Effort to spend on solving the overlap problem (maximum number - of candidate solutions to consider). The following special - values are recognized: - - max_work=MAY_SHARE_EXACT (default) - The problem is solved exactly. In this case, the function returns - True only if there is an element shared between the arrays. - max_work=MAY_SHARE_BOUNDS - Only the memory bounds of a and b are checked. - - Raises - ------ - numpy.TooHardError - Exceeded max_work. - - Returns - ------- - out : bool - - See Also - -------- - may_share_memory - - Examples - -------- - >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) - False - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) -def may_share_memory(a, b, max_work=None): - """ - may_share_memory(a, b, max_work=None) - - Determine if two arrays might share memory - - A return of True does not necessarily mean that the two arrays - share any element. It just means that they *might*. - - Only the memory bounds of a and b are checked by default. - - Parameters - ---------- - a, b : ndarray - Input arrays - max_work : int, optional - Effort to spend on solving the overlap problem. See - `shares_memory` for details. Default for ``may_share_memory`` - is to do a bounds check. - - Returns - ------- - out : bool - - See Also - -------- - shares_memory - - Examples - -------- - >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) - False - >>> x = np.zeros([3, 4]) - >>> np.may_share_memory(x[:,0], x[:,1]) - True - - """ - return (a, b) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) -def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): - """ - is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) - - Calculates which of the given dates are valid days, and which are not. - - .. 
versionadded:: 1.7.0 - - Parameters - ---------- - dates : array_like of datetime64[D] - The array of dates to process. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates. They may be - specified in any order, and NaT (not-a-time) dates are ignored. - This list is saved in a normalized form that is suited for - fast calculations of valid days. - busdaycal : busdaycalendar, optional - A `busdaycalendar` object which specifies the valid days. If this - parameter is provided, neither weekmask nor holidays may be - provided. - out : array of bool, optional - If provided, this array is filled with the result. - - Returns - ------- - out : array of bool - An array with the same shape as ``dates``, containing True for - each valid day, and False for each invalid day. - - See Also - -------- - busdaycalendar: An object that specifies a custom set of valid days. - busday_offset : Applies an offset counted in valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Examples - -------- - >>> # The weekdays are Friday, Saturday, and Monday - ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], - ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) - array([False, False, True]) - """ - return (dates, weekmask, holidays, out) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) -def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, - busdaycal=None, out=None): - """ - busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) - - First adjusts the date to fall on a valid day according to - the ``roll`` rule, then applies offsets to the given dates - counted in valid days. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dates : array_like of datetime64[D] - The array of dates to process. - offsets : array_like of int - The array of offsets, which is broadcast with ``dates``. - roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional - How to treat dates that do not fall on a valid day. The default - is 'raise'. - - * 'raise' means to raise an exception for an invalid day. - * 'nat' means to return a NaT (not-a-time) for an invalid day. - * 'forward' and 'following' mean to take the first valid day - later in time. - * 'backward' and 'preceding' mean to take the first valid day - earlier in time. - * 'modifiedfollowing' means to take the first valid day - later in time unless it is across a Month boundary, in which - case to take the first valid day earlier in time. - * 'modifiedpreceding' means to take the first valid day - earlier in time unless it is across a Month boundary, in which - case to take the first valid day later in time. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. 
May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates. They may be - specified in any order, and NaT (not-a-time) dates are ignored. - This list is saved in a normalized form that is suited for - fast calculations of valid days. - busdaycal : busdaycalendar, optional - A `busdaycalendar` object which specifies the valid days. If this - parameter is provided, neither weekmask nor holidays may be - provided. - out : array of datetime64[D], optional - If provided, this array is filled with the result. - - Returns - ------- - out : array of datetime64[D] - An array with a shape from broadcasting ``dates`` and ``offsets`` - together, containing the dates with offsets applied. - - See Also - -------- - busdaycalendar: An object that specifies a custom set of valid days. - is_busday : Returns a boolean array indicating valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Examples - -------- - >>> # First business day in October 2011 (not accounting for holidays) - ... np.busday_offset('2011-10', 0, roll='forward') - numpy.datetime64('2011-10-03') - >>> # Last business day in February 2012 (not accounting for holidays) - ... np.busday_offset('2012-03', -1, roll='forward') - numpy.datetime64('2012-02-29') - >>> # Third Wednesday in January 2011 - ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') - numpy.datetime64('2011-01-19') - >>> # 2012 Mother's Day in Canada and the U.S. - ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') - numpy.datetime64('2012-05-13') - - >>> # First business day on or after a date - ... np.busday_offset('2011-03-20', 0, roll='forward') - numpy.datetime64('2011-03-21') - >>> np.busday_offset('2011-03-22', 0, roll='forward') - numpy.datetime64('2011-03-22') - >>> # First business day after a date - ... np.busday_offset('2011-03-20', 1, roll='backward') - numpy.datetime64('2011-03-21') - >>> np.busday_offset('2011-03-22', 1, roll='backward') - numpy.datetime64('2011-03-23') - """ - return (dates, offsets, weekmask, holidays, out) - - -@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) -def busday_count(begindates, enddates, weekmask=None, holidays=None, - busdaycal=None, out=None): - """ - busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) - - Counts the number of valid days between `begindates` and - `enddates`, not including the day of `enddates`. - - If ``enddates`` specifies a date value that is earlier than the - corresponding ``begindates`` date value, the count will be negative. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - begindates : array_like of datetime64[D] - The array of the first dates for counting. - enddates : array_like of datetime64[D] - The array of the end dates for counting, which are excluded - from the count themselves. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. 
May be specified as a length-seven list or array, like
-        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
-        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
-        weekdays, optionally separated by white space. Valid abbreviations
-        are: Mon Tue Wed Thu Fri Sat Sun
-    holidays : array_like of datetime64[D], optional
-        An array of dates to consider as invalid dates. They may be
-        specified in any order, and NaT (not-a-time) dates are ignored.
-        This list is saved in a normalized form that is suited for
-        fast calculations of valid days.
-    busdaycal : busdaycalendar, optional
-        A `busdaycalendar` object which specifies the valid days. If this
-        parameter is provided, neither weekmask nor holidays may be
-        provided.
-    out : array of int, optional
-        If provided, this array is filled with the result.
-
-    Returns
-    -------
-    out : array of int
-        An array with a shape from broadcasting ``begindates`` and ``enddates``
-        together, containing the number of valid days between
-        the begin and end dates.
-
-    See Also
-    --------
-    busdaycalendar: An object that specifies a custom set of valid days.
-    is_busday : Returns a boolean array indicating valid days.
-    busday_offset : Applies an offset counted in valid days.
-
-    Examples
-    --------
-    >>> # Number of weekdays in January 2011
-    ... np.busday_count('2011-01', '2011-02')
-    21
-    >>> # Number of weekdays in 2011
-    >>> np.busday_count('2011', '2012')
-    260
-    >>> # Number of Saturdays in 2011
-    ... np.busday_count('2011', '2012', weekmask='Sat')
-    53
-    """
-    return (begindates, enddates, weekmask, holidays, out)
-
-
-@array_function_from_c_func_and_dispatcher(
-    _multiarray_umath.datetime_as_string)
-def datetime_as_string(arr, unit=None, timezone=None, casting=None):
-    """
-    datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
-
-    Convert an array of datetimes into an array of strings.
-
-    Parameters
-    ----------
-    arr : array_like of datetime64
-        The array of UTC timestamps to format.
-    unit : str
-        One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
-    timezone : {'naive', 'UTC', 'local'} or tzinfo
-        Timezone information to use when displaying the datetime. If 'UTC', end
-        with a Z to indicate UTC time. If 'local', convert to the local timezone
-        first, and suffix with a +-#### timezone offset. If a tzinfo object,
-        then do as with 'local', but use the specified timezone.
-    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
-        Casting to allow when changing between datetime units.
-
-    Returns
-    -------
-    str_arr : ndarray
-        An array of strings the same shape as `arr`.
-
-    Examples
-    --------
-    >>> import pytz
-    >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
-    >>> d
-    array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
-           '2002-10-27T07:30'], dtype='datetime64[m]')
-
-    Setting the timezone to UTC shows the same information, but with a Z suffix
-
-    >>> np.datetime_as_string(d, timezone='UTC')
-    array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
-           '2002-10-27T07:30Z'], dtype='<U35')
-
-    Note that we picked datetimes that cross a DST boundary. If the tz
-    argument is provided, then they will show up with the correct timezone
-    offset.
-
-    >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
-    array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
-           '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
-
-    The unit determines the precision of the output
-
-    >>> np.datetime_as_string(d, unit='h')
-    array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
-          dtype='<U32')
-    >>> np.datetime_as_string(d, unit='s')
-    array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
-           '2002-10-27T07:30:00'], dtype='<U38')
-
-    'casting' can be used to specify whether precision can be changed
-
-    >>> np.datetime_as_string(d, unit='h', casting='safe')
-    Traceback (most recent call last):
-        ...
-    TypeError: Cannot create a datetime string as units 'h' from a NumPy
-    datetime with units 'm' according to the rule 'safe'
-    """
-    return (arr,)
diff --git a/venv/lib/python3.7/site-packages/numpy/core/numeric.py b/venv/lib/python3.7/site-packages/numpy/core/numeric.py
deleted file mode 100644
index 1e011e2..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/numeric.py
+++ /dev/null
@@ -1,2411 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import functools
-import itertools
-import operator
-import sys
-import warnings
-import numbers
-import contextlib
-
-import numpy as np
-from numpy.compat import pickle, basestring
-from . import multiarray
-from .multiarray import (
-    _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
-    BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
-    WRAP, arange, array, broadcast, can_cast, compare_chararrays,
-    concatenate, copyto, dot, dtype, empty,
-    empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
-    inner, int_asbuffer, lexsort, matmul, may_share_memory,
-    min_scalar_type, ndarray, nditer, nested_iters, promote_types,
-    putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
-    zeros, normalize_axis_index)
-if sys.version_info[0] < 3:
-    from .multiarray import newbuffer, getbuffer
-
-from . import overrides
-from . import umath
-from . import shape_base
-from .overrides import set_module
-from .umath import (multiply, invert, sin, PINF, NAN)
-from . 
import numerictypes -from .numerictypes import longlong, intc, int_, float_, complex_, bool_ -from ._exceptions import TooHardError, AxisError -from ._asarray import asarray, asanyarray -from ._ufunc_config import errstate - -bitwise_not = invert -ufunc = type(sin) -newaxis = None - -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', - 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', - 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where', - 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', - 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', - 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', - 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', - 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', - 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', - 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', - 'identity', 'allclose', 'compare_chararrays', 'putmask', - 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', - 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', - 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', - 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', - 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError'] - -if sys.version_info[0] < 3: - __all__.extend(['getbuffer', 'newbuffer']) - - -@set_module('numpy') -class ComplexWarning(RuntimeWarning): - """ - The warning raised when casting a complex dtype to a real dtype. - - As implemented, casting a complex number to a real discards its imaginary - part, but this behavior may not be what the user actually wants. - - """ - pass - - -def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): - return (a,) - - -@array_function_dispatch(_zeros_like_dispatcher) -def zeros_like(a, dtype=None, order='K', subok=True, shape=None): - """ - Return an array of zeros with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - dtype : data-type, optional - Overrides the data type of the result. - - .. versionadded:: 1.6.0 - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - - .. versionadded:: 1.6.0 - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of zeros with the same shape and type as `a`. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - full_like : Return a new array with shape of input filled with value. - zeros : Return a new array setting values to zero. 
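The ``dtype`` and ``shape`` overrides documented above can be combined, in which case only the remaining attributes are borrowed from the prototype; a small illustration (values arbitrary):

import numpy as np

proto = np.ones((2, 3), dtype=np.int16)
z = np.zeros_like(proto, dtype=np.float64, shape=(4,))
# z -> array([0., 0., 0., 0.]); dtype and shape overridden, order kept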
- - Examples - -------- - >>> x = np.arange(6) - >>> x = x.reshape((2, 3)) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.zeros_like(x) - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y = np.arange(3, dtype=float) - >>> y - array([0., 1., 2.]) - >>> np.zeros_like(y) - array([0., 0., 0.]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - # needed instead of a 0 to get same result as zeros for for string dtypes - z = zeros(1, dtype=res.dtype) - multiarray.copyto(res, z, casting='unsafe') - return res - - -@set_module('numpy') -def ones(shape, dtype=None, order='C'): - """ - Return a new array of given shape and type, filled with ones. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional, default: C - Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. - - Returns - ------- - out : ndarray - Array of ones with the given shape, dtype, and order. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - empty : Return a new uninitialized array. - zeros : Return a new array setting values to zero. - full : Return a new array of given shape filled with value. - - - Examples - -------- - >>> np.ones(5) - array([1., 1., 1., 1., 1.]) - - >>> np.ones((5,), dtype=int) - array([1, 1, 1, 1, 1]) - - >>> np.ones((2, 1)) - array([[1.], - [1.]]) - - >>> s = (2,2) - >>> np.ones(s) - array([[1., 1.], - [1., 1.]]) - - """ - a = empty(shape, dtype, order) - multiarray.copyto(a, 1, casting='unsafe') - return a - - -def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): - return (a,) - - -@array_function_dispatch(_ones_like_dispatcher) -def ones_like(a, dtype=None, order='K', subok=True, shape=None): - """ - Return an array of ones with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - dtype : data-type, optional - Overrides the data type of the result. - - .. versionadded:: 1.6.0 - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - - .. versionadded:: 1.6.0 - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of ones with the same shape and type as `a`. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full_like : Return a new array with shape of input filled with value. - ones : Return a new array setting values to one. 
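The implementation pattern is the same as for ``zeros_like`` above: allocate with ``empty_like``, then broadcast-copy the fill value. A small sketch using only public APIs:

import numpy as np

x = np.arange(6).reshape(2, 3)
res = np.empty_like(x)               # allocate, same shape/dtype as x
np.copyto(res, 1, casting='unsafe')  # broadcast the scalar fill value
assert (res == np.ones_like(x)).all()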
- - Examples - -------- - >>> x = np.arange(6) - >>> x = x.reshape((2, 3)) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.ones_like(x) - array([[1, 1, 1], - [1, 1, 1]]) - - >>> y = np.arange(3, dtype=float) - >>> y - array([0., 1., 2.]) - >>> np.ones_like(y) - array([1., 1., 1.]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - multiarray.copyto(res, 1, casting='unsafe') - return res - - -@set_module('numpy') -def full(shape, fill_value, dtype=None, order='C'): - """ - Return a new array of given shape and type, filled with `fill_value`. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - fill_value : scalar - Fill value. - dtype : data-type, optional - The desired data-type for the array The default, None, means - `np.array(fill_value).dtype`. - order : {'C', 'F'}, optional - Whether to store multidimensional data in C- or Fortran-contiguous - (row- or column-wise) order in memory. - - Returns - ------- - out : ndarray - Array of `fill_value` with the given shape, dtype, and order. - - See Also - -------- - full_like : Return a new array with shape of input filled with value. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. - - Examples - -------- - >>> np.full((2, 2), np.inf) - array([[inf, inf], - [inf, inf]]) - >>> np.full((2, 2), 10) - array([[10, 10], - [10, 10]]) - - """ - if dtype is None: - dtype = array(fill_value).dtype - a = empty(shape, dtype, order) - multiarray.copyto(a, fill_value, casting='unsafe') - return a - - -def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None): - return (a,) - - -@array_function_dispatch(_full_like_dispatcher) -def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): - """ - Return a full array with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - fill_value : scalar - Fill value. - dtype : data-type, optional - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - shape : int or sequence of ints, optional. - Overrides the shape of the result. If order='K' and the number of - dimensions is unchanged, will try to keep order, otherwise, - order='C' is implied. - - .. versionadded:: 1.17.0 - - Returns - ------- - out : ndarray - Array of `fill_value` with the same shape and type as `a`. - - See Also - -------- - empty_like : Return an empty array with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - full : Return a new array of given shape filled with value. 
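One behavior worth calling out (and visible in the doctests that follow): when ``dtype`` is not overridden, ``full_like`` keeps the dtype of ``a``, so a float ``fill_value`` can be silently truncated. A quick sketch:

import numpy as np

x = np.arange(6, dtype=int)
assert (np.full_like(x, 0.1) == 0).all()                      # 0.1 truncated to int 0
assert (np.full_like(x, 0.1, dtype=np.double) == 0.1).all()   # dtype override avoids it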
- - Examples - -------- - >>> x = np.arange(6, dtype=int) - >>> np.full_like(x, 1) - array([1, 1, 1, 1, 1, 1]) - >>> np.full_like(x, 0.1) - array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) - array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) - array([nan, nan, nan, nan, nan, nan]) - - >>> y = np.arange(6, dtype=np.double) - >>> np.full_like(y, 0.1) - array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) - multiarray.copyto(res, fill_value, casting='unsafe') - return res - - -def _count_nonzero_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_count_nonzero_dispatcher) -def count_nonzero(a, axis=None): - """ - Counts the number of non-zero values in the array ``a``. - - The word "non-zero" is in reference to the Python 2.x - built-in method ``__nonzero__()`` (renamed ``__bool__()`` - in Python 3.x) of Python objects that tests an object's - "truthfulness". For example, any number is considered - truthful if it is nonzero, whereas any string is considered - truthful if it is not the empty string. Thus, this function - (recursively) counts how many elements in ``a`` (and in - sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` - method evaluated to ``True``. - - Parameters - ---------- - a : array_like - The array for which to count non-zeros. - axis : int or tuple, optional - Axis or tuple of axes along which to count non-zeros. - Default is None, meaning that non-zeros will be counted - along a flattened version of ``a``. - - .. versionadded:: 1.12.0 - - Returns - ------- - count : int or array of int - Number of non-zero values in the array along a given axis. - Otherwise, the total number of non-zero values in the array - is returned. - - See Also - -------- - nonzero : Return the coordinates of all the non-zero values. - - Examples - -------- - >>> np.count_nonzero(np.eye(4)) - 4 - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]]) - 5 - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0) - array([1, 1, 1, 1, 1]) - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1) - array([2, 3]) - - """ - if axis is None: - return multiarray.count_nonzero(a) - - a = asanyarray(a) - - # TODO: this works around .astype(bool) not working properly (gh-9847) - if np.issubdtype(a.dtype, np.character): - a_bool = a != a.dtype.type() - else: - a_bool = a.astype(np.bool_, copy=False) - - return a_bool.sum(axis=axis, dtype=np.intp) - - -@set_module('numpy') -def isfortran(a): - """ - Check if the array is Fortran contiguous but *not* C contiguous. - - This function is obsolete and, because of changes due to relaxed stride - checking, its return value for the same array may differ for versions - of NumPy >= 1.10.0 and previous versions. If you only want to check if an - array is Fortran contiguous use ``a.flags.f_contiguous`` instead. - - Parameters - ---------- - a : ndarray - Input array. - - Returns - ------- - isfortran : bool - Returns True if the array is Fortran contiguous but *not* C contiguous. - - - Examples - -------- - - np.array allows to specify whether the array is written in C-contiguous - order (last index varies the fastest), or FORTRAN-contiguous order in - memory (first index varies the fastest). 
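Per the obsolescence note above, the ``flags`` attribute is the preferred check today; ``isfortran`` simply returns the ``fnc`` flag ("Fortran contiguous, not C contiguous"). A quick equivalence check:

import numpy as np

b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
# flags.fnc is exactly f_contiguous and not c_contiguous
assert np.isfortran(b) == (b.flags.f_contiguous and not b.flags.c_contiguous)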
- - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - - >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') - >>> b - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(b) - True - - - The transpose of a C-ordered array is a FORTRAN-ordered array. - - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - >>> b = a.T - >>> b - array([[1, 4], - [2, 5], - [3, 6]]) - >>> np.isfortran(b) - True - - C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. - - >>> np.isfortran(np.array([1, 2], order='F')) - False - - """ - return a.flags.fnc - - -def _argwhere_dispatcher(a): - return (a,) - - -@array_function_dispatch(_argwhere_dispatcher) -def argwhere(a): - """ - Find the indices of array elements that are non-zero, grouped by element. - - Parameters - ---------- - a : array_like - Input data. - - Returns - ------- - index_array : (N, a.ndim) ndarray - Indices of elements that are non-zero. Indices are grouped by element. - This array will have shape ``(N, a.ndim)`` where ``N`` is the number of - non-zero items. - - See Also - -------- - where, nonzero - - Notes - ----- - ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, - but produces a result of the correct shape for a 0D array. - - The output of ``argwhere`` is not suitable for indexing arrays. - For this purpose use ``nonzero(a)`` instead. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argwhere(x>1) - array([[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - """ - # nonzero does not behave well on 0d, so promote to 1d - if np.ndim(a) == 0: - a = shape_base.atleast_1d(a) - # then remove the added dimension - return argwhere(a)[:,:0] - return transpose(nonzero(a)) - - -def _flatnonzero_dispatcher(a): - return (a,) - - -@array_function_dispatch(_flatnonzero_dispatcher) -def flatnonzero(a): - """ - Return indices that are non-zero in the flattened version of a. - - This is equivalent to np.nonzero(np.ravel(a))[0]. - - Parameters - ---------- - a : array_like - Input data. - - Returns - ------- - res : ndarray - Output array, containing the indices of the elements of `a.ravel()` - that are non-zero. - - See Also - -------- - nonzero : Return the indices of the non-zero elements of the input array. - ravel : Return a 1-D array containing the elements of the input array. - - Examples - -------- - >>> x = np.arange(-2, 3) - >>> x - array([-2, -1, 0, 1, 2]) - >>> np.flatnonzero(x) - array([0, 1, 3, 4]) - - Use the indices of the non-zero elements as an index array to extract - these elements: - - >>> x.ravel()[np.flatnonzero(x)] - array([-2, -1, 1, 2]) - - """ - return np.nonzero(np.ravel(a))[0] - - -_mode_from_name_dict = {'v': 0, - 's': 1, - 'f': 2} - - -def _mode_from_name(mode): - if isinstance(mode, basestring): - return _mode_from_name_dict[mode.lower()[0]] - return mode - - -def _correlate_dispatcher(a, v, mode=None): - return (a, v) - - -@array_function_dispatch(_correlate_dispatcher) -def correlate(a, v, mode='valid'): - """ - Cross-correlation of two 1-dimensional sequences. - - This function computes the correlation as generally defined in signal - processing texts:: - - c_{av}[k] = sum_n a[n+k] * conj(v[n]) - - with a and v sequences being zero-padded where necessary and conj being - the conjugate. - - Parameters - ---------- - a, v : array_like - Input sequences. 
- mode : {'valid', 'same', 'full'}, optional - Refer to the `convolve` docstring. Note that the default - is 'valid', unlike `convolve`, which uses 'full'. - old_behavior : bool - `old_behavior` was removed in NumPy 1.10. If you need the old - behavior, use `multiarray.correlate`. - - Returns - ------- - out : ndarray - Discrete cross-correlation of `a` and `v`. - - See Also - -------- - convolve : Discrete, linear convolution of two one-dimensional sequences. - multiarray.correlate : Old, no conjugate, version of correlate. - - Notes - ----- - The definition of correlation above is not unique and sometimes correlation - may be defined differently. Another common definition is:: - - c'_{av}[k] = sum_n a[n] conj(v[n+k]) - - which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``. - - Examples - -------- - >>> np.correlate([1, 2, 3], [0, 1, 0.5]) - array([3.5]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") - array([2. , 3.5, 3. ]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") - array([0.5, 2. , 3.5, 3. , 0. ]) - - Using complex sequences: - - >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') - array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) - - Note that you get the time reversed, complex conjugated result - when the two input sequences change places, i.e., - ``c_{va}[k] = c^{*}_{av}[-k]``: - - >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') - array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) - - """ - mode = _mode_from_name(mode) - return multiarray.correlate2(a, v, mode) - - -def _convolve_dispatcher(a, v, mode=None): - return (a, v) - - -@array_function_dispatch(_convolve_dispatcher) -def convolve(a, v, mode='full'): - """ - Returns the discrete, linear convolution of two one-dimensional sequences. - - The convolution operator is often seen in signal processing, where it - models the effect of a linear time-invariant system on a signal [1]_. In - probability theory, the sum of two independent random variables is - distributed according to the convolution of their individual - distributions. - - If `v` is longer than `a`, the arrays are swapped before computation. - - Parameters - ---------- - a : (N,) array_like - First one-dimensional input array. - v : (M,) array_like - Second one-dimensional input array. - mode : {'full', 'valid', 'same'}, optional - 'full': - By default, mode is 'full'. This returns the convolution - at each point of overlap, with an output shape of (N+M-1,). At - the end-points of the convolution, the signals do not overlap - completely, and boundary effects may be seen. - - 'same': - Mode 'same' returns output of length ``max(M, N)``. Boundary - effects are still visible. - - 'valid': - Mode 'valid' returns output of length - ``max(M, N) - min(M, N) + 1``. The convolution product is only given - for points where the signals overlap completely. Values outside - the signal boundary have no effect. - - Returns - ------- - out : ndarray - Discrete, linear convolution of `a` and `v`. - - See Also - -------- - scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier - Transform. - scipy.linalg.toeplitz : Used to construct the convolution operator. - polymul : Polynomial multiplication. Same output as convolve, but also - accepts poly1d objects as input. - - Notes - ----- - The discrete convolution operation is defined as - - .. 
math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m] - - It can be shown that a convolution :math:`x(t) * y(t)` in time/space - is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier - domain, after appropriate padding (padding is necessary to prevent - circular convolution). Since multiplication is more efficient (faster) - than convolution, the function `scipy.signal.fftconvolve` exploits the - FFT to calculate the convolution of large data-sets. - - References - ---------- - .. [1] Wikipedia, "Convolution", - https://en.wikipedia.org/wiki/Convolution - - Examples - -------- - Note how the convolution operator flips the second array - before "sliding" the two across one another: - - >>> np.convolve([1, 2, 3], [0, 1, 0.5]) - array([0. , 1. , 2.5, 4. , 1.5]) - - Only return the middle values of the convolution. - Contains boundary effects, where zeros are taken - into account: - - >>> np.convolve([1,2,3],[0,1,0.5], 'same') - array([1. , 2.5, 4. ]) - - The two arrays are of the same length, so there - is only one position where they completely overlap: - - >>> np.convolve([1,2,3],[0,1,0.5], 'valid') - array([2.5]) - - """ - a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) - if (len(v) > len(a)): - a, v = v, a - if len(a) == 0: - raise ValueError('a cannot be empty') - if len(v) == 0: - raise ValueError('v cannot be empty') - mode = _mode_from_name(mode) - return multiarray.correlate(a, v[::-1], mode) - - -def _outer_dispatcher(a, b, out=None): - return (a, b, out) - - -@array_function_dispatch(_outer_dispatcher) -def outer(a, b, out=None): - """ - Compute the outer product of two vectors. - - Given two vectors, ``a = [a0, a1, ..., aM]`` and - ``b = [b0, b1, ..., bN]``, - the outer product [1]_ is:: - - [[a0*b0 a0*b1 ... a0*bN ] - [a1*b0 . - [ ... . - [aM*b0 aM*bN ]] - - Parameters - ---------- - a : (M,) array_like - First input vector. Input is flattened if - not already 1-dimensional. - b : (N,) array_like - Second input vector. Input is flattened if - not already 1-dimensional. - out : (M, N) ndarray, optional - A location where the result is stored - - .. versionadded:: 1.9.0 - - Returns - ------- - out : (M, N) ndarray - ``out[i, j] = a[i] * b[j]`` - - See also - -------- - inner - einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. - ufunc.outer : A generalization to N dimensions and other operations. - ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent. - - References - ---------- - .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd - ed., Baltimore, MD, Johns Hopkins University Press, 1996, - pg. 8. 
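The See Also entries above give two exact equivalences for ``outer``; both are easy to verify (a quick check with illustrative inputs):

import numpy as np

a, b = np.array([1, 2, 3]), np.array([4, 5])
ref = np.outer(a, b)
assert (ref == np.multiply.outer(a.ravel(), b.ravel())).all()
assert (ref == np.einsum('i,j->ij', a.ravel(), b.ravel())).all()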
- - Examples - -------- - Make a (*very* coarse) grid for computing a Mandelbrot set: - - >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) - >>> rl - array([[-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.]]) - >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) - >>> im - array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], - [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], - [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], - [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) - >>> grid = rl + im - >>> grid - array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], - [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], - [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], - [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], - [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) - - An example using a "vector" of letters: - - >>> x = np.array(['a', 'b', 'c'], dtype=object) - >>> np.outer(x, [1, 2, 3]) - array([['a', 'aa', 'aaa'], - ['b', 'bb', 'bbb'], - ['c', 'cc', 'ccc']], dtype=object) - - """ - a = asarray(a) - b = asarray(b) - return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) - - -def _tensordot_dispatcher(a, b, axes=None): - return (a, b) - - -@array_function_dispatch(_tensordot_dispatcher) -def tensordot(a, b, axes=2): - """ - Compute tensor dot product along specified axes. - - Given two tensors, `a` and `b`, and an array_like object containing - two array_like objects, ``(a_axes, b_axes)``, sum the products of - `a`'s and `b`'s elements (components) over the axes specified by - ``a_axes`` and ``b_axes``. The third argument can be a single non-negative - integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions - of `a` and the first ``N`` dimensions of `b` are summed over. - - Parameters - ---------- - a, b : array_like - Tensors to "dot". - - axes : int or (2,) array_like - * integer_like - If an int N, sum over the last N axes of `a` and the first N axes - of `b` in order. The sizes of the corresponding axes must match. - * (2,) array_like - Or, a list of axes to be summed over, first sequence applying to `a`, - second to `b`. Both elements array_like must be of the same length. - - Returns - ------- - output : ndarray - The tensor dot product of the input. - - See Also - -------- - dot, einsum - - Notes - ----- - Three common use cases are: - * ``axes = 0`` : tensor product :math:`a\\otimes b` - * ``axes = 1`` : tensor dot product :math:`a\\cdot b` - * ``axes = 2`` : (default) tensor double contraction :math:`a:b` - - When `axes` is integer_like, the sequence for evaluation will be: first - the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and - Nth axis in `b` last. - - When there is more than one axis to sum over - and they are not the last - (first) axes of `a` (`b`) - the argument `axes` should consist of - two sequences of the same length, with the first axis to sum over given - first in both sequences, the second axis second, and so forth. - - The shape of the result consists of the non-contracted axes of the - first tensor, followed by the non-contracted axes of the second. - - Examples - -------- - A "traditional" example: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) - >>> c.shape - (5, 2) - >>> c - array([[4400., 4730.], - [4532., 4874.], - [4664., 5018.], - [4796., 5162.], - [4928., 5306.]]) - >>> # A slower but equivalent way of computing the same... 
- >>> d = np.zeros((5,2)) - >>> for i in range(5): - ... for j in range(2): - ... for k in range(3): - ... for n in range(4): - ... d[i,j] += a[k,n,i] * b[n,k,j] - >>> c == d - array([[ True, True], - [ True, True], - [ True, True], - [ True, True], - [ True, True]]) - - An extended example taking advantage of the overloading of + and \\*: - - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) - >>> a; A - array([[[1, 2], - [3, 4]], - [[5, 6], - [7, 8]]]) - array([['a', 'b'], - ['c', 'd']], dtype=object) - - >>> np.tensordot(a, A) # third argument default is 2 for double-contraction - array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) - - >>> np.tensordot(a, A, 1) - array([[['acc', 'bdd'], - ['aaacccc', 'bbbdddd']], - [['aaaaacccccc', 'bbbbbdddddd'], - ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) - - >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) - array([[[[['a', 'b'], - ['c', 'd']], - ... - - >>> np.tensordot(a, A, (0, 1)) - array([[['abbbbb', 'cddddd'], - ['aabbbbbb', 'ccdddddd']], - [['aaabbbbbbb', 'cccddddddd'], - ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) - - >>> np.tensordot(a, A, (2, 1)) - array([[['abb', 'cdd'], - ['aaabbbb', 'cccdddd']], - [['aaaaabbbbbb', 'cccccdddddd'], - ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) - - >>> np.tensordot(a, A, ((0, 1), (0, 1))) - array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) - - >>> np.tensordot(a, A, ((2, 1), (1, 0))) - array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) - - """ - try: - iter(axes) - except Exception: - axes_a = list(range(-axes, 0)) - axes_b = list(range(0, axes)) - else: - axes_a, axes_b = axes - try: - na = len(axes_a) - axes_a = list(axes_a) - except TypeError: - axes_a = [axes_a] - na = 1 - try: - nb = len(axes_b) - axes_b = list(axes_b) - except TypeError: - axes_b = [axes_b] - nb = 1 - - a, b = asarray(a), asarray(b) - as_ = a.shape - nda = a.ndim - bs = b.shape - ndb = b.ndim - equal = True - if na != nb: - equal = False - else: - for k in range(na): - if as_[axes_a[k]] != bs[axes_b[k]]: - equal = False - break - if axes_a[k] < 0: - axes_a[k] += nda - if axes_b[k] < 0: - axes_b[k] += ndb - if not equal: - raise ValueError("shape-mismatch for sum") - - # Move the axes to sum over to the end of "a" - # and to the front of "b" - notin = [k for k in range(nda) if k not in axes_a] - newaxes_a = notin + axes_a - N2 = 1 - for axis in axes_a: - N2 *= as_[axis] - newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2) - olda = [as_[axis] for axis in notin] - - notin = [k for k in range(ndb) if k not in axes_b] - newaxes_b = axes_b + notin - N2 = 1 - for axis in axes_b: - N2 *= bs[axis] - newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin]))) - oldb = [bs[axis] for axis in notin] - - at = a.transpose(newaxes_a).reshape(newshape_a) - bt = b.transpose(newaxes_b).reshape(newshape_b) - res = dot(at, bt) - return res.reshape(olda + oldb) - - -def _roll_dispatcher(a, shift, axis=None): - return (a,) - - -@array_function_dispatch(_roll_dispatcher) -def roll(a, shift, axis=None): - """ - Roll array elements along a given axis. - - Elements that roll beyond the last position are re-introduced at - the first. - - Parameters - ---------- - a : array_like - Input array. - shift : int or tuple of ints - The number of places by which elements are shifted. 
If a tuple, - then `axis` must be a tuple of the same size, and each of the - given axes is shifted by the corresponding number. If an int - while `axis` is a tuple of ints, then the same value is used for - all given axes. - axis : int or tuple of ints, optional - Axis or axes along which elements are shifted. By default, the - array is flattened before shifting, after which the original - shape is restored. - - Returns - ------- - res : ndarray - Output array, with the same shape as `a`. - - See Also - -------- - rollaxis : Roll the specified axis backwards, until it lies in a - given position. - - Notes - ----- - .. versionadded:: 1.12.0 - - Supports rolling over multiple dimensions simultaneously. - - Examples - -------- - >>> x = np.arange(10) - >>> np.roll(x, 2) - array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) - >>> np.roll(x, -2) - array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) - - >>> x2 = np.reshape(x, (2,5)) - >>> x2 - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> np.roll(x2, 1) - array([[9, 0, 1, 2, 3], - [4, 5, 6, 7, 8]]) - >>> np.roll(x2, -1) - array([[1, 2, 3, 4, 5], - [6, 7, 8, 9, 0]]) - >>> np.roll(x2, 1, axis=0) - array([[5, 6, 7, 8, 9], - [0, 1, 2, 3, 4]]) - >>> np.roll(x2, -1, axis=0) - array([[5, 6, 7, 8, 9], - [0, 1, 2, 3, 4]]) - >>> np.roll(x2, 1, axis=1) - array([[4, 0, 1, 2, 3], - [9, 5, 6, 7, 8]]) - >>> np.roll(x2, -1, axis=1) - array([[1, 2, 3, 4, 0], - [6, 7, 8, 9, 5]]) - - """ - a = asanyarray(a) - if axis is None: - return roll(a.ravel(), shift, 0).reshape(a.shape) - - else: - axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) - broadcasted = broadcast(shift, axis) - if broadcasted.ndim > 1: - raise ValueError( - "'shift' and 'axis' should be scalars or 1D sequences") - shifts = {ax: 0 for ax in range(a.ndim)} - for sh, ax in broadcasted: - shifts[ax] += sh - - rolls = [((slice(None), slice(None)),)] * a.ndim - for ax, offset in shifts.items(): - offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. - if offset: - # (original, result), (original, result) - rolls[ax] = ((slice(None, -offset), slice(offset, None)), - (slice(-offset, None), slice(None, offset))) - - result = empty_like(a) - for indices in itertools.product(*rolls): - arr_index, res_index = zip(*indices) - result[res_index] = a[arr_index] - - return result - - -def _rollaxis_dispatcher(a, axis, start=None): - return (a,) - - -@array_function_dispatch(_rollaxis_dispatcher) -def rollaxis(a, axis, start=0): - """ - Roll the specified axis backwards, until it lies in a given position. - - This function continues to be supported for backward compatibility, but you - should prefer `moveaxis`. The `moveaxis` function was added in NumPy - 1.11. - - Parameters - ---------- - a : ndarray - Input array. - axis : int - The axis to roll backwards. The positions of the other axes do not - change relative to one another. - start : int, optional - The axis is rolled until it lies before this position. The default, - 0, results in a "complete" roll. - - Returns - ------- - res : ndarray - For NumPy >= 1.10.0 a view of `a` is always returned. For earlier - NumPy versions a view of `a` is returned only if the order of the - axes is changed, otherwise the input array is returned. - - See Also - -------- - moveaxis : Move array axes to new positions. - roll : Roll the elements of an array by a number of positions along a - given axis. 
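The slicing scheme used by ``roll`` above can be reproduced directly for the 1-D case: split the array at the offset and swap the two pieces (a sketch assuming ``0 < shift < len(x)``):

import numpy as np

x, shift = np.arange(10), 2
manual = np.concatenate((x[-shift:], x[:-shift]))  # tail first, then head
assert (np.roll(x, shift) == manual).all()         # [8, 9, 0, 1, ..., 7]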
- - Examples - -------- - >>> a = np.ones((3,4,5,6)) - >>> np.rollaxis(a, 3, 1).shape - (3, 6, 4, 5) - >>> np.rollaxis(a, 2).shape - (5, 3, 4, 6) - >>> np.rollaxis(a, 1, 4).shape - (3, 5, 6, 4) - - """ - n = a.ndim - axis = normalize_axis_index(axis, n) - if start < 0: - start += n - msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" - if not (0 <= start < n + 1): - raise AxisError(msg % ('start', -n, 'start', n + 1, start)) - if axis < start: - # it's been removed - start -= 1 - if axis == start: - return a[...] - axes = list(range(0, n)) - axes.remove(axis) - axes.insert(start, axis) - return a.transpose(axes) - - -def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): - """ - Normalizes an axis argument into a tuple of non-negative integer axes. - - This handles shorthands such as ``1`` and converts them to ``(1,)``, - as well as performing the handling of negative indices covered by - `normalize_axis_index`. - - By default, this forbids axes from being specified multiple times. - - Used internally by multi-axis-checking logic. - - .. versionadded:: 1.13.0 - - Parameters - ---------- - axis : int, iterable of int - The un-normalized index or indices of the axis. - ndim : int - The number of dimensions of the array that `axis` should be normalized - against. - argname : str, optional - A prefix to put before the error message, typically the name of the - argument. - allow_duplicate : bool, optional - If False, the default, disallow an axis from being specified twice. - - Returns - ------- - normalized_axes : tuple of int - The normalized axis index, such that `0 <= normalized_axis < ndim` - - Raises - ------ - AxisError - If any axis provided is out of range - ValueError - If an axis is repeated - - See also - -------- - normalize_axis_index : normalizing a single scalar axis - """ - # Optimization to speed-up the most common cases. - if type(axis) not in (tuple, list): - try: - axis = [operator.index(axis)] - except TypeError: - pass - # Going via an iterator directly is slower than via list comprehension. - axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) - if not allow_duplicate and len(set(axis)) != len(axis): - if argname: - raise ValueError('repeated axis in `{}` argument'.format(argname)) - else: - raise ValueError('repeated axis') - return axis - - -def _moveaxis_dispatcher(a, source, destination): - return (a,) - - -@array_function_dispatch(_moveaxis_dispatcher) -def moveaxis(a, source, destination): - """ - Move axes of an array to new positions. - - Other axes remain in their original order. - - .. versionadded:: 1.11.0 - - Parameters - ---------- - a : np.ndarray - The array whose axes should be reordered. - source : int or sequence of int - Original positions of the axes to move. These must be unique. - destination : int or sequence of int - Destination positions for each of the original axes. These must also be - unique. - - Returns - ------- - result : np.ndarray - Array with moved axes. This array is a view of the input array. - - See Also - -------- - transpose: Permute the dimensions of an array. - swapaxes: Interchange two axes of an array. 
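As the backward-compatibility note for ``rollaxis`` above suggests, ``moveaxis`` expresses the same reordering with a clearer calling convention; one quick cross-check:

import numpy as np

a = np.ones((3, 4, 5, 6))
assert np.rollaxis(a, 3, 1).shape == (3, 6, 4, 5)
assert np.moveaxis(a, 3, 1).shape == (3, 6, 4, 5)  # same move, clearer intent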
- - Examples - -------- - - >>> x = np.zeros((3, 4, 5)) - >>> np.moveaxis(x, 0, -1).shape - (4, 5, 3) - >>> np.moveaxis(x, -1, 0).shape - (5, 3, 4) - - These all achieve the same result: - - >>> np.transpose(x).shape - (5, 4, 3) - >>> np.swapaxes(x, 0, -1).shape - (5, 4, 3) - >>> np.moveaxis(x, [0, 1], [-1, -2]).shape - (5, 4, 3) - >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape - (5, 4, 3) - - """ - try: - # allow duck-array types if they define transpose - transpose = a.transpose - except AttributeError: - a = asarray(a) - transpose = a.transpose - - source = normalize_axis_tuple(source, a.ndim, 'source') - destination = normalize_axis_tuple(destination, a.ndim, 'destination') - if len(source) != len(destination): - raise ValueError('`source` and `destination` arguments must have ' - 'the same number of elements') - - order = [n for n in range(a.ndim) if n not in source] - - for dest, src in sorted(zip(destination, source)): - order.insert(dest, src) - - result = transpose(order) - return result - - -# fix hack in scipy which imports this function -def _move_axis_to_0(a, axis): - return moveaxis(a, axis, 0) - - -def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): - return (a, b) - - -@array_function_dispatch(_cross_dispatcher) -def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): - """ - Return the cross product of two (arrays of) vectors. - - The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular - to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors - are defined by the last axis of `a` and `b` by default, and these axes - can have dimensions 2 or 3. Where the dimension of either `a` or `b` is - 2, the third component of the input vector is assumed to be zero and the - cross product calculated accordingly. In cases where both input vectors - have dimension 2, the z-component of the cross product is returned. - - Parameters - ---------- - a : array_like - Components of the first vector(s). - b : array_like - Components of the second vector(s). - axisa : int, optional - Axis of `a` that defines the vector(s). By default, the last axis. - axisb : int, optional - Axis of `b` that defines the vector(s). By default, the last axis. - axisc : int, optional - Axis of `c` containing the cross product vector(s). Ignored if - both input vectors have dimension 2, as the return is scalar. - By default, the last axis. - axis : int, optional - If defined, the axis of `a`, `b` and `c` that defines the vector(s) - and cross product(s). Overrides `axisa`, `axisb` and `axisc`. - - Returns - ------- - c : ndarray - Vector cross product(s). - - Raises - ------ - ValueError - When the dimension of the vector(s) in `a` and/or `b` does not - equal 2 or 3. - - See Also - -------- - inner : Inner product - outer : Outer product. - ix_ : Construct index arrays. - - Notes - ----- - .. versionadded:: 1.9.0 - - Supports full broadcasting of the inputs. - - Examples - -------- - Vector cross-product. - - >>> x = [1, 2, 3] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([-3, 6, -3]) - - One vector with dimension 2. - - >>> x = [1, 2] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Equivalently: - - >>> x = [1, 2, 0] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Both vectors with dimension 2. - - >>> x = [1,2] - >>> y = [4,5] - >>> np.cross(x, y) - array(-3) - - Multiple vector cross-products. Note that the direction of the cross - product vector is defined by the `right-hand rule`. 
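The per-component products in the implementation below can be checked against the textbook determinant definition (an illustrative verification, not how the function itself computes it):

import numpy as np

x, y = np.array([1, 2, 3]), np.array([4, 5, 6])
expected = np.array([x[1]*y[2] - x[2]*y[1],   # i component
                     x[2]*y[0] - x[0]*y[2],   # j component
                     x[0]*y[1] - x[1]*y[0]])  # k component
assert (np.cross(x, y) == expected).all()     # array([-3,  6, -3])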
- - >>> x = np.array([[1,2,3], [4,5,6]]) - >>> y = np.array([[4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[-3, 6, -3], - [ 3, -6, 3]]) - - The orientation of `c` can be changed using the `axisc` keyword. - - >>> np.cross(x, y, axisc=0) - array([[-3, 3], - [ 6, -6], - [-3, 3]]) - - Change the vector definition of `x` and `y` using `axisa` and `axisb`. - - >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) - >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[ -6, 12, -6], - [ 0, 0, 0], - [ 6, -12, 6]]) - >>> np.cross(x, y, axisa=0, axisb=0) - array([[-24, 48, -24], - [-30, 60, -30], - [-36, 72, -36]]) - - """ - if axis is not None: - axisa, axisb, axisc = (axis,) * 3 - a = asarray(a) - b = asarray(b) - # Check axisa and axisb are within bounds - axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') - axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') - - # Move working axis to the end of the shape - a = moveaxis(a, axisa, -1) - b = moveaxis(b, axisb, -1) - msg = ("incompatible dimensions for cross product\n" - "(dimension must be 2 or 3)") - if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): - raise ValueError(msg) - - # Create the output array - shape = broadcast(a[..., 0], b[..., 0]).shape - if a.shape[-1] == 3 or b.shape[-1] == 3: - shape += (3,) - # Check axisc is within bounds - axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') - dtype = promote_types(a.dtype, b.dtype) - cp = empty(shape, dtype) - - # create local aliases for readability - a0 = a[..., 0] - a1 = a[..., 1] - if a.shape[-1] == 3: - a2 = a[..., 2] - b0 = b[..., 0] - b1 = b[..., 1] - if b.shape[-1] == 3: - b2 = b[..., 2] - if cp.ndim != 0 and cp.shape[-1] == 3: - cp0 = cp[..., 0] - cp1 = cp[..., 1] - cp2 = cp[..., 2] - - if a.shape[-1] == 2: - if b.shape[-1] == 2: - # a0 * b1 - a1 * b0 - multiply(a0, b1, out=cp) - cp -= a1 * b0 - return cp - else: - assert b.shape[-1] == 3 - # cp0 = a1 * b2 - 0 (a2 = 0) - # cp1 = 0 - a0 * b2 (a2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - multiply(a0, b2, out=cp1) - negative(cp1, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - else: - assert a.shape[-1] == 3 - if b.shape[-1] == 3: - # cp0 = a1 * b2 - a2 * b1 - # cp1 = a2 * b0 - a0 * b2 - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - tmp = array(a2 * b1) - cp0 -= tmp - multiply(a2, b0, out=cp1) - multiply(a0, b2, out=tmp) - cp1 -= tmp - multiply(a0, b1, out=cp2) - multiply(a1, b0, out=tmp) - cp2 -= tmp - else: - assert b.shape[-1] == 2 - # cp0 = 0 - a2 * b1 (b2 = 0) - # cp1 = a2 * b0 - 0 (b2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a2, b1, out=cp0) - negative(cp0, out=cp0) - multiply(a2, b0, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - - return moveaxis(cp, -1, axisc) - - -little_endian = (sys.byteorder == 'little') - - -@set_module('numpy') -def indices(dimensions, dtype=int, sparse=False): - """ - Return an array representing the indices of a grid. - - Compute an array where the subarrays contain index values 0, 1, ... - varying only along the corresponding axis. - - Parameters - ---------- - dimensions : sequence of ints - The shape of the grid. - dtype : dtype, optional - Data type of the result. - sparse : boolean, optional - Return a sparse representation of the grid instead of a dense - representation. Default is False. - - .. 
versionadded:: 1.17 - - Returns - ------- - grid : one ndarray or tuple of ndarrays - If sparse is False: - Returns one array of grid indices, - ``grid.shape = (len(dimensions),) + tuple(dimensions)``. - If sparse is True: - Returns a tuple of arrays, with - ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with - dimensions[i] in the ith place - - See Also - -------- - mgrid, ogrid, meshgrid - - Notes - ----- - The output shape in the dense case is obtained by prepending the number - of dimensions in front of the tuple of dimensions, i.e. if `dimensions` - is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is - ``(N, r0, ..., rN-1)``. - - The subarrays ``grid[k]`` contains the N-D array of indices along the - ``k-th`` axis. Explicitly:: - - grid[k, i0, i1, ..., iN-1] = ik - - Examples - -------- - >>> grid = np.indices((2, 3)) - >>> grid.shape - (2, 2, 3) - >>> grid[0] # row indices - array([[0, 0, 0], - [1, 1, 1]]) - >>> grid[1] # column indices - array([[0, 1, 2], - [0, 1, 2]]) - - The indices can be used as an index into an array. - - >>> x = np.arange(20).reshape(5, 4) - >>> row, col = np.indices((2, 3)) - >>> x[row, col] - array([[0, 1, 2], - [4, 5, 6]]) - - Note that it would be more straightforward in the above example to - extract the required elements directly with ``x[:2, :3]``. - - If sparse is set to true, the grid will be returned in a sparse - representation. - - >>> i, j = np.indices((2, 3), sparse=True) - >>> i.shape - (2, 1) - >>> j.shape - (1, 3) - >>> i # row indices - array([[0], - [1]]) - >>> j # column indices - array([[0, 1, 2]]) - - """ - dimensions = tuple(dimensions) - N = len(dimensions) - shape = (1,)*N - if sparse: - res = tuple() - else: - res = empty((N,)+dimensions, dtype=dtype) - for i, dim in enumerate(dimensions): - idx = arange(dim, dtype=dtype).reshape( - shape[:i] + (dim,) + shape[i+1:] - ) - if sparse: - res = res + (idx,) - else: - res[i] = idx - return res - - -@set_module('numpy') -def fromfunction(function, shape, **kwargs): - """ - Construct an array by executing a function over each coordinate. - - The resulting array therefore has a value ``fn(x, y, z)`` at - coordinate ``(x, y, z)``. - - Parameters - ---------- - function : callable - The function is called with N parameters, where N is the rank of - `shape`. Each parameter represents the coordinates of the array - varying along a specific axis. For example, if `shape` - were ``(2, 2)``, then the parameters would be - ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` - shape : (N,) tuple of ints - Shape of the output array, which also determines the shape of - the coordinate arrays passed to `function`. - dtype : data-type, optional - Data-type of the coordinate arrays passed to `function`. - By default, `dtype` is float. - - Returns - ------- - fromfunction : any - The result of the call to `function` is passed back directly. - Therefore the shape of `fromfunction` is completely determined by - `function`. If `function` returns a scalar value, the shape of - `fromfunction` would not match the `shape` parameter. - - See Also - -------- - indices, meshgrid - - Notes - ----- - Keywords other than `dtype` are passed to `function`. 
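``fromfunction`` is a thin wrapper: as its implementation just below shows, it builds coordinate grids with ``indices`` and calls ``function`` once on them, so the two spellings here agree:

import numpy as np

grid = np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
i, j = np.indices((3, 3), dtype=int)
assert (grid == i + j).all()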
- - Examples - -------- - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) - array([[ True, False, False], - [False, True, False], - [False, False, True]]) - - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) - array([[0, 1, 2], - [1, 2, 3], - [2, 3, 4]]) - - """ - dtype = kwargs.pop('dtype', float) - args = indices(shape, dtype=dtype) - return function(*args, **kwargs) - - -def _frombuffer(buf, dtype, shape, order): - return frombuffer(buf, dtype=dtype).reshape(shape, order=order) - - -@set_module('numpy') -def isscalar(element): - """ - Returns True if the type of `element` is a scalar type. - - Parameters - ---------- - element : any - Input argument, can be of any type and shape. - - Returns - ------- - val : bool - True if `element` is a scalar type, False if it is not. - - See Also - -------- - ndim : Get the number of dimensions of an array - - Notes - ----- - If you need a stricter way to identify a *numerical* scalar, use - ``isinstance(x, numbers.Number)``, as that returns ``False`` for most - non-numerical elements such as strings. - - In most cases ``np.ndim(x) == 0`` should be used instead of this function, - as that will also return true for 0d arrays. This is how numpy overloads - functions in the style of the ``dx`` arguments to `gradient` and the ``bins`` - argument to `histogram`. Some key differences: - - +--------------------------------------+---------------+-------------------+ - | x |``isscalar(x)``|``np.ndim(x) == 0``| - +======================================+===============+===================+ - | PEP 3141 numeric objects (including | ``True`` | ``True`` | - | builtins) | | | - +--------------------------------------+---------------+-------------------+ - | builtin string and buffer objects | ``True`` | ``True`` | - +--------------------------------------+---------------+-------------------+ - | other builtin objects, like | ``False`` | ``True`` | - | `pathlib.Path`, `Exception`, | | | - | the result of `re.compile` | | | - +--------------------------------------+---------------+-------------------+ - | third-party objects like | ``False`` | ``True`` | - | `matplotlib.figure.Figure` | | | - +--------------------------------------+---------------+-------------------+ - | zero-dimensional numpy arrays | ``False`` | ``True`` | - +--------------------------------------+---------------+-------------------+ - | other numpy arrays | ``False`` | ``False`` | - +--------------------------------------+---------------+-------------------+ - | `list`, `tuple`, and other sequence | ``False`` | ``False`` | - | objects | | | - +--------------------------------------+---------------+-------------------+ - - Examples - -------- - >>> np.isscalar(3.1) - True - >>> np.isscalar(np.array(3.1)) - False - >>> np.isscalar([3.1]) - False - >>> np.isscalar(False) - True - >>> np.isscalar('numpy') - True - - NumPy supports PEP 3141 numbers: - - >>> from fractions import Fraction - >>> np.isscalar(Fraction(5, 17)) - True - >>> from numbers import Number - >>> np.isscalar(Number()) - True - - """ - return (isinstance(element, generic) - or type(element) in ScalarType - or isinstance(element, numbers.Number)) - - -@set_module('numpy') -def binary_repr(num, width=None): - """ - Return the binary representation of the input number as a string. - - For negative numbers, if width is not given, a minus sign is added to the - front. If width is given, the two's complement of the number is - returned, with respect to that width. 
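Concretely, for negative ``num`` and a given ``width``, the returned string encodes the non-negative integer ``(1 << width) + num``; a quick arithmetic check:

import numpy as np

num, width = -3, 5
twos_complement = (1 << width) + num   # 32 - 3 = 29 == 0b11101
assert np.binary_repr(num, width=width) == bin(twos_complement)[2:].zfill(width)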
- - In a two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement - system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. - - Parameters - ---------- - num : int - Only an integer decimal number can be used. - width : int, optional - The length of the returned string if `num` is positive, or the length - of the two's complement if `num` is negative, provided that `width` is - at least a sufficient number of bits for `num` to be represented in the - designated form. - - If the `width` value is insufficient, it will be ignored, and `num` will - be returned in binary (`num` > 0) or two's complement (`num` < 0) form - with its width equal to the minimum number of bits needed to represent - the number in the designated form. This behavior is deprecated and will - later raise an error. - - .. deprecated:: 1.12.0 - - Returns - ------- - bin : str - Binary representation of `num` or two's complement of `num`. - - See Also - -------- - base_repr: Return a string representation of a number in the given base - system. - bin: Python's built-in binary representation generator of an integer. - - Notes - ----- - `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x - faster. - - References - ---------- - .. [1] Wikipedia, "Two's complement", - https://en.wikipedia.org/wiki/Two's_complement - - Examples - -------- - >>> np.binary_repr(3) - '11' - >>> np.binary_repr(-3) - '-11' - >>> np.binary_repr(3, width=4) - '0011' - - The two's complement is returned when the input number is negative and - width is specified: - - >>> np.binary_repr(-3, width=3) - '101' - >>> np.binary_repr(-3, width=5) - '11101' - - """ - def warn_if_insufficient(width, binwidth): - if width is not None and width < binwidth: - warnings.warn( - "Insufficient bit width provided. This behavior " - "will raise an error in the future.", DeprecationWarning, - stacklevel=3) - - # Ensure that num is a Python integer to avoid overflow or unwanted - # casts to floating point. - num = operator.index(num) - - if num == 0: - return '0' * (width or 1) - - elif num > 0: - binary = bin(num)[2:] - binwidth = len(binary) - outwidth = (binwidth if width is None - else max(binwidth, width)) - warn_if_insufficient(width, binwidth) - return binary.zfill(outwidth) - - else: - if width is None: - return '-' + bin(-num)[2:] - - else: - poswidth = len(bin(-num)[2:]) - - # See gh-8679: remove extra digit - # for numbers at boundaries. - if 2**(poswidth - 1) == -num: - poswidth -= 1 - - twocomp = 2**(poswidth + 1) + num - binary = bin(twocomp)[2:] - binwidth = len(binary) - - outwidth = max(binwidth, width) - warn_if_insufficient(width, binwidth) - return '1' * (outwidth - binwidth) + binary - - -@set_module('numpy') -def base_repr(number, base=2, padding=0): - """ - Return a string representation of a number in the given base system. - - Parameters - ---------- - number : int - The value to convert. Positive and negative values are handled. - base : int, optional - Convert `number` to the `base` number system. The valid range is 2-36, - the default value is 2. - padding : int, optional - Number of zeros padded on the left. Default is 0 (no padding). - - Returns - ------- - out : str - String representation of `number` in `base` system. - - See Also - -------- - binary_repr : Faster version of `base_repr` for base 2. 
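``base_repr`` output round-trips through Python's built-in ``int(s, base)``, which makes for a handy sanity check (illustrative values):

import numpy as np

for n, base in [(5, 2), (6, 5), (255, 16)]:
    assert int(np.base_repr(n, base), base) == n  # e.g. 'FF' -> 255 for base 16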
- - Examples - -------- - >>> np.base_repr(5) - '101' - >>> np.base_repr(6, 5) - '11' - >>> np.base_repr(7, base=5, padding=3) - '00012' - - >>> np.base_repr(10, base=16) - 'A' - >>> np.base_repr(32, base=16) - '20' - - """ - digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' - if base > len(digits): - raise ValueError("Bases greater than 36 not handled in base_repr.") - elif base < 2: - raise ValueError("Bases less than 2 not handled in base_repr.") - - num = abs(number) - res = [] - while num: - res.append(digits[num % base]) - num //= base - if padding: - res.append('0' * padding) - if number < 0: - res.append('-') - return ''.join(reversed(res or '0')) - - -# These are all essentially abbreviations -# These might wind up in a special abbreviations module - - -def _maketup(descr, val): - dt = dtype(descr) - # Place val in all scalar tuples: - fields = dt.fields - if fields is None: - return val - else: - res = [_maketup(fields[name][0], val) for name in dt.names] - return tuple(res) - - -@set_module('numpy') -def identity(n, dtype=None): - """ - Return the identity array. - - The identity array is a square array with ones on - the main diagonal. - - Parameters - ---------- - n : int - Number of rows (and columns) in `n` x `n` output. - dtype : data-type, optional - Data-type of the output. Defaults to ``float``. - - Returns - ------- - out : ndarray - `n` x `n` array with its main diagonal set to one, - and all other elements 0. - - Examples - -------- - >>> np.identity(3) - array([[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]) - - """ - from numpy import eye - return eye(n, dtype=dtype) - - -def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): - return (a, b) - - -@array_function_dispatch(_allclose_dispatcher) -def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): - """ - Returns True if two arrays are element-wise equal within a tolerance. - - The tolerance values are positive, typically very small numbers. The - relative difference (`rtol` * abs(`b`)) and the absolute difference - `atol` are added together to compare against the absolute difference - between `a` and `b`. - - NaNs are treated as equal if they are in the same place and if - ``equal_nan=True``. Infs are treated as equal if they are in the same - place and of the same sign in both arrays. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - rtol : float - The relative tolerance parameter (see Notes). - atol : float - The absolute tolerance parameter (see Notes). - equal_nan : bool - Whether to compare NaN's as equal. If True, NaN's in `a` will be - considered equal to NaN's in `b` in the output array. - - .. versionadded:: 1.10.0 - - Returns - ------- - allclose : bool - Returns True if the two arrays are equal within the given - tolerance; False otherwise. - - See Also - -------- - isclose, all, any, equal - - Notes - ----- - If the following equation is element-wise True, then allclose returns - True. - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - The above equation is not symmetric in `a` and `b`, so that - ``allclose(a, b)`` might be different from ``allclose(b, a)`` in - some rare cases. - - The comparison of `a` and `b` uses standard broadcasting, which - means that `a` and `b` need not have the same shape in order for - ``allclose(a, b)`` to evaluate to True. The same is true for - `equal` but not `array_equal`. 
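The asymmetry described above is easy to trigger, since ``b`` is the reference value in ``atol + rtol * absolute(b)`` (a small demonstration with ``atol`` disabled):

import numpy as np

assert np.allclose(1.0, 2.0, rtol=0.5, atol=0.0)      # |1-2| <= 0.5*|2| = 1.0
assert not np.allclose(2.0, 1.0, rtol=0.5, atol=0.0)  # |1-2| >  0.5*|1| = 0.5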
- - Examples - -------- - >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) - False - >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) - True - >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) - False - >>> np.allclose([1.0, np.nan], [1.0, np.nan]) - False - >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) - True - - """ - res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) - return bool(res) - - -def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): - return (a, b) - - -@array_function_dispatch(_isclose_dispatcher) -def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): - """ - Returns a boolean array where two arrays are element-wise equal within a - tolerance. - - The tolerance values are positive, typically very small numbers. The - relative difference (`rtol` * abs(`b`)) and the absolute difference - `atol` are added together to compare against the absolute difference - between `a` and `b`. - - .. warning:: The default `atol` is not appropriate for comparing numbers - that are much smaller than one (see Notes). - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - rtol : float - The relative tolerance parameter (see Notes). - atol : float - The absolute tolerance parameter (see Notes). - equal_nan : bool - Whether to compare NaN's as equal. If True, NaN's in `a` will be - considered equal to NaN's in `b` in the output array. - - Returns - ------- - y : array_like - Returns a boolean array of where `a` and `b` are equal within the - given tolerance. If both `a` and `b` are scalars, returns a single - boolean value. - - See Also - -------- - allclose - - Notes - ----- - .. versionadded:: 1.7.0 - - For finite values, isclose uses the following equation to test whether - two floating point values are equivalent. - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - Unlike the built-in `math.isclose`, the above equation is not symmetric - in `a` and `b` -- it assumes `b` is the reference value -- so that - `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, - the default value of atol is not zero, and is used to determine what - small values should be considered close to zero. The default value is - appropriate for expected values of order unity: if the expected values - are significantly smaller than one, it can result in false positives. - `atol` should be carefully selected for the use case at hand. A zero value - for `atol` will result in `False` if either `a` or `b` is zero. - - Examples - -------- - >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) - array([ True, False]) - >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) - array([ True, True]) - >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) - array([False, True]) - >>> np.isclose([1.0, np.nan], [1.0, np.nan]) - array([ True, False]) - >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) - array([ True, True]) - >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) - array([ True, False]) - >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) - array([False, False]) - >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) - array([ True, True]) - >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) - array([False, True]) - """ - def within_tol(x, y, atol, rtol): - with errstate(invalid='ignore'): - return less_equal(abs(x-y), atol + rtol * abs(y)) - - x = asanyarray(a) - y = asanyarray(b) - - # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). - # This will cause casting of x later. 
Also, make sure to allow subclasses - # (e.g., for numpy.ma). - dt = multiarray.result_type(y, 1.) - y = array(y, dtype=dt, copy=False, subok=True) - - xfin = isfinite(x) - yfin = isfinite(y) - if all(xfin) and all(yfin): - return within_tol(x, y, atol, rtol) - else: - finite = xfin & yfin - cond = zeros_like(finite, subok=True) - # Because we're using boolean indexing, x & y must be the same shape. - # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in - # lib.stride_tricks, though, so we can't import it here. - x = x * ones_like(cond) - y = y * ones_like(cond) - # Avoid subtraction with infinite/nan values... - cond[finite] = within_tol(x[finite], y[finite], atol, rtol) - # Check for equality of infinite values... - cond[~finite] = (x[~finite] == y[~finite]) - if equal_nan: - # Make NaN == NaN - both_nan = isnan(x) & isnan(y) - - # Needed to treat masked arrays correctly. = True would not work. - cond[both_nan] = both_nan[both_nan] - - return cond[()] # Flatten 0d arrays to scalars - - -def _array_equal_dispatcher(a1, a2): - return (a1, a2) - - -@array_function_dispatch(_array_equal_dispatcher) -def array_equal(a1, a2): - """ - True if two arrays have the same shape and elements, False otherwise. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - b : bool - Returns True if the arrays are equal. - - See Also - -------- - allclose: Returns True if two arrays are element-wise equal within a - tolerance. - array_equiv: Returns True if input arrays are shape consistent and all - elements equal. - - Examples - -------- - >>> np.array_equal([1, 2], [1, 2]) - True - >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) - True - >>> np.array_equal([1, 2], [1, 2, 3]) - False - >>> np.array_equal([1, 2], [1, 4]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except Exception: - return False - if a1.shape != a2.shape: - return False - return bool(asarray(a1 == a2).all()) - - -def _array_equiv_dispatcher(a1, a2): - return (a1, a2) - - -@array_function_dispatch(_array_equiv_dispatcher) -def array_equiv(a1, a2): - """ - Returns True if input arrays are shape consistent and all elements equal. - - Shape consistent means they are either the same shape, or one input array - can be broadcasted to create the same shape as the other one. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - out : bool - True if equivalent, False otherwise. - - Examples - -------- - >>> np.array_equiv([1, 2], [1, 2]) - True - >>> np.array_equiv([1, 2], [1, 3]) - False - - Showing the shape equivalence: - - >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) - True - >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) - False - - >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except Exception: - return False - try: - multiarray.broadcast(a1, a2) - except Exception: - return False - - return bool(asarray(a1 == a2).all()) - - -Inf = inf = infty = Infinity = PINF -nan = NaN = NAN -False_ = bool_(False) -True_ = bool_(True) - - -def extend_all(module): - existing = set(__all__) - mall = getattr(module, '__all__') - for a in mall: - if a not in existing: - __all__.append(a) - - -from .umath import * -from .numerictypes import * -from . import fromnumeric -from .fromnumeric import * -from . import arrayprint -from .arrayprint import * -from . import _asarray -from ._asarray import * -from . 
import _ufunc_config -from ._ufunc_config import * -extend_all(fromnumeric) -extend_all(umath) -extend_all(numerictypes) -extend_all(arrayprint) -extend_all(_asarray) -extend_all(_ufunc_config) diff --git a/venv/lib/python3.7/site-packages/numpy/core/numerictypes.py b/venv/lib/python3.7/site-packages/numpy/core/numerictypes.py deleted file mode 100644 index 761c708..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/numerictypes.py +++ /dev/null @@ -1,675 +0,0 @@ -""" -numerictypes: Define the numeric type objects - -This module is designed so "from numerictypes import \\*" is safe. -Exported symbols include: - - Dictionary with all registered number types (including aliases): - typeDict - - Type objects (not all will be available, depends on platform): - see variable sctypes for which ones you have - - Bit-width names - - int8 int16 int32 int64 int128 - uint8 uint16 uint32 uint64 uint128 - float16 float32 float64 float96 float128 float256 - complex32 complex64 complex128 complex192 complex256 complex512 - datetime64 timedelta64 - - c-based names - - bool_ - - object_ - - void, str_, unicode_ - - byte, ubyte, - short, ushort - intc, uintc, - intp, uintp, - int_, uint, - longlong, ulonglong, - - single, csingle, - float_, complex_, - longfloat, clongfloat, - - As part of the type-hierarchy: xx -- is bit-width - - generic - +-> bool_ (kind=b) - +-> number - | +-> integer - | | +-> signedinteger (intxx) (kind=i) - | | | byte - | | | short - | | | intc - | | | intp int0 - | | | int_ - | | | longlong - | | \\-> unsignedinteger (uintxx) (kind=u) - | | ubyte - | | ushort - | | uintc - | | uintp uint0 - | | uint_ - | | ulonglong - | +-> inexact - | +-> floating (floatxx) (kind=f) - | | half - | | single - | | float_ (double) - | | longfloat - | \\-> complexfloating (complexxx) (kind=c) - | csingle (singlecomplex) - | complex_ (cfloat, cdouble) - | clongfloat (longcomplex) - +-> flexible - | +-> character - | | str_ (string_, bytes_) (kind=S) [Python 2] - | | unicode_ (kind=U) [Python 2] - | | - | | bytes_ (string_) (kind=S) [Python 3] - | | str_ (unicode_) (kind=U) [Python 3] - | | - | \\-> void (kind=V) - \\-> object_ (not used much) (kind=O) - -""" -from __future__ import division, absolute_import, print_function - -import types as _types -import sys -import numbers -import warnings - -from numpy.compat import bytes, long -from numpy.core.multiarray import ( - typeinfo, ndarray, array, empty, dtype, datetime_data, - datetime_as_string, busday_offset, busday_count, is_busday, - busdaycalendar - ) -from numpy.core.overrides import set_module - -# we add more at the bottom -__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', - 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', - 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', - 'issubdtype', 'datetime_data', 'datetime_as_string', - 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', - ] - -# we don't need all these imports, but we need to keep them for compatibility -# for users using np.core.numerictypes.UPPER_TABLE -from ._string_helpers import ( - english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE -) - -from ._type_aliases import ( - sctypeDict, - sctypeNA, - allTypes, - bitname, - sctypes, - _concrete_types, - _concrete_typeinfo, - _bits_of, -) -from ._dtype import _kind_name - -# we don't export these for import *, but we do want them accessible -# as numerictypes.bool, etc. 
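# Editor's sketch, not part of the original numerictypes module: the
# hierarchy drawn in the module docstring can be verified with ordinary
# issubclass checks against an installed numpy (run standalone, not here):
import numpy as np

assert issubclass(np.int32, np.signedinteger)    # intxx -> signedinteger
assert issubclass(np.uint8, np.unsignedinteger)  # uintxx -> unsignedinteger
assert issubclass(np.float64, np.inexact)        # floating -> inexact
assert not issubclass(np.float64, np.integer)    # floats are not integers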
-if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str -else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - -# We use this later -generic = allTypes['generic'] - -genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', - 'int32', 'uint32', 'int64', 'uint64', 'int128', - 'uint128', 'float16', - 'float32', 'float64', 'float80', 'float96', 'float128', - 'float256', - 'complex32', 'complex64', 'complex128', 'complex160', - 'complex192', 'complex256', 'complex512', 'object'] - -@set_module('numpy') -def maximum_sctype(t): - """ - Return the scalar type of highest precision of the same kind as the input. - - Parameters - ---------- - t : dtype or dtype specifier - The input data type. This can be a `dtype` object or an object that - is convertible to a `dtype`. - - Returns - ------- - out : dtype - The highest precision data type of the same kind (`dtype.kind`) as `t`. - - See Also - -------- - obj2sctype, mintypecode, sctype2char - dtype - - Examples - -------- - >>> np.maximum_sctype(int) - - >>> np.maximum_sctype(np.uint8) - - >>> np.maximum_sctype(complex) - # may vary - - >>> np.maximum_sctype(str) - - - >>> np.maximum_sctype('i2') - - >>> np.maximum_sctype('f4') - # may vary - - """ - g = obj2sctype(t) - if g is None: - return t - t = g - base = _kind_name(dtype(t)) - if base in sctypes: - return sctypes[base][-1] - else: - return t - - -@set_module('numpy') -def issctype(rep): - """ - Determines whether the given object represents a scalar data-type. - - Parameters - ---------- - rep : any - If `rep` is an instance of a scalar dtype, True is returned. If not, - False is returned. - - Returns - ------- - out : bool - Boolean result of check whether `rep` is a scalar dtype. - - See Also - -------- - issubsctype, issubdtype, obj2sctype, sctype2char - - Examples - -------- - >>> np.issctype(np.int32) - True - >>> np.issctype(list) - False - >>> np.issctype(1.1) - False - - Strings are also a scalar type: - - >>> np.issctype(np.dtype('str')) - True - - """ - if not isinstance(rep, (type, dtype)): - return False - try: - res = obj2sctype(rep) - if res and res != object_: - return True - return False - except Exception: - return False - - -@set_module('numpy') -def obj2sctype(rep, default=None): - """ - Return the scalar dtype or NumPy equivalent of Python type of an object. - - Parameters - ---------- - rep : any - The object of which the type is returned. - default : any, optional - If given, this is returned for objects whose types can not be - determined. If not given, None is returned for those objects. - - Returns - ------- - dtype : dtype or Python type - The data type of `rep`. - - See Also - -------- - sctype2char, issctype, issubsctype, issubdtype, maximum_sctype - - Examples - -------- - >>> np.obj2sctype(np.int32) - - >>> np.obj2sctype(np.array([1., 2.])) - - >>> np.obj2sctype(np.array([1.j])) - - - >>> np.obj2sctype(dict) - - >>> np.obj2sctype('string') - - >>> np.obj2sctype(1, default=list) - - - """ - # prevent abstract classes being upcast - if isinstance(rep, type) and issubclass(rep, generic): - return rep - # extract dtype from arrays - if isinstance(rep, ndarray): - return rep.dtype.type - # fall back on dtype to convert - try: - res = dtype(rep) - except Exception: - return default - else: - return res.type - - -@set_module('numpy') -def issubclass_(arg1, arg2): - """ - Determine if a class is a subclass of a second class. 
- - `issubclass_` is equivalent to the Python built-in ``issubclass``, - except that it returns False instead of raising a TypeError if one - of the arguments is not a class. - - Parameters - ---------- - arg1 : class - Input class. True is returned if `arg1` is a subclass of `arg2`. - arg2 : class or tuple of classes. - Input class. If a tuple of classes, True is returned if `arg1` is a - subclass of any of the tuple elements. - - Returns - ------- - out : bool - Whether `arg1` is a subclass of `arg2` or not. - - See Also - -------- - issubsctype, issubdtype, issctype - - Examples - -------- - >>> np.issubclass_(np.int32, int) - False # True on Python 2.7 - >>> np.issubclass_(np.int32, float) - False - - """ - try: - return issubclass(arg1, arg2) - except TypeError: - return False - - -@set_module('numpy') -def issubsctype(arg1, arg2): - """ - Determine if the first argument is a subclass of the second argument. - - Parameters - ---------- - arg1, arg2 : dtype or dtype specifier - Data-types. - - Returns - ------- - out : bool - The result. - - See Also - -------- - issctype, issubdtype, obj2sctype - - Examples - -------- - >>> np.issubsctype('S8', str) - False - >>> np.issubsctype(np.array([1]), int) - True - >>> np.issubsctype(np.array([1]), float) - False - - """ - return issubclass(obj2sctype(arg1), obj2sctype(arg2)) - - -@set_module('numpy') -def issubdtype(arg1, arg2): - """ - Returns True if first argument is a typecode lower/equal in type hierarchy. - - Parameters - ---------- - arg1, arg2 : dtype_like - dtype or string representing a typecode. - - Returns - ------- - out : bool - - See Also - -------- - issubsctype, issubclass_ - numpy.core.numerictypes : Overview of numpy type hierarchy. - - Examples - -------- - >>> np.issubdtype('S1', np.string_) - True - >>> np.issubdtype(np.float64, np.float32) - False - - """ - if not issubclass_(arg1, generic): - arg1 = dtype(arg1).type - if not issubclass_(arg2, generic): - arg2_orig = arg2 - arg2 = dtype(arg2).type - if not isinstance(arg2_orig, dtype): - # weird deprecated behaviour, that tried to infer np.floating from - # float, and similar less obvious things, such as np.generic from - # basestring - mro = arg2.mro() - arg2 = mro[1] if len(mro) > 1 else mro[0] - - def type_repr(x): - """ Helper to produce clear error messages """ - if not isinstance(x, type): - return repr(x) - elif issubclass(x, generic): - return "np.{}".format(x.__name__) - else: - return x.__name__ - - # 1.14, 2017-08-01 - warnings.warn( - "Conversion of the second argument of issubdtype from `{raw}` " - "to `{abstract}` is deprecated. In future, it will be treated " - "as `{concrete} == np.dtype({raw}).type`.".format( - raw=type_repr(arg2_orig), - abstract=type_repr(arg2), - concrete=type_repr(dtype(arg2_orig).type) - ), - FutureWarning, stacklevel=2 - ) - - return issubclass(arg1, arg2) - - -# This dictionary allows look up based on any alias for an array data-type -class _typedict(dict): - """ - Base object for a dictionary for look-up with any alias for an array dtype. - - Instances of `_typedict` can not be used as dictionaries directly, - first they have to be populated. 
- - """ - - def __getitem__(self, obj): - return dict.__getitem__(self, obj2sctype(obj)) - -nbytes = _typedict() -_alignment = _typedict() -_maxvals = _typedict() -_minvals = _typedict() -def _construct_lookups(): - for name, info in _concrete_typeinfo.items(): - obj = info.type - nbytes[obj] = info.bits // 8 - _alignment[obj] = info.alignment - if len(info) > 5: - _maxvals[obj] = info.max - _minvals[obj] = info.min - else: - _maxvals[obj] = None - _minvals[obj] = None - -_construct_lookups() - - -@set_module('numpy') -def sctype2char(sctype): - """ - Return the string representation of a scalar dtype. - - Parameters - ---------- - sctype : scalar dtype or object - If a scalar dtype, the corresponding string character is - returned. If an object, `sctype2char` tries to infer its scalar type - and then return the corresponding string character. - - Returns - ------- - typechar : str - The string character corresponding to the scalar type. - - Raises - ------ - ValueError - If `sctype` is an object for which the type can not be inferred. - - See Also - -------- - obj2sctype, issctype, issubsctype, mintypecode - - Examples - -------- - >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]: - ... print(np.sctype2char(sctype)) - l # may vary - d - D - S - O - - >>> x = np.array([1., 2-1.j]) - >>> np.sctype2char(x) - 'D' - >>> np.sctype2char(list) - 'O' - - """ - sctype = obj2sctype(sctype) - if sctype is None: - raise ValueError("unrecognized type") - if sctype not in _concrete_types: - # for compatibility - raise KeyError(sctype) - return dtype(sctype).char - -# Create dictionary of casting functions that wrap sequences -# indexed by type or type character -cast = _typedict() -for key in _concrete_types: - cast[key] = lambda x, k=key: array(x, copy=False).astype(k) - -try: - ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType, - _types.LongType, _types.BooleanType, - _types.StringType, _types.UnicodeType, _types.BufferType] -except AttributeError: - # Py3K - ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] - -ScalarType.extend(_concrete_types) -ScalarType = tuple(ScalarType) - - -# Now add the types we've determined to this module -for key in allTypes: - globals()[key] = allTypes[key] - __all__.append(key) - -del key - -typecodes = {'Character':'c', - 'Integer':'bhilqp', - 'UnsignedInteger':'BHILQP', - 'Float':'efdg', - 'Complex':'FDG', - 'AllInteger':'bBhHiIlLqQpP', - 'AllFloat':'efdgFDG', - 'Datetime': 'Mm', - 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} - -# backwards compatibility --- deprecated name -typeDict = sctypeDict -typeNA = sctypeNA - -# b -> boolean -# u -> unsigned integer -# i -> signed integer -# f -> floating point -# c -> complex -# M -> datetime -# m -> timedelta -# S -> string -# U -> Unicode string -# V -> record -# O -> Python object -_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] - -__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' -__len_test_types = len(__test_types) - -# Keep incrementing until a common type both can be coerced to -# is found. 
Otherwise, return None -def _find_common_coerce(a, b): - if a > b: - return a - try: - thisind = __test_types.index(a.char) - except ValueError: - return None - return _can_coerce_all([a, b], start=thisind) - -# Find a data-type that all data-types in a list can be coerced to -def _can_coerce_all(dtypelist, start=0): - N = len(dtypelist) - if N == 0: - return None - if N == 1: - return dtypelist[0] - thisind = start - while thisind < __len_test_types: - newdtype = dtype(__test_types[thisind]) - numcoerce = len([x for x in dtypelist if newdtype >= x]) - if numcoerce == N: - return newdtype - thisind += 1 - return None - -def _register_types(): - numbers.Integral.register(integer) - numbers.Complex.register(inexact) - numbers.Real.register(floating) - numbers.Number.register(number) - -_register_types() - - -@set_module('numpy') -def find_common_type(array_types, scalar_types): - """ - Determine common type following standard coercion rules. - - Parameters - ---------- - array_types : sequence - A list of dtypes or dtype convertible objects representing arrays. - scalar_types : sequence - A list of dtypes or dtype convertible objects representing scalars. - - Returns - ------- - datatype : dtype - The common data type, which is the maximum of `array_types` ignoring - `scalar_types`, unless the maximum of `scalar_types` is of a - different kind (`dtype.kind`). If the kind is not understood, then - None is returned. - - See Also - -------- - dtype, common_type, can_cast, mintypecode - - Examples - -------- - >>> np.find_common_type([], [np.int64, np.float32, complex]) - dtype('complex128') - >>> np.find_common_type([np.int64, np.float32], []) - dtype('float64') - - The standard casting rules ensure that a scalar cannot up-cast an - array unless the scalar is of a fundamentally different kind of data - (i.e. 
under a different hierarchy in the data type hierarchy) then - the array: - - >>> np.find_common_type([np.float32], [np.int64, np.float64]) - dtype('float32') - - Complex is of a different type, so it up-casts the float in the - `array_types` argument: - - >>> np.find_common_type([np.float32], [complex]) - dtype('complex128') - - Type specifier strings are convertible to dtypes and can therefore - be used instead of dtypes: - - >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) - dtype('complex128') - - """ - array_types = [dtype(x) for x in array_types] - scalar_types = [dtype(x) for x in scalar_types] - - maxa = _can_coerce_all(array_types) - maxsc = _can_coerce_all(scalar_types) - - if maxa is None: - return maxsc - - if maxsc is None: - return maxa - - try: - index_a = _kind_list.index(maxa.kind) - index_sc = _kind_list.index(maxsc.kind) - except ValueError: - return None - - if index_sc > index_a: - return _find_common_coerce(maxsc, maxa) - else: - return maxa diff --git a/venv/lib/python3.7/site-packages/numpy/core/overrides.py b/venv/lib/python3.7/site-packages/numpy/core/overrides.py deleted file mode 100644 index 55c7bd1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/overrides.py +++ /dev/null @@ -1,210 +0,0 @@ -"""Implementation of __array_function__ overrides from NEP-18.""" -import collections -import functools -import os -import textwrap - -from numpy.core._multiarray_umath import ( - add_docstring, implement_array_function, _get_implementing_args) -from numpy.compat._inspect import getargspec - - -ARRAY_FUNCTION_ENABLED = bool( - int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1))) - - -add_docstring( - implement_array_function, - """ - Implement a function with checks for __array_function__ overrides. - - All arguments are required, and can only be passed by position. - - Arguments - --------- - implementation : function - Function that implements the operation on NumPy array without - overrides when called like ``implementation(*args, **kwargs)``. - public_api : function - Function exposed by NumPy's public API originally called like - ``public_api(*args, **kwargs)`` on which arguments are now being - checked. - relevant_args : iterable - Iterable of arguments to check for __array_function__ methods. - args : tuple - Arbitrary positional arguments originally passed into ``public_api``. - kwargs : dict - Arbitrary keyword arguments originally passed into ``public_api``. - - Returns - ------- - Result from calling ``implementation()`` or an ``__array_function__`` - method, as appropriate. - - Raises - ------ - TypeError : if no implementation is found. - """) - - -# exposed for testing purposes; used internally by implement_array_function -add_docstring( - _get_implementing_args, - """ - Collect arguments on which to call __array_function__. - - Parameters - ---------- - relevant_args : iterable of array-like - Iterable of possibly array-like arguments to check for - __array_function__ methods. - - Returns - ------- - Sequence of arguments with __array_function__ methods, in the order in - which they should be called. 
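    A toy override (editor's sketch; ``MyArray`` is a hypothetical class,
    not part of this file) showing the protocol these helpers drive:

    >>> class MyArray:
    ...     def __array_function__(self, func, types, args, kwargs):
    ...         return 'intercepted {}'.format(func.__name__)
    >>> np.concatenate([MyArray(), MyArray()])
    'intercepted concatenate'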
- """) - - -ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults') - - -def verify_matching_signatures(implementation, dispatcher): - """Verify that a dispatcher function has the right signature.""" - implementation_spec = ArgSpec(*getargspec(implementation)) - dispatcher_spec = ArgSpec(*getargspec(dispatcher)) - - if (implementation_spec.args != dispatcher_spec.args or - implementation_spec.varargs != dispatcher_spec.varargs or - implementation_spec.keywords != dispatcher_spec.keywords or - (bool(implementation_spec.defaults) != - bool(dispatcher_spec.defaults)) or - (implementation_spec.defaults is not None and - len(implementation_spec.defaults) != - len(dispatcher_spec.defaults))): - raise RuntimeError('implementation and dispatcher for %s have ' - 'different function signatures' % implementation) - - if implementation_spec.defaults is not None: - if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): - raise RuntimeError('dispatcher functions can only use None for ' - 'default argument values') - - -def set_module(module): - """Decorator for overriding __module__ on a function or class. - - Example usage:: - - @set_module('numpy') - def example(): - pass - - assert example.__module__ == 'numpy' - """ - def decorator(func): - if module is not None: - func.__module__ = module - return func - return decorator - - - -# Call textwrap.dedent here instead of in the function so as to avoid -# calling dedent multiple times on the same text -_wrapped_func_source = textwrap.dedent(""" - @functools.wraps(implementation) - def {name}(*args, **kwargs): - relevant_args = dispatcher(*args, **kwargs) - return implement_array_function( - implementation, {name}, relevant_args, args, kwargs) - """) - - -def array_function_dispatch(dispatcher, module=None, verify=True, - docs_from_dispatcher=False): - """Decorator for adding dispatch with the __array_function__ protocol. - - See NEP-18 for example usage. - - Parameters - ---------- - dispatcher : callable - Function that when called like ``dispatcher(*args, **kwargs)`` with - arguments from the NumPy function call returns an iterable of - array-like arguments to check for ``__array_function__``. - module : str, optional - __module__ attribute to set on new function, e.g., ``module='numpy'``. - By default, module is copied from the decorated function. - verify : bool, optional - If True, verify the that the signature of the dispatcher and decorated - function signatures match exactly: all required and optional arguments - should appear in order with the same names, but the default values for - all optional arguments should be ``None``. Only disable verification - if the dispatcher's signature needs to deviate for some particular - reason, e.g., because the function has a signature like - ``func(*args, **kwargs)``. - docs_from_dispatcher : bool, optional - If True, copy docs from the dispatcher function onto the dispatched - function, rather than from the implementation. This is useful for - functions defined in C, which otherwise don't have docstrings. - - Returns - ------- - Function suitable for decorating the implementation of a NumPy function. 
- """ - - if not ARRAY_FUNCTION_ENABLED: - def decorator(implementation): - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) - if module is not None: - implementation.__module__ = module - return implementation - return decorator - - def decorator(implementation): - if verify: - verify_matching_signatures(implementation, dispatcher) - - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) - - # Equivalently, we could define this function directly instead of using - # exec. This version has the advantage of giving the helper function a - # more interpettable name. Otherwise, the original function does not - # show up at all in many cases, e.g., if it's written in C or if the - # dispatcher gets an invalid keyword argument. - source = _wrapped_func_source.format(name=implementation.__name__) - - source_object = compile( - source, filename='<__array_function__ internals>', mode='exec') - scope = { - 'implementation': implementation, - 'dispatcher': dispatcher, - 'functools': functools, - 'implement_array_function': implement_array_function, - } - exec(source_object, scope) - - public_api = scope[implementation.__name__] - - if module is not None: - public_api.__module__ = module - - public_api._implementation = implementation - - return public_api - - return decorator - - -def array_function_from_dispatcher( - implementation, module=None, verify=True, docs_from_dispatcher=True): - """Like array_function_dispatcher, but with function arguments flipped.""" - - def decorator(dispatcher): - return array_function_dispatch( - dispatcher, module, verify=verify, - docs_from_dispatcher=docs_from_dispatcher)(implementation) - return decorator diff --git a/venv/lib/python3.7/site-packages/numpy/core/records.py b/venv/lib/python3.7/site-packages/numpy/core/records.py deleted file mode 100644 index a1cad90..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/records.py +++ /dev/null @@ -1,886 +0,0 @@ -""" -Record Arrays -============= -Record arrays expose the fields of structured arrays as properties. - -Most commonly, ndarrays contain elements of a single type, e.g. floats, -integers, bools etc. However, it is possible for elements to be combinations -of these using structured types, such as:: - - >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)]) - >>> a - array([(1, 2.), (1, 2.)], dtype=[('x', '>> a['x'] - array([1, 1]) - - >>> a['y'] - array([2., 2.]) - -Record arrays allow us to access fields as properties:: - - >>> ar = np.rec.array(a) - - >>> ar.x - array([1, 1]) - - >>> ar.y - array([2., 2.]) - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import warnings -from collections import Counter, OrderedDict - -from . import numeric as sb -from . 
import numerictypes as nt -from numpy.compat import ( - isfileobj, bytes, long, unicode, os_fspath, contextlib_nullcontext -) -from numpy.core.overrides import set_module -from .arrayprint import get_printoptions - -# All of the functions allow formats to be a dtype -__all__ = ['record', 'recarray', 'format_parser'] - - -ndarray = sb.ndarray - -_byteorderconv = {'b':'>', - 'l':'<', - 'n':'=', - 'B':'>', - 'L':'<', - 'N':'=', - 'S':'s', - 's':'s', - '>':'>', - '<':'<', - '=':'=', - '|':'|', - 'I':'|', - 'i':'|'} - -# formats regular expression -# allows multidimension spec with a tuple syntax in front -# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' -# are equally allowed - -numfmt = nt.typeDict - -# taken from OrderedDict recipes in the Python documentation -# https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes -class _OrderedCounter(Counter, OrderedDict): - """Counter that remembers the order elements are first encountered""" - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, OrderedDict(self)) - - def __reduce__(self): - return self.__class__, (OrderedDict(self),) - - -def find_duplicate(list): - """Find duplication in a list, return a list of duplicated elements""" - return [ - item - for item, counts in _OrderedCounter(list).items() - if counts > 1 - ] - - -@set_module('numpy') -class format_parser(object): - """ - Class to convert formats, names, titles description to a dtype. - - After constructing the format_parser object, the dtype attribute is - the converted data-type: - ``dtype = format_parser(formats, names, titles).dtype`` - - Attributes - ---------- - dtype : dtype - The converted data-type. - - Parameters - ---------- - formats : str or list of str - The format description, either specified as a string with - comma-separated format descriptions in the form ``'f8, i4, a5'``, or - a list of format description strings in the form - ``['f8', 'i4', 'a5']``. - names : str or list/tuple of str - The field names, either specified as a comma-separated string in the - form ``'col1, col2, col3'``, or as a list or tuple of strings in the - form ``['col1', 'col2', 'col3']``. - An empty list can be used, in that case default field names - ('f0', 'f1', ...) are used. - titles : sequence - Sequence of title strings. An empty list can be used to leave titles - out. - aligned : bool, optional - If True, align the fields by padding as the C-compiler would. - Default is False. - byteorder : str, optional - If specified, all the fields will be changed to the - provided byte-order. Otherwise, the default byte-order is - used. For all available string specifiers, see `dtype.newbyteorder`. - - See Also - -------- - dtype, typename, sctype2char - - Examples - -------- - >>> np.format_parser(['>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], - ... []).dtype - dtype([('col1', '>> np.format_parser([' len(titles)): - self._titles += [None] * (self._nfields - len(titles)) - - def _createdescr(self, byteorder): - descr = sb.dtype({'names':self._names, - 'formats':self._f_formats, - 'offsets':self._offsets, - 'titles':self._titles}) - if (byteorder is not None): - byteorder = _byteorderconv[byteorder[0]] - descr = descr.newbyteorder(byteorder) - - self._descr = descr - -class record(nt.void): - """A data-type scalar that allows field access as attribute lookup. 
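    A doctest-style illustration (editor's addition, not in the original
    docstring) of the attribute/item duality on a record scalar:

    >>> a = np.rec.array([(1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)])
    >>> a[0].x, a[0]['y']
    (1, 2.0)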
- """ - - # manually set name and module so that this class's type shows up - # as numpy.record when printed - __name__ = 'record' - __module__ = 'numpy' - - def __repr__(self): - if get_printoptions()['legacy'] == '1.13': - return self.__str__() - return super(record, self).__repr__() - - def __str__(self): - if get_printoptions()['legacy'] == '1.13': - return str(self.item()) - return super(record, self).__str__() - - def __getattribute__(self, attr): - if attr in ['setfield', 'getfield', 'dtype']: - return nt.void.__getattribute__(self, attr) - try: - return nt.void.__getattribute__(self, attr) - except AttributeError: - pass - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - obj = self.getfield(*res[:2]) - # if it has fields return a record, - # otherwise return the object - try: - dt = obj.dtype - except AttributeError: - #happens if field is Object type - return obj - if dt.names is not None: - return obj.view((self.__class__, obj.dtype)) - return obj - else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) - - def __setattr__(self, attr, val): - if attr in ['setfield', 'getfield', 'dtype']: - raise AttributeError("Cannot set '%s' attribute" % attr) - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - return self.setfield(val, *res[:2]) - else: - if getattr(self, attr, None): - return nt.void.__setattr__(self, attr, val) - else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) - - def __getitem__(self, indx): - obj = nt.void.__getitem__(self, indx) - - # copy behavior of record.__getattribute__, - if isinstance(obj, nt.void) and obj.dtype.names is not None: - return obj.view((self.__class__, obj.dtype)) - else: - # return a single element - return obj - - def pprint(self): - """Pretty-print all fields.""" - # pretty-print all fields - names = self.dtype.names - maxlen = max(len(name) for name in names) - fmt = '%% %ds: %%s' % maxlen - rows = [fmt % (name, getattr(self, name)) for name in names] - return "\n".join(rows) - -# The recarray is almost identical to a standard array (which supports -# named fields already) The biggest difference is that it can use -# attribute-lookup to find the fields and it is constructed using -# a record. - -# If byteorder is given it forces a particular byteorder on all -# the fields (and any subfields) - -class recarray(ndarray): - """Construct an ndarray that allows field access using attributes. - - Arrays may have a data-types containing fields, analogous - to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, - where each entry in the array is a pair of ``(int, float)``. Normally, - these attributes are accessed using dictionary lookups such as ``arr['x']`` - and ``arr['y']``. Record arrays allow the fields to be accessed as members - of the array, using ``arr.x`` and ``arr.y``. - - Parameters - ---------- - shape : tuple - Shape of output array. - dtype : data-type, optional - The desired data-type. By default, the data-type is determined - from `formats`, `names`, `titles`, `aligned` and `byteorder`. - formats : list of data-types, optional - A list containing the data-types for the different columns, e.g. - ``['i4', 'f8', 'i4']``. `formats` does *not* support the new - convention of using types directly, i.e. ``(int, float, int)``. - Note that `formats` must be a list, not a tuple. 
- Given that `formats` is somewhat limited, we recommend specifying - `dtype` instead. - names : tuple of str, optional - The name of each column, e.g. ``('x', 'y', 'z')``. - buf : buffer, optional - By default, a new array is created of the given shape and data-type. - If `buf` is specified and is an object exposing the buffer interface, - the array will use the memory from the existing buffer. In this case, - the `offset` and `strides` keywords are available. - - Other Parameters - ---------------- - titles : tuple of str, optional - Aliases for column names. For example, if `names` were - ``('x', 'y', 'z')`` and `titles` is - ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then - ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. - byteorder : {'<', '>', '='}, optional - Byte-order for all fields. - aligned : bool, optional - Align the fields in memory as the C-compiler would. - strides : tuple of ints, optional - Buffer (`buf`) is interpreted according to these strides (strides - define how many bytes each array element, row, column, etc. - occupy in memory). - offset : int, optional - Start reading buffer (`buf`) from this offset onwards. - order : {'C', 'F'}, optional - Row-major (C-style) or column-major (Fortran-style) order. - - Returns - ------- - rec : recarray - Empty array of the given shape and type. - - See Also - -------- - rec.fromrecords : Construct a record array from data. - record : fundamental data-type for `recarray`. - format_parser : determine a data-type from formats, names, titles. - - Notes - ----- - This constructor can be compared to ``empty``: it creates a new record - array but does not fill it with data. To create a record array from data, - use one of the following methods: - - 1. Create a standard ndarray and convert it to a record array, - using ``arr.view(np.recarray)`` - 2. Use the `buf` keyword. - 3. Use `np.rec.fromrecords`. - - Examples - -------- - Create an array with two fields, ``x`` and ``y``: - - >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x - array([(1., 2), (3., 4)], dtype=[('x', '>> x['x'] - array([1., 3.]) - - View the array as a record array: - - >>> x = x.view(np.recarray) - - >>> x.x - array([1., 3.]) - - >>> x.y - array([2, 4]) - - Create a new, empty record array: - - >>> np.recarray((2,), - ... 
dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP - rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), - (3471280, 1.2134086255804012e-316, 0)], - dtype=[('x', ' 0 or self.shape == (0,): - lst = sb.array2string( - self, separator=', ', prefix=prefix, suffix=',') - else: - # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(self.shape),) - - lf = '\n'+' '*len(prefix) - if get_printoptions()['legacy'] == '1.13': - lf = ' ' + lf # trailing space - return fmt % (lst, lf, repr_dtype) - - def field(self, attr, val=None): - if isinstance(attr, int): - names = ndarray.__getattribute__(self, 'dtype').names - attr = names[attr] - - fielddict = ndarray.__getattribute__(self, 'dtype').fields - - res = fielddict[attr][:2] - - if val is None: - obj = self.getfield(*res) - if obj.dtype.names is not None: - return obj - return obj.view(ndarray) - else: - return self.setfield(val, *res) - - -def fromarrays(arrayList, dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a record array from a (flat) list of arrays - - >>> x1=np.array([1,2,3,4]) - >>> x2=np.array(['a','dd','xyz','12']) - >>> x3=np.array([1.1,2,3,4]) - >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') - >>> print(r[1]) - (2, 'dd', 2.0) # may vary - >>> x1[1]=34 - >>> r.a - array([1, 2, 3, 4]) - """ - - arrayList = [sb.asarray(x) for x in arrayList] - - if shape is None or shape == 0: - shape = arrayList[0].shape - - if isinstance(shape, int): - shape = (shape,) - - if formats is None and dtype is None: - # go through each object in the list to see if it is an ndarray - # and determine the formats. - formats = [] - for obj in arrayList: - formats.append(obj.dtype) - - if dtype is not None: - descr = sb.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - - # Determine shape from data-type. - if len(descr) != len(arrayList): - raise ValueError("mismatch between the number of fields " - "and the number of arrays") - - d0 = descr[0].shape - nn = len(d0) - if nn > 0: - shape = shape[:-nn] - - for k, obj in enumerate(arrayList): - nn = descr[k].ndim - testshape = obj.shape[:obj.ndim - nn] - if testshape != shape: - raise ValueError("array-shape mismatch in array %d" % k) - - _array = recarray(shape, descr) - - # populate the record array (makes a copy) - for i in range(len(arrayList)): - _array[_names[i]] = arrayList[i] - - return _array - -def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None): - """ create a recarray from a list of records in text form - - The data in the same field can be heterogeneous, they will be promoted - to the highest data type. This method is intended for creating - smaller record arrays. If used to create large array without formats - defined - - r=fromrecords([(2,3.,'abc')]*100000) - - it can be slow. - - If formats is None, then this will auto-detect formats. Use list of - tuples rather than list of lists for faster processing. - - >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], - ... 
names='col1,col2,col3') - >>> print(r[0]) - (456, 'dbe', 1.2) - >>> r.col1 - array([456, 2]) - >>> r.col2 - array(['dbe', 'de'], dtype='>> import pickle - >>> pickle.loads(pickle.dumps(r)) - rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)], - dtype=[('col1', ' 1: - raise ValueError("Can only deal with 1-d array.") - _array = recarray(shape, descr) - for k in range(_array.size): - _array[k] = tuple(recList[k]) - # list of lists instead of list of tuples ? - # 2018-02-07, 1.14.1 - warnings.warn( - "fromrecords expected a list of tuples, may have received a list " - "of lists instead. In the future that will raise an error", - FutureWarning, stacklevel=2) - return _array - else: - if shape is not None and retval.shape != shape: - retval.shape = shape - - res = retval.view(recarray) - - return res - - -def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a (read-only) record array from binary data contained in - a string""" - - if dtype is None and formats is None: - raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - if (shape is None or shape == 0 or shape == -1): - shape = (len(datastring) - offset) // itemsize - - _array = recarray(shape, descr, buf=datastring, offset=offset) - return _array - -def get_remaining_size(fd): - try: - fn = fd.fileno() - except AttributeError: - return os.path.getsize(fd.name) - fd.tell() - st = os.fstat(fn) - size = st.st_size - fd.tell() - return size - -def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """Create an array from binary file data - - If file is a string or a path-like object then that file is opened, - else it is assumed to be a file object. The file object must - support random access (i.e. it must have tell and seek methods). - - >>> from tempfile import TemporaryFile - >>> a = np.empty(10,dtype='f8,i4,a5') - >>> a[5] = (0.5,10,'abcde') - >>> - >>> fd=TemporaryFile() - >>> a = a.newbyteorder('<') - >>> a.tofile(fd) - >>> - >>> _ = fd.seek(0) - >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, - ... 
byteorder='<') - >>> print(r[5]) - (0.5, 10, 'abcde') - >>> r.shape - (10,) - """ - - if dtype is None and formats is None: - raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") - - if (shape is None or shape == 0): - shape = (-1,) - elif isinstance(shape, (int, long)): - shape = (shape,) - - if isfileobj(fd): - # file already opened - ctx = contextlib_nullcontext(fd) - else: - # open file - ctx = open(os_fspath(fd), 'rb') - - with ctx as fd: - if (offset > 0): - fd.seek(offset, 1) - size = get_remaining_size(fd) - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - - shapeprod = sb.array(shape).prod(dtype=nt.intp) - shapesize = shapeprod * itemsize - if shapesize < 0: - shape = list(shape) - shape[shape.index(-1)] = size // -shapesize - shape = tuple(shape) - shapeprod = sb.array(shape).prod(dtype=nt.intp) - - nbytes = shapeprod * itemsize - - if nbytes > size: - raise ValueError( - "Not enough bytes left in file for specified shape and type") - - # create the array - _array = recarray(shape, descr) - nbytesread = fd.readinto(_array.data) - if nbytesread != nbytes: - raise IOError("Didn't read as many bytes as expected") - - return _array - -def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, copy=True): - """Construct a record array from a wide-variety of objects. - """ - - if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and - (formats is None) and (dtype is None)): - raise ValueError("Must define formats (or dtype) if object is " - "None, string, or an open file") - - kwds = {} - if dtype is not None: - dtype = sb.dtype(dtype) - elif formats is not None: - dtype = format_parser(formats, names, titles, - aligned, byteorder)._descr - else: - kwds = {'formats': formats, - 'names': names, - 'titles': titles, - 'aligned': aligned, - 'byteorder': byteorder - } - - if obj is None: - if shape is None: - raise ValueError("Must define a shape if obj is None") - return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) - - elif isinstance(obj, bytes): - return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) - - elif isinstance(obj, (list, tuple)): - if isinstance(obj[0], (tuple, list)): - return fromrecords(obj, dtype=dtype, shape=shape, **kwds) - else: - return fromarrays(obj, dtype=dtype, shape=shape, **kwds) - - elif isinstance(obj, recarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - return new - - elif isfileobj(obj): - return fromfile(obj, dtype=dtype, shape=shape, offset=offset) - - elif isinstance(obj, ndarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - return new.view(recarray) - - else: - interface = getattr(obj, "__array_interface__", None) - if interface is None or not isinstance(interface, dict): - raise ValueError("Unknown input type") - obj = sb.array(obj) - if dtype is not None and (obj.dtype != dtype): - obj = obj.view(dtype) - return obj.view(recarray) diff --git a/venv/lib/python3.7/site-packages/numpy/core/setup.py b/venv/lib/python3.7/site-packages/numpy/core/setup.py deleted file mode 100644 index 974ec46..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/setup.py +++ /dev/null @@ -1,979 +0,0 @@ -from __future__ import division, print_function - -import os 
-import sys -import pickle -import copy -import warnings -import platform -import textwrap -from os.path import join - -from numpy.distutils import log -from distutils.dep_util import newer -from distutils.sysconfig import get_config_var -from numpy._build_utils.apple_accelerate import ( - uses_accelerate_framework, get_sgemv_fix - ) -from numpy.compat import npy_load_module -from setup_common import * - -# Set to True to enable relaxed strides checking. This (mostly) means -# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags. -NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0") - -# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a -# bogus value for affected strides in order to help smoke out bad stride usage -# when relaxed stride checking is enabled. -NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0") -NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING - -# XXX: ugly, we use a class to avoid calling twice some expensive functions in -# config.h/numpyconfig.h. I don't see a better way because distutils force -# config.h generation inside an Extension class, and as such sharing -# configuration information between extensions is not easy. -# Using a pickled-based memoize does not work because config_cmd is an instance -# method, which cPickle does not like. -# -# Use pickle in all cases, as cPickle is gone in python3 and the difference -# in time is only in build. -- Charles Harris, 2013-03-30 - -class CallOnceOnly(object): - def __init__(self): - self._check_types = None - self._check_ieee_macros = None - self._check_complex = None - - def check_types(self, *a, **kw): - if self._check_types is None: - out = check_types(*a, **kw) - self._check_types = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_types)) - return out - - def check_ieee_macros(self, *a, **kw): - if self._check_ieee_macros is None: - out = check_ieee_macros(*a, **kw) - self._check_ieee_macros = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_ieee_macros)) - return out - - def check_complex(self, *a, **kw): - if self._check_complex is None: - out = check_complex(*a, **kw) - self._check_complex = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_complex)) - return out - -def pythonlib_dir(): - """return path where libpython* is.""" - if sys.platform == 'win32': - return os.path.join(sys.prefix, "libs") - else: - return get_config_var('LIBDIR') - -def is_npy_no_signal(): - """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration - header.""" - return sys.platform == 'win32' - -def is_npy_no_smp(): - """Return True if the NPY_NO_SMP symbol must be defined in public - header (when SMP support cannot be reliably enabled).""" - # Perhaps a fancier check is in order here. - # so that threads are only enabled if there - # are actually multiple CPUS? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. 
- return 'NPY_NOSMP' in os.environ - -def win32_checks(deflist): - from numpy.distutils.misc_util import get_build_architecture - a = get_build_architecture() - - # Distutils hack on AMD64 on windows - print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % - (a, os.name, sys.platform)) - if a == 'AMD64': - deflist.append('DISTUTILS_USE_SDK') - - # On win32, force long double format string to be 'g', not - # 'Lg', since the MS runtime does not support long double whose - # size is > sizeof(double) - if a == "Intel" or a == "AMD64": - deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') - -def check_math_capabilities(config, moredefs, mathlibs): - def check_func(func_name): - return config.check_func(func_name, libraries=mathlibs, - decl=True, call=True) - - def check_funcs_once(funcs_name): - decl = dict([(f, True) for f in funcs_name]) - st = config.check_funcs_once(funcs_name, libraries=mathlibs, - decl=decl, call=decl) - if st: - moredefs.extend([(fname2def(f), 1) for f in funcs_name]) - return st - - def check_funcs(funcs_name): - # Use check_funcs_once first, and if it does not work, test func per - # func. Return success only if all the functions are available - if not check_funcs_once(funcs_name): - # Global check failed, check func per func - for f in funcs_name: - if check_func(f): - moredefs.append((fname2def(f), 1)) - return 0 - else: - return 1 - - #use_msvc = config.check_decl("_MSC_VER") - - if not check_funcs_once(MANDATORY_FUNCS): - raise SystemError("One of the required function to build numpy is not" - " available (the list is %s)." % str(MANDATORY_FUNCS)) - - # Standard functions which may not be available and for which we have a - # replacement implementation. Note that some of these are C99 functions. - - # XXX: hack to circumvent cpp pollution from python: python put its - # config.h in the public namespace, so we have a clash for the common - # functions we test. We remove every function tested by python's - # autoconf, hoping their own test are correct - for f in OPTIONAL_STDFUNCS_MAYBE: - if config.check_decl(fname2def(f), - headers=["Python.h", "math.h"]): - OPTIONAL_STDFUNCS.remove(f) - - check_funcs(OPTIONAL_STDFUNCS) - - for h in OPTIONAL_HEADERS: - if config.check_func("", decl=False, call=False, headers=[h]): - h = h.replace(".", "_").replace(os.path.sep, "_") - moredefs.append((fname2def(h), 1)) - - for tup in OPTIONAL_INTRINSICS: - headers = None - if len(tup) == 2: - f, args, m = tup[0], tup[1], fname2def(tup[0]) - elif len(tup) == 3: - f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0]) - else: - f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3]) - if config.check_func(f, decl=False, call=True, call_args=args, - headers=headers): - moredefs.append((m, 1)) - - for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES: - if config.check_gcc_function_attribute(dec, fn): - moredefs.append((fname2def(fn), 1)) - - for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS: - if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code, - header): - moredefs.append((fname2def(fn), 1)) - - for fn in OPTIONAL_VARIABLE_ATTRIBUTES: - if config.check_gcc_variable_attribute(fn): - m = fn.replace("(", "_").replace(")", "_") - moredefs.append((fname2def(m), 1)) - - # C99 functions: float and long double versions - check_funcs(C99_FUNCS_SINGLE) - check_funcs(C99_FUNCS_EXTENDED) - -def check_complex(config, mathlibs): - priv = [] - pub = [] - - try: - if os.uname()[0] == "Interix": - warnings.warn("Disabling broken complex support. 
See #1365", stacklevel=2) - return priv, pub - except Exception: - # os.uname not available on all platforms. blanket except ugly but safe - pass - - # Check for complex support - st = config.check_header('complex.h') - if st: - priv.append(('HAVE_COMPLEX_H', 1)) - pub.append(('NPY_USE_C99_COMPLEX', 1)) - - for t in C99_COMPLEX_TYPES: - st = config.check_type(t, headers=["complex.h"]) - if st: - pub.append(('NPY_HAVE_%s' % type2def(t), 1)) - - def check_prec(prec): - flist = [f + prec for f in C99_COMPLEX_FUNCS] - decl = dict([(f, True) for f in flist]) - if not config.check_funcs_once(flist, call=decl, decl=decl, - libraries=mathlibs): - for f in flist: - if config.check_func(f, call=True, decl=True, - libraries=mathlibs): - priv.append((fname2def(f), 1)) - else: - priv.extend([(fname2def(f), 1) for f in flist]) - - check_prec('') - check_prec('f') - check_prec('l') - - return priv, pub - -def check_ieee_macros(config): - priv = [] - pub = [] - - macros = [] - - def _add_decl(f): - priv.append(fname2def("decl_%s" % f)) - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - - # XXX: hack to circumvent cpp pollution from python: python put its - # config.h in the public namespace, so we have a clash for the common - # functions we test. We remove every function tested by python's - # autoconf, hoping their own test are correct - _macros = ["isnan", "isinf", "signbit", "isfinite"] - for f in _macros: - py_symbol = fname2def("decl_%s" % f) - already_declared = config.check_decl(py_symbol, - headers=["Python.h", "math.h"]) - if already_declared: - if config.check_macro_true(py_symbol, - headers=["Python.h", "math.h"]): - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - else: - macros.append(f) - # Normally, isnan and isinf are macro (C99), but some platforms only have - # func, or both func and macro version. Check for macro only, and define - # replacement ones if not found. - # Note: including Python.h is necessary because it modifies some math.h - # definitions - for f in macros: - st = config.check_decl(f, headers=["Python.h", "math.h"]) - if st: - _add_decl(f) - - return priv, pub - -def check_types(config_cmd, ext, build_dir): - private_defines = [] - public_defines = [] - - # Expected size (in number of bytes) for each type. This is an - # optimization: those are only hints, and an exhaustive search for the size - # is done if the hints are wrong. - expected = {'short': [2], 'int': [4], 'long': [8, 4], - 'float': [4], 'double': [8], 'long double': [16, 12, 8], - 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8], - 'off_t': [8, 4]} - - # Check we have the python header (-dev* packages on Linux) - result = config_cmd.check_header('Python.h') - if not result: - python = 'python' - if '__pypy__' in sys.builtin_module_names: - python = 'pypy' - raise SystemError( - "Cannot compile 'Python.h'. 
Perhaps you need to " - "install {0}-dev|{0}-devel.".format(python)) - res = config_cmd.check_header("endian.h") - if res: - private_defines.append(('HAVE_ENDIAN_H', 1)) - public_defines.append(('NPY_HAVE_ENDIAN_H', 1)) - res = config_cmd.check_header("sys/endian.h") - if res: - private_defines.append(('HAVE_SYS_ENDIAN_H', 1)) - public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1)) - - # Check basic types sizes - for type in ('short', 'int', 'long'): - res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"]) - if res: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type))) - else: - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - for type in ('float', 'double', 'long double'): - already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), - headers=["Python.h"]) - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - if not already_declared and not type == 'long double': - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # Compute size of corresponding complex type: used to check that our - # definition is binary compatible with C99 complex type (check done at - # build time in npy_common.h) - complex_def = "struct {%s __x; %s __y;}" % (type, type) - res = config_cmd.check_type_size(complex_def, - expected=[2 * x for x in expected[type]]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % complex_def) - - for type in ('Py_intptr_t', 'off_t'): - res = config_cmd.check_type_size(type, headers=["Python.h"], - library_dirs=[pythonlib_dir()], - expected=expected[type]) - - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # We check declaration AND type because that's how distutils does it. - if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): - res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], - library_dirs=[pythonlib_dir()], - expected=expected['PY_LONG_LONG']) - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') - - res = config_cmd.check_type_size('long long', - expected=expected['long long']) - if res >= 0: - #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" 
% 'long long') - - if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): - raise RuntimeError( - "Config wo CHAR_BIT is not supported" - ", please contact the maintainers") - - return private_defines, public_defines - -def check_mathlib(config_cmd): - # Testing the C math library - mathlibs = [] - mathlibs_choices = [[], ['m'], ['cpml']] - mathlib = os.environ.get('MATHLIB') - if mathlib: - mathlibs_choices.insert(0, mathlib.split(',')) - for libs in mathlibs_choices: - if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): - mathlibs = libs - break - else: - raise EnvironmentError("math library missing; rerun " - "setup.py after setting the " - "MATHLIB env variable") - return mathlibs - -def visibility_define(config): - """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty - string).""" - hide = '__attribute__((visibility("hidden")))' - if config.check_gcc_function_attribute(hide, 'hideme'): - return hide - else: - return '' - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration, dot_join - from numpy.distutils.system_info import get_info, dict_append - - config = Configuration('core', parent_package, top_path) - local_dir = config.local_path - codegen_dir = join(local_dir, 'code_generators') - - if is_released(config): - warnings.simplefilter('error', MismatchCAPIWarning) - - # Check whether we have a mismatch between the set C API VERSION and the - # actual C API VERSION - check_api_version(C_API_VERSION, codegen_dir) - - generate_umath_py = join(codegen_dir, 'generate_umath.py') - n = dot_join(config.name, 'generate_umath') - generate_umath = npy_load_module('_'.join(n.split('.')), - generate_umath_py, ('.py', 'U', 1)) - - header_dir = 'include/numpy' # this is relative to config.path_in_package - - cocache = CallOnceOnly() - - def generate_config_h(ext, build_dir): - target = join(build_dir, header_dir, 'config.h') - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - - if newer(__file__, target): - config_cmd = config.get_config_cmd() - log.info('Generating %s', target) - - # Check sizeof - moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) - - # Check math library and C99 math funcs availability - mathlibs = check_mathlib(config_cmd) - moredefs.append(('MATHLIB', ','.join(mathlibs))) - - check_math_capabilities(config_cmd, moredefs, mathlibs) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) - - # Signal check - if is_npy_no_signal(): - moredefs.append('__NPY_PRIVATE_NO_SIGNAL') - - # Windows checks - if sys.platform == 'win32' or os.name == 'nt': - win32_checks(moredefs) - - # C99 restrict keyword - moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict())) - - # Inline check - inline = config_cmd.check_inline() - - # Use relaxed stride checking - if NPY_RELAXED_STRIDES_CHECKING: - moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) - - # Use bogus stride debug aid when relaxed strides are enabled - if NPY_RELAXED_STRIDES_DEBUG: - moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) - - # Get long double representation - rep = check_long_double_representation(config_cmd) - moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) - - if check_for_right_shift_internal_compiler_error(config_cmd): - moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift') - moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift') - moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift') - 
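Each entry collected in moredefs is either a bare macro name or a (name, value) pair; the write loop further below renders them into C preprocessor lines in config.h. A minimal standalone sketch of that convention (render_defines and the sample entries are illustrative, not part of the build code):

def render_defines(moredefs):
    # Bare strings become ``#define NAME``; (name, value) pairs become
    # ``#define NAME value`` -- the same convention the config.h write
    # loop below applies.
    lines = []
    for d in moredefs:
        if isinstance(d, str):
            lines.append('#define %s' % d)
        else:
            lines.append('#define %s %s' % (d[0], d[1]))
    return '\n'.join(lines)

print(render_defines([('MATHLIB', 'm'), '__NPY_PRIVATE_NO_SIGNAL']))
# #define MATHLIB m
# #define __NPY_PRIVATE_NO_SIGNAL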
moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift') - - # Py3K check - if sys.version_info[0] >= 3: - moredefs.append(('NPY_PY3K', 1)) - - # Generate the config.h file from moredefs - with open(target, 'w') as target_f: - for d in moredefs: - if isinstance(d, str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0], d[1])) - - # define inline to our keyword, or nothing - target_f.write('#ifndef __cplusplus\n') - if inline == 'inline': - target_f.write('/* #undef inline */\n') - else: - target_f.write('#define inline %s\n' % inline) - target_f.write('#endif\n') - - # add the guard to make sure config.h is never included directly, - # but always through npy_config.h - target_f.write(textwrap.dedent(""" - #ifndef _NPY_NPY_CONFIG_H_ - #error config.h should never be included directly, include npy_config.h instead - #endif - """)) - - log.info('File: %s' % target) - with open(target) as target_f: - log.info(target_f.read()) - log.info('EOF') - else: - mathlibs = [] - with open(target) as target_f: - for line in target_f: - s = '#define MATHLIB' - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - - # Ugly: this can be called within a library and not an extension, - # in which case there is no libraries attributes (and none is - # needed). - if hasattr(ext, 'libraries'): - ext.libraries.extend(mathlibs) - - incl_dir = os.path.dirname(target) - if incl_dir not in config.numpy_include_dirs: - config.numpy_include_dirs.append(incl_dir) - - return target - - def generate_numpyconfig_h(ext, build_dir): - """Depends on config.h: generate_config_h has to be called before !""" - # put common include directory in build_dir on search path - # allows using code generation in headers headers - config.add_include_dirs(join(build_dir, "src", "common")) - config.add_include_dirs(join(build_dir, "src", "npymath")) - - target = join(build_dir, header_dir, '_numpyconfig.h') - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - if newer(__file__, target): - config_cmd = config.get_config_cmd() - log.info('Generating %s', target) - - # Check sizeof - ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) - - if is_npy_no_signal(): - moredefs.append(('NPY_NO_SIGNAL', 1)) - - if is_npy_no_smp(): - moredefs.append(('NPY_NO_SMP', 1)) - else: - moredefs.append(('NPY_NO_SMP', 0)) - - mathlibs = check_mathlib(config_cmd) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) - - if NPY_RELAXED_STRIDES_CHECKING: - moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) - - if NPY_RELAXED_STRIDES_DEBUG: - moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) - - # Check whether we can use inttypes (C99) formats - if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']): - moredefs.append(('NPY_USE_C99_FORMATS', 1)) - - # visibility check - hidden_visibility = visibility_define(config_cmd) - moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility)) - - # Add the C API/ABI versions - moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION)) - moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) - - # Add moredefs to header - with open(target, 'w') as target_f: - for d in moredefs: - if isinstance(d, str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0], d[1])) - - # Define __STDC_FORMAT_MACROS - target_f.write(textwrap.dedent(""" - #ifndef __STDC_FORMAT_MACROS - 
#define __STDC_FORMAT_MACROS 1 - #endif - """)) - - # Dump the numpyconfig.h header to stdout - log.info('File: %s' % target) - with open(target) as target_f: - log.info(target_f.read()) - log.info('EOF') - config.add_data_files((header_dir, target)) - return target - - def generate_api_func(module_name): - def generate_api(ext, build_dir): - script = join(codegen_dir, module_name + '.py') - sys.path.insert(0, codegen_dir) - try: - m = __import__(module_name) - log.info('executing %s', script) - h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir)) - finally: - del sys.path[0] - config.add_data_files((header_dir, h_file), - (header_dir, doc_file)) - return (h_file,) - return generate_api - - generate_numpy_api = generate_api_func('generate_numpy_api') - generate_ufunc_api = generate_api_func('generate_ufunc_api') - - config.add_include_dirs(join(local_dir, "src", "common")) - config.add_include_dirs(join(local_dir, "src")) - config.add_include_dirs(join(local_dir)) - - config.add_data_dir('include/numpy') - config.add_include_dirs(join('src', 'npymath')) - config.add_include_dirs(join('src', 'multiarray')) - config.add_include_dirs(join('src', 'umath')) - config.add_include_dirs(join('src', 'npysort')) - - config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process - config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")]) - if sys.platform[:3] == "aix": - config.add_define_macros([("_LARGE_FILES", None)]) - else: - config.add_define_macros([("_FILE_OFFSET_BITS", "64")]) - config.add_define_macros([('_LARGEFILE_SOURCE', '1')]) - config.add_define_macros([('_LARGEFILE64_SOURCE', '1')]) - - config.numpy_include_dirs.extend(config.paths('include')) - - deps = [join('src', 'npymath', '_signbit.c'), - join('include', 'numpy', '*object.h'), - join(codegen_dir, 'genapi.py'), - ] - - ####################################################################### - # npymath library # - ####################################################################### - - subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")]) - - def get_mathlib_info(*args): - # Another ugly hack: the mathlib info is known once build_src is run, - # but we cannot use add_installed_pkg_config here either, so we only - # update the substitution dictionary during npymath build - config_cmd = config.get_config_cmd() - - # Check that the toolchain works, to fail early if it doesn't - # (avoid late errors with MATHLIB which are confusing if the - # compiler does not work). - st = config_cmd.try_link('int main(void) { return 0;}') - if not st: - # rerun the failing command in verbose mode - config_cmd.compiler.verbose = True - config_cmd.try_link('int main(void) { return 0;}') - raise RuntimeError("Broken toolchain: cannot link a simple C program") - mlibs = check_mathlib(config_cmd) - - posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) - msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) - subst_dict["posix_mathlib"] = posix_mlib - subst_dict["msvc_mathlib"] = msvc_mlib - - npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'), - join('src', 'npymath', 'npy_math.c'), - join('src', 'npymath', 'ieee754.c.src'), - join('src', 'npymath', 'npy_math_complex.c.src'), - join('src', 'npymath', 'halffloat.c') - ] - - # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977. 
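The get_mathlib_info hook above records the detected math library in both linker syntaxes; a tiny illustration of the two substitution values (the ['m'] result is hypothetical, standing in for whatever check_mathlib finds):

mlibs = ['m']                                        # hypothetical check_mathlib() result
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])   # '-lm'
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])  # 'm.lib'
assert (posix_mlib, msvc_mlib) == ('-lm', 'm.lib')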
- # Intel and Clang also don't seem happy with /GL - is_msvc = (platform.platform().startswith('Windows') and - platform.python_compiler().startswith('MS')) - config.add_installed_library('npymath', - sources=npymath_sources + [get_mathlib_info], - install_dir='lib', - build_info={ - 'include_dirs' : [], # empty list required for creating npy_math_internal.h - 'extra_compiler_args' : (['/GL-'] if is_msvc else []), - }) - config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", - subst_dict) - config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", - subst_dict) - - ####################################################################### - # npysort library # - ####################################################################### - - # This library is created for the build but it is not installed - npysort_sources = [join('src', 'common', 'npy_sort.h.src'), - join('src', 'npysort', 'quicksort.c.src'), - join('src', 'npysort', 'mergesort.c.src'), - join('src', 'npysort', 'timsort.c.src'), - join('src', 'npysort', 'heapsort.c.src'), - join('src', 'npysort', 'radixsort.c.src'), - join('src', 'common', 'npy_partition.h.src'), - join('src', 'npysort', 'selection.c.src'), - join('src', 'common', 'npy_binsearch.h.src'), - join('src', 'npysort', 'binsearch.c.src'), - ] - config.add_library('npysort', - sources=npysort_sources, - include_dirs=[]) - - ####################################################################### - # multiarray_tests module # - ####################################################################### - - config.add_extension('_multiarray_tests', - sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), - join('src', 'common', 'mem_overlap.c')], - depends=[join('src', 'common', 'mem_overlap.h'), - join('src', 'common', 'npy_extint128.h')], - libraries=['npymath']) - - ####################################################################### - # _multiarray_umath module - common part # - ####################################################################### - - common_deps = [ - join('src', 'common', 'array_assign.h'), - join('src', 'common', 'binop_override.h'), - join('src', 'common', 'cblasfuncs.h'), - join('src', 'common', 'lowlevel_strided_loops.h'), - join('src', 'common', 'mem_overlap.h'), - join('src', 'common', 'npy_cblas.h'), - join('src', 'common', 'npy_config.h'), - join('src', 'common', 'npy_ctypes.h'), - join('src', 'common', 'npy_extint128.h'), - join('src', 'common', 'npy_import.h'), - join('src', 'common', 'npy_longdouble.h'), - join('src', 'common', 'templ_common.h.src'), - join('src', 'common', 'ucsnarrow.h'), - join('src', 'common', 'ufunc_override.h'), - join('src', 'common', 'umathmodule.h'), - join('src', 'common', 'numpyos.h'), - ] - - common_src = [ - join('src', 'common', 'array_assign.c'), - join('src', 'common', 'mem_overlap.c'), - join('src', 'common', 'npy_longdouble.c'), - join('src', 'common', 'templ_common.h.src'), - join('src', 'common', 'ucsnarrow.c'), - join('src', 'common', 'ufunc_override.c'), - join('src', 'common', 'numpyos.c'), - ] - - if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0": - blas_info = get_info('blas_ilp64_opt', 2) - else: - blas_info = get_info('blas_opt', 0) - - have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []) - - if have_blas: - extra_info = blas_info - # These files are also in MANIFEST.in so that they are always in - # the source distribution independently of HAVE_CBLAS. 
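For context, get_info returns a plain dict of build flags, and the have_blas test above simply looks for the HAVE_CBLAS macro in it. A hedged sketch with a made-up blas_info value (the real contents depend on which BLAS is detected):

blas_info = {'define_macros': [('HAVE_CBLAS', None)],   # hypothetical
             'libraries': ['openblas'],                  # hypothetical
             'library_dirs': ['/usr/lib']}               # hypothetical
have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
assert have_blas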
- common_src.extend([join('src', 'common', 'cblasfuncs.c'), - join('src', 'common', 'python_xerbla.c'), - ]) - if uses_accelerate_framework(blas_info): - common_src.extend(get_sgemv_fix()) - else: - extra_info = {} - - ####################################################################### - # _multiarray_umath module - multiarray part # - ####################################################################### - - multiarray_deps = [ - join('src', 'multiarray', 'arrayobject.h'), - join('src', 'multiarray', 'arraytypes.h'), - join('src', 'multiarray', 'arrayfunction_override.h'), - join('src', 'multiarray', 'npy_buffer.h'), - join('src', 'multiarray', 'calculation.h'), - join('src', 'multiarray', 'common.h'), - join('src', 'multiarray', 'convert_datatype.h'), - join('src', 'multiarray', 'convert.h'), - join('src', 'multiarray', 'conversion_utils.h'), - join('src', 'multiarray', 'ctors.h'), - join('src', 'multiarray', 'descriptor.h'), - join('src', 'multiarray', 'dragon4.h'), - join('src', 'multiarray', 'getset.h'), - join('src', 'multiarray', 'hashdescr.h'), - join('src', 'multiarray', 'iterators.h'), - join('src', 'multiarray', 'mapping.h'), - join('src', 'multiarray', 'methods.h'), - join('src', 'multiarray', 'multiarraymodule.h'), - join('src', 'multiarray', 'nditer_impl.h'), - join('src', 'multiarray', 'number.h'), - join('src', 'multiarray', 'refcount.h'), - join('src', 'multiarray', 'scalartypes.h'), - join('src', 'multiarray', 'sequence.h'), - join('src', 'multiarray', 'shape.h'), - join('src', 'multiarray', 'strfuncs.h'), - join('src', 'multiarray', 'typeinfo.h'), - join('src', 'multiarray', 'usertypes.h'), - join('src', 'multiarray', 'vdot.h'), - join('include', 'numpy', 'arrayobject.h'), - join('include', 'numpy', '_neighborhood_iterator_imp.h'), - join('include', 'numpy', 'npy_endian.h'), - join('include', 'numpy', 'arrayscalars.h'), - join('include', 'numpy', 'noprefix.h'), - join('include', 'numpy', 'npy_interrupt.h'), - join('include', 'numpy', 'npy_3kcompat.h'), - join('include', 'numpy', 'npy_math.h'), - join('include', 'numpy', 'halffloat.h'), - join('include', 'numpy', 'npy_common.h'), - join('include', 'numpy', 'npy_os.h'), - join('include', 'numpy', 'utils.h'), - join('include', 'numpy', 'ndarrayobject.h'), - join('include', 'numpy', 'npy_cpu.h'), - join('include', 'numpy', 'numpyconfig.h'), - join('include', 'numpy', 'ndarraytypes.h'), - join('include', 'numpy', 'npy_1_7_deprecated_api.h'), - # add library sources as distuils does not consider libraries - # dependencies - ] + npysort_sources + npymath_sources - - multiarray_src = [ - join('src', 'multiarray', 'alloc.c'), - join('src', 'multiarray', 'arrayobject.c'), - join('src', 'multiarray', 'arraytypes.c.src'), - join('src', 'multiarray', 'array_assign_scalar.c'), - join('src', 'multiarray', 'array_assign_array.c'), - join('src', 'multiarray', 'arrayfunction_override.c'), - join('src', 'multiarray', 'buffer.c'), - join('src', 'multiarray', 'calculation.c'), - join('src', 'multiarray', 'compiled_base.c'), - join('src', 'multiarray', 'common.c'), - join('src', 'multiarray', 'convert.c'), - join('src', 'multiarray', 'convert_datatype.c'), - join('src', 'multiarray', 'conversion_utils.c'), - join('src', 'multiarray', 'ctors.c'), - join('src', 'multiarray', 'datetime.c'), - join('src', 'multiarray', 'datetime_strings.c'), - join('src', 'multiarray', 'datetime_busday.c'), - join('src', 'multiarray', 'datetime_busdaycal.c'), - join('src', 'multiarray', 'descriptor.c'), - join('src', 'multiarray', 'dragon4.c'), - join('src', 
'multiarray', 'dtype_transfer.c'), - join('src', 'multiarray', 'einsum.c.src'), - join('src', 'multiarray', 'flagsobject.c'), - join('src', 'multiarray', 'getset.c'), - join('src', 'multiarray', 'hashdescr.c'), - join('src', 'multiarray', 'item_selection.c'), - join('src', 'multiarray', 'iterators.c'), - join('src', 'multiarray', 'lowlevel_strided_loops.c.src'), - join('src', 'multiarray', 'mapping.c'), - join('src', 'multiarray', 'methods.c'), - join('src', 'multiarray', 'multiarraymodule.c'), - join('src', 'multiarray', 'nditer_templ.c.src'), - join('src', 'multiarray', 'nditer_api.c'), - join('src', 'multiarray', 'nditer_constr.c'), - join('src', 'multiarray', 'nditer_pywrap.c'), - join('src', 'multiarray', 'number.c'), - join('src', 'multiarray', 'refcount.c'), - join('src', 'multiarray', 'sequence.c'), - join('src', 'multiarray', 'shape.c'), - join('src', 'multiarray', 'scalarapi.c'), - join('src', 'multiarray', 'scalartypes.c.src'), - join('src', 'multiarray', 'strfuncs.c'), - join('src', 'multiarray', 'temp_elide.c'), - join('src', 'multiarray', 'typeinfo.c'), - join('src', 'multiarray', 'usertypes.c'), - join('src', 'multiarray', 'vdot.c'), - ] - - ####################################################################### - # _multiarray_umath module - umath part # - ####################################################################### - - def generate_umath_c(ext, build_dir): - target = join(build_dir, header_dir, '__umath_generated.c') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) - script = generate_umath_py - if newer(script, target): - with open(target, 'w') as f: - f.write(generate_umath.make_code(generate_umath.defdict, - generate_umath.__file__)) - return [] - - umath_src = [ - join('src', 'umath', 'umathmodule.c'), - join('src', 'umath', 'reduction.c'), - join('src', 'umath', 'funcs.inc.src'), - join('src', 'umath', 'simd.inc.src'), - join('src', 'umath', 'loops.h.src'), - join('src', 'umath', 'loops.c.src'), - join('src', 'umath', 'matmul.h.src'), - join('src', 'umath', 'matmul.c.src'), - join('src', 'umath', 'clip.h.src'), - join('src', 'umath', 'clip.c.src'), - join('src', 'umath', 'ufunc_object.c'), - join('src', 'umath', 'extobj.c'), - join('src', 'umath', 'cpuid.c'), - join('src', 'umath', 'scalarmath.c.src'), - join('src', 'umath', 'ufunc_type_resolution.c'), - join('src', 'umath', 'override.c'), - ] - - umath_deps = [ - generate_umath_py, - join('include', 'numpy', 'npy_math.h'), - join('include', 'numpy', 'halffloat.h'), - join('src', 'multiarray', 'common.h'), - join('src', 'multiarray', 'number.h'), - join('src', 'common', 'templ_common.h.src'), - join('src', 'umath', 'simd.inc.src'), - join('src', 'umath', 'override.h'), - join(codegen_dir, 'generate_ufunc_api.py'), - ] - - config.add_extension('_multiarray_umath', - sources=multiarray_src + umath_src + - npymath_sources + common_src + - [generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - join(codegen_dir, 'generate_numpy_api.py'), - join('*.py'), - generate_umath_c, - generate_ufunc_api, - ], - depends=deps + multiarray_deps + umath_deps + - common_deps, - libraries=['npymath', 'npysort'], - extra_info=extra_info) - - ####################################################################### - # umath_tests module # - ####################################################################### - - config.add_extension('_umath_tests', - sources=[join('src', 'umath', '_umath_tests.c.src')]) - - ####################################################################### - # 
custom rational dtype module # - ####################################################################### - - config.add_extension('_rational_tests', - sources=[join('src', 'umath', '_rational_tests.c.src')]) - - ####################################################################### - # struct_ufunc_test module # - ####################################################################### - - config.add_extension('_struct_ufunc_tests', - sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')]) - - - ####################################################################### - # operand_flag_tests module # - ####################################################################### - - config.add_extension('_operand_flag_tests', - sources=[join('src', 'umath', '_operand_flag_tests.c.src')]) - - config.add_data_dir('tests') - config.add_data_dir('tests/data') - - config.make_svn_version_py() - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/core/setup_common.py b/venv/lib/python3.7/site-packages/numpy/core/setup_common.py deleted file mode 100644 index 6356f08..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/setup_common.py +++ /dev/null @@ -1,457 +0,0 @@ -from __future__ import division, absolute_import, print_function - -# Code common to build tools -import sys -import warnings -import copy -import binascii -import textwrap - -from numpy.distutils.misc_util import mingw32 - - -#------------------- -# Versioning support -#------------------- -# How to change C_API_VERSION ? -# - increase C_API_VERSION value -# - record the hash for the new C API with the cversions.py script -# and add the hash to cversions.txt -# The hash values are used to remind developers when the C API number was not -# updated - generates a MismatchCAPIWarning warning which is turned into an -# exception for released version. - -# Binary compatibility version number. This number is increased whenever the -# C-API is changed such that binary compatibility is broken, i.e. whenever a -# recompile of extension modules is needed. -C_ABI_VERSION = 0x01000009 - -# Minor API version. This number is increased whenever a change is made to the -# C-API -- whether it breaks binary compatibility or not. Some changes, such -# as adding a function pointer to the end of the function table, can be made -# without breaking binary compatibility. In this case, only the C_API_VERSION -# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is -# broken, both C_API_VERSION and C_ABI_VERSION should be increased. -# -# 0x00000008 - 1.7.x -# 0x00000009 - 1.8.x -# 0x00000009 - 1.9.x -# 0x0000000a - 1.10.x -# 0x0000000a - 1.11.x -# 0x0000000a - 1.12.x -# 0x0000000b - 1.13.x -# 0x0000000c - 1.14.x -# 0x0000000c - 1.15.x -# 0x0000000d - 1.16.x -C_API_VERSION = 0x0000000d - -class MismatchCAPIWarning(Warning): - pass - -def is_released(config): - """Return True if a released version of numpy is detected.""" - from distutils.version import LooseVersion - - v = config.get_version('../version.py') - if v is None: - raise ValueError("Could not get version") - pv = LooseVersion(vstring=v).version - if len(pv) > 3: - return False - return True - -def get_api_versions(apiversion, codegen_dir): - """ - Return current C API checksum and the recorded checksum. - - Return current C API checksum and the recorded checksum for the given - version of the C API version. 
- - """ - # Compute the hash of the current API as defined in the .txt files in - # code_generators - sys.path.insert(0, codegen_dir) - try: - m = __import__('genapi') - numpy_api = __import__('numpy_api') - curapi_hash = m.fullapi_hash(numpy_api.full_api) - apis_hash = m.get_versions_hash() - finally: - del sys.path[0] - - return curapi_hash, apis_hash[apiversion] - -def check_api_version(apiversion, codegen_dir): - """Emits a MismatchCAPIWarning if the C API version needs updating.""" - curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir) - - # If different hash, it means that the api .txt files in - # codegen_dir have been updated without the API version being - # updated. Any modification in those .txt files should be reflected - # in the api and eventually abi versions. - # To compute the checksum of the current API, use numpy/core/cversions.py - if not curapi_hash == api_hash: - msg = ("API mismatch detected, the C API version " - "numbers have to be updated. Current C api version is %d, " - "with checksum %s, but recorded checksum for C API version %d " - "in core/codegen_dir/cversions.txt is %s. If functions were " - "added in the C API, you have to update C_API_VERSION in %s." - ) - warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, - __file__), - MismatchCAPIWarning, stacklevel=2) -# Mandatory functions: if not found, fail the build -MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", - "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", - "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] - -# Standard functions which may not be available and for which we have a -# replacement implementation. Note that some of these are C99 functions. -OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", - "copysign", "nextafter", "ftello", "fseeko", - "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate", - "backtrace", "madvise"] - - -OPTIONAL_HEADERS = [ -# sse headers only enabled automatically on amd64/x32 builds - "xmmintrin.h", # SSE - "emmintrin.h", # SSE2 - "immintrin.h", # AVX - "features.h", # for glibc version linux - "xlocale.h", # see GH#8367 - "dlfcn.h", # dladdr - "sys/mman.h", #madvise -] - -# optional gcc compiler builtins and their call arguments and optional a -# required header and definition name (HAVE_ prepended) -# call arguments are required as the compiler will do strict signature checking -OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'), - ("__builtin_isinf", '5.'), - ("__builtin_isfinite", '5.'), - ("__builtin_bswap32", '5u'), - ("__builtin_bswap64", '5u'), - ("__builtin_expect", '5, 0'), - ("__builtin_mul_overflow", '5, 5, (int*)5'), - # broken on OSX 10.11, make sure its not optimized away - ("volatile int r = __builtin_cpu_supports", '"sse"', - "stdio.h", "__BUILTIN_CPU_SUPPORTS"), - ("volatile int r = __builtin_cpu_supports", '"avx512f"', - "stdio.h", "__BUILTIN_CPU_SUPPORTS_AVX512F"), - # MMX only needed for icc, but some clangs don't have it - ("_m_from_int64", '0', "emmintrin.h"), - ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE - ("_mm_prefetch", '(float*)0, _MM_HINT_NTA', - "xmmintrin.h"), # SSE - ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2 - ("__builtin_prefetch", "(float*)0, 0, 3"), - # check that the linker can handle avx - ("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"', - "stdio.h", "LINK_AVX"), - ("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"', - "stdio.h", "LINK_AVX2"), - ("__asm__ volatile", '"vpaddd 
%zmm1, %zmm2, %zmm3"', - "stdio.h", "LINK_AVX512F"), - ("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"), - ] - -# function attributes -# tested via "int %s %s(void *);" % (attribute, name) -# function name will be converted to HAVE_ preprocessor macro -OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', - 'attribute_optimize_unroll_loops'), - ('__attribute__((optimize("O3")))', - 'attribute_optimize_opt_3'), - ('__attribute__((nonnull (1)))', - 'attribute_nonnull'), - ('__attribute__((target ("avx")))', - 'attribute_target_avx'), - ('__attribute__((target ("avx2")))', - 'attribute_target_avx2'), - ('__attribute__((target ("avx512f")))', - 'attribute_target_avx512f'), - ] - -# function attributes with intrinsics -# To ensure your compiler can compile avx intrinsics with just the attributes -# gcc 4.8.4 support attributes but not with intrisics -# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code) -# function name will be converted to HAVE_ preprocessor macro -OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))', - 'attribute_target_avx2_with_intrinsics', - '__m256 temp = _mm256_set1_ps(1.0); temp = \ - _mm256_fmadd_ps(temp, temp, temp)', - 'immintrin.h'), - ('__attribute__((target("avx512f")))', - 'attribute_target_avx512f_with_intrinsics', - '__m512 temp = _mm512_set1_ps(1.0)', - 'immintrin.h'), - ] - -# variable attributes tested via "int %s a" % attribute -OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] - -# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h -OPTIONAL_STDFUNCS_MAYBE = [ - "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign", - "ftello", "fseeko" - ] - -# C99 functions: float and long double versions -C99_FUNCS = [ - "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", - "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", - "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", - "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign", - "nextafter", "cbrt" - ] -C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] -C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] -C99_COMPLEX_TYPES = [ - 'complex double', 'complex float', 'complex long double' - ] -C99_COMPLEX_FUNCS = [ - "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan", - "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow", - "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh" - ] - -def fname2def(name): - return "HAVE_%s" % name.upper() - -def sym2def(symbol): - define = symbol.replace(' ', '') - return define.upper() - -def type2def(symbol): - define = symbol.replace(' ', '_') - return define.upper() - -# Code to detect long double representation taken from MPFR m4 macro -def check_long_double_representation(cmd): - cmd._check_compiler() - body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} - - # Disable whole program optimization (the default on vs2015, with python 3.5+) - # which generates intermediary object files and prevents checking the - # float representation. - if sys.platform == "win32" and not mingw32(): - try: - cmd.compiler.compile_options.remove("/GL") - except (AttributeError, ValueError): - pass - - # Disable multi-file interprocedural optimization in the Intel compiler on Linux - # which generates intermediary object files and prevents checking the - # float representation. 
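The three name-mangling helpers defined above (fname2def, sym2def, type2def) differ only in how they treat spaces; a few illustrative values, derived by hand from the definitions, shown as a sanity check:

assert fname2def('expm1') == 'HAVE_EXPM1'
assert sym2def('long double') == 'LONGDOUBLE'                    # used for NPY_SIZEOF_* macros
assert type2def('complex long double') == 'COMPLEX_LONG_DOUBLE'  # used for NPY_HAVE_* macros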
- elif (sys.platform != "win32" - and cmd.compiler.compiler_type.startswith('intel') - and '-ipo' in cmd.compiler.cc_exe): - newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') - cmd.compiler.set_executables( - compiler=newcompiler, - compiler_so=newcompiler, - compiler_cxx=newcompiler, - linker_exe=newcompiler, - linker_so=newcompiler + ' -shared' - ) - - # We need to use _compile because we need the object filename - src, obj = cmd._compile(body, None, None, 'c') - try: - ltype = long_double_representation(pyod(obj)) - return ltype - except ValueError: - # try linking to support CC="gcc -flto" or icc -ipo - # struct needs to be volatile so it isn't optimized away - # additionally "clang -flto" requires the foo struct to be used - body = body.replace('struct', 'volatile struct') - body += "int main(void) { return foo.before[0]; }\n" - src, obj = cmd._compile(body, None, None, 'c') - cmd.temp_files.append("_configtest") - cmd.compiler.link_executable([obj], "_configtest") - ltype = long_double_representation(pyod("_configtest")) - return ltype - finally: - cmd._clean() - -LONG_DOUBLE_REPRESENTATION_SRC = r""" -/* "before" is 16 bytes to ensure there's no padding between it and "x". - * We're not expecting any "long double" bigger than 16 bytes or with - * alignment requirements stricter than 16 bytes. */ -typedef %(type)s test_type; - -struct { - char before[16]; - test_type x; - char after[8]; -} foo = { - { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, - -123456789.0, - { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } -}; -""" - -def pyod(filename): - """Python implementation of the od UNIX utility (od -b, more exactly). - - Parameters - ---------- - filename : str - name of the file to get the dump from. - - Returns - ------- - out : seq - list of lines of od output - - Note - ---- - We only implement enough to get the necessary information for long double - representation, this is not intended as a compatible replacement for od. 
- """ - def _pyod2(): - out = [] - - with open(filename, 'rb') as fid: - yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] - for i in range(0, len(yo), 16): - line = ['%07d' % int(oct(i))] - line.extend(['%03d' % c for c in yo[i:i+16]]) - out.append(" ".join(line)) - return out - - def _pyod3(): - out = [] - - with open(filename, 'rb') as fid: - yo2 = [oct(o)[2:] for o in fid.read()] - for i in range(0, len(yo2), 16): - line = ['%07d' % int(oct(i)[2:])] - line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) - out.append(" ".join(line)) - return out - - if sys.version_info[0] < 3: - return _pyod2() - else: - return _pyod3() - -_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', - '001', '043', '105', '147', '211', '253', '315', '357'] -_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] - -_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] -_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] -_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000'] -_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000', '000', '000', '000', '000'] -_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', - '242', '240', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', - '000', '000', '000', '000', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] -_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] + - ['000'] * 8) -_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] + - ['000'] * 8) - -def long_double_representation(lines): - """Given a binary dump as given by GNU od -b, look for long double - representation.""" - - # Read contains a list of 32 items, each item is a byte (in octal - # representation, as a string). We 'slide' over the output until read is of - # the form before_seq + content + after_sequence, where content is the long double - # representation: - # - content is 12 bytes: 80 bits Intel representation - # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision - # - content is 8 bytes: same as double (not implemented yet) - read = [''] * 32 - saw = None - for line in lines: - # we skip the first word, as od -b output an index at the beginning of - # each line - for w in line.split()[1:]: - read.pop(0) - read.append(w) - - # If the end of read is equal to the after_sequence, read contains - # the long double - if read[-8:] == _AFTER_SEQ: - saw = copy.copy(read) - # if the content was 12 bytes, we only have 32 - 8 - 12 = 12 - # "before" bytes. In other words the first 4 "before" bytes went - # past the sliding window. - if read[:12] == _BEFORE_SEQ[4:]: - if read[12:-8] == _INTEL_EXTENDED_12B: - return 'INTEL_EXTENDED_12_BYTES_LE' - if read[12:-8] == _MOTOROLA_EXTENDED_12B: - return 'MOTOROLA_EXTENDED_12_BYTES_BE' - # if the content was 16 bytes, we are left with 32-8-16 = 16 - # "before" bytes, so 8 went past the sliding window. 
- elif read[:8] == _BEFORE_SEQ[8:]: - if read[8:-8] == _INTEL_EXTENDED_16B: - return 'INTEL_EXTENDED_16_BYTES_LE' - elif read[8:-8] == _IEEE_QUAD_PREC_BE: - return 'IEEE_QUAD_BE' - elif read[8:-8] == _IEEE_QUAD_PREC_LE: - return 'IEEE_QUAD_LE' - elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE: - return 'IBM_DOUBLE_DOUBLE_LE' - elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE: - return 'IBM_DOUBLE_DOUBLE_BE' - # if the content was 8 bytes, left with 32-8-8 = 16 bytes - elif read[:16] == _BEFORE_SEQ: - if read[16:-8] == _IEEE_DOUBLE_LE: - return 'IEEE_DOUBLE_LE' - elif read[16:-8] == _IEEE_DOUBLE_BE: - return 'IEEE_DOUBLE_BE' - - if saw is not None: - raise ValueError("Unrecognized format (%s)" % saw) - else: - # We never detected the after_sequence - raise ValueError("Could not lock sequences (%s)" % saw) - - -def check_for_right_shift_internal_compiler_error(cmd): - """ - On our arm CI, this fails with an internal compilation error - - The failure looks like the following, and can be reproduced on ARM64 GCC 5.4: - - : In function 'right_shift': - :4:20: internal compiler error: in expand_shift_1, at expmed.c:2349 - ip1[i] = ip1[i] >> in2; - ^ - Please submit a full bug report, - with preprocessed source if appropriate. - See for instructions. - Compiler returned: 1 - - This function returns True if this compiler bug is present, and we need to - turn off optimization for the function - """ - cmd._check_compiler() - has_optimize = cmd.try_compile(textwrap.dedent("""\ - __attribute__((optimize("O3"))) void right_shift() {} - """), None, None) - if not has_optimize: - return False - - no_err = cmd.try_compile(textwrap.dedent("""\ - typedef long the_type; /* fails also for unsigned and long long */ - __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) { - for (int i = 0; i < n; i++) { - if (in2 < (the_type)sizeof(the_type) * 8) { - ip1[i] = ip1[i] >> in2; - } - } - } - """), None, None) - return not no_err diff --git a/venv/lib/python3.7/site-packages/numpy/core/shape_base.py b/venv/lib/python3.7/site-packages/numpy/core/shape_base.py deleted file mode 100644 index 31b1c20..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/shape_base.py +++ /dev/null @@ -1,906 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', - 'stack', 'vstack'] - -import functools -import operator -import warnings - -from . import numeric as _nx -from . import overrides -from ._asarray import array, asanyarray -from .multiarray import normalize_axis_index -from . import fromnumeric as _from_nx - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -def _atleast_1d_dispatcher(*arys): - return arys - - -@array_function_dispatch(_atleast_1d_dispatcher) -def atleast_1d(*arys): - """ - Convert inputs to arrays with at least one dimension. - - Scalar inputs are converted to 1-dimensional arrays, whilst - higher-dimensional inputs are preserved. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more input arrays. - - Returns - ------- - ret : ndarray - An array, or list of arrays, each with ``a.ndim >= 1``. - Copies are made only if necessary. 
- - See Also - -------- - atleast_2d, atleast_3d - - Examples - -------- - >>> np.atleast_1d(1.0) - array([1.]) - - >>> x = np.arange(9.0).reshape(3,3) - >>> np.atleast_1d(x) - array([[0., 1., 2.], - [3., 4., 5.], - [6., 7., 8.]]) - >>> np.atleast_1d(x) is x - True - - >>> np.atleast_1d(1, [3, 4]) - [array([1]), array([3, 4])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1) - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def _atleast_2d_dispatcher(*arys): - return arys - - -@array_function_dispatch(_atleast_2d_dispatcher) -def atleast_2d(*arys): - """ - View inputs as arrays with at least two dimensions. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more array-like sequences. Non-array inputs are converted - to arrays. Arrays that already have two or more dimensions are - preserved. - - Returns - ------- - res, res2, ... : ndarray - An array, or list of arrays, each with ``a.ndim >= 2``. - Copies are avoided where possible, and views with two or more - dimensions are returned. - - See Also - -------- - atleast_1d, atleast_3d - - Examples - -------- - >>> np.atleast_2d(3.0) - array([[3.]]) - - >>> x = np.arange(3.0) - >>> np.atleast_2d(x) - array([[0., 1., 2.]]) - >>> np.atleast_2d(x).base is x - True - - >>> np.atleast_2d(1, [1, 2], [[1, 2]]) - [array([[1]]), array([[1, 2]]), array([[1, 2]])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1, 1) - elif ary.ndim == 1: - result = ary[_nx.newaxis, :] - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def _atleast_3d_dispatcher(*arys): - return arys - - -@array_function_dispatch(_atleast_3d_dispatcher) -def atleast_3d(*arys): - """ - View inputs as arrays with at least three dimensions. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more array-like sequences. Non-array inputs are converted to - arrays. Arrays that already have three or more dimensions are - preserved. - - Returns - ------- - res1, res2, ... : ndarray - An array, or list of arrays, each with ``a.ndim >= 3``. Copies are - avoided where possible, and views with three or more dimensions are - returned. For example, a 1-D array of shape ``(N,)`` becomes a view - of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a - view of shape ``(M, N, 1)``. - - See Also - -------- - atleast_1d, atleast_2d - - Examples - -------- - >>> np.atleast_3d(3.0) - array([[[3.]]]) - - >>> x = np.arange(3.0) - >>> np.atleast_3d(x).shape - (1, 3, 1) - - >>> x = np.arange(12.0).reshape(4,3) - >>> np.atleast_3d(x).shape - (4, 3, 1) - >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself - True - - >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): - ... print(arr, arr.shape) # doctest: +SKIP - ... 
- [[[1] - [2]]] (1, 2, 1) - [[[1] - [2]]] (1, 2, 1) - [[[1 2]]] (1, 1, 2) - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1, 1, 1) - elif ary.ndim == 1: - result = ary[_nx.newaxis, :, _nx.newaxis] - elif ary.ndim == 2: - result = ary[:, :, _nx.newaxis] - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def _arrays_for_stack_dispatcher(arrays, stacklevel=4): - if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): - warnings.warn('arrays to stack must be passed as a "sequence" type ' - 'such as list or tuple. Support for non-sequence ' - 'iterables such as generators is deprecated as of ' - 'NumPy 1.16 and will raise an error in the future.', - FutureWarning, stacklevel=stacklevel) - return () - return arrays - - -def _vhstack_dispatcher(tup): - return _arrays_for_stack_dispatcher(tup) - - -@array_function_dispatch(_vhstack_dispatcher) -def vstack(tup): - """ - Stack arrays in sequence vertically (row wise). - - This is equivalent to concatenation along the first axis after 1-D arrays - of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by - `vsplit`. - - This function makes most sense for arrays with up to 3 dimensions. For - instance, for pixel-data with a height (first axis), width (second axis), - and r/g/b channels (third axis). The functions `concatenate`, `stack` and - `block` provide more general stacking and concatenation operations. - - Parameters - ---------- - tup : sequence of ndarrays - The arrays must have the same shape along all but the first axis. - 1-D arrays must have the same length. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays, will be at least 2-D. - - See Also - -------- - stack : Join a sequence of arrays along a new axis. - hstack : Stack arrays in sequence horizontally (column wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - concatenate : Join a sequence of arrays along an existing axis. - vsplit : Split array into a list of multiple sub-arrays vertically. - block : Assemble arrays from blocks. - - Examples - -------- - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.vstack((a,b)) - array([[1, 2, 3], - [2, 3, 4]]) - - >>> a = np.array([[1], [2], [3]]) - >>> b = np.array([[2], [3], [4]]) - >>> np.vstack((a,b)) - array([[1], - [2], - [3], - [2], - [3], - [4]]) - - """ - if not overrides.ARRAY_FUNCTION_ENABLED: - # raise warning if necessary - _arrays_for_stack_dispatcher(tup, stacklevel=2) - arrs = atleast_2d(*tup) - if not isinstance(arrs, list): - arrs = [arrs] - return _nx.concatenate(arrs, 0) - - -@array_function_dispatch(_vhstack_dispatcher) -def hstack(tup): - """ - Stack arrays in sequence horizontally (column wise). - - This is equivalent to concatenation along the second axis, except for 1-D - arrays where it concatenates along the first axis. Rebuilds arrays divided - by `hsplit`. - - This function makes most sense for arrays with up to 3 dimensions. For - instance, for pixel-data with a height (first axis), width (second axis), - and r/g/b channels (third axis). The functions `concatenate`, `stack` and - `block` provide more general stacking and concatenation operations. - - Parameters - ---------- - tup : sequence of ndarrays - The arrays must have the same shape along all but the second axis, - except 1-D arrays which can be any length. 
- - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - stack : Join a sequence of arrays along a new axis. - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third axis). - concatenate : Join a sequence of arrays along an existing axis. - hsplit : Split array along second axis. - block : Assemble arrays from blocks. - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.hstack((a,b)) - array([1, 2, 3, 2, 3, 4]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.hstack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - if not overrides.ARRAY_FUNCTION_ENABLED: - # raise warning if necessary - _arrays_for_stack_dispatcher(tup, stacklevel=2) - - arrs = atleast_1d(*tup) - if not isinstance(arrs, list): - arrs = [arrs] - # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" - if arrs and arrs[0].ndim == 1: - return _nx.concatenate(arrs, 0) - else: - return _nx.concatenate(arrs, 1) - - -def _stack_dispatcher(arrays, axis=None, out=None): - arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6) - if out is not None: - # optimize for the typical case where only arrays is provided - arrays = list(arrays) - arrays.append(out) - return arrays - - -@array_function_dispatch(_stack_dispatcher) -def stack(arrays, axis=0, out=None): - """ - Join a sequence of arrays along a new axis. - - The ``axis`` parameter specifies the index of the new axis in the - dimensions of the result. For example, if ``axis=0`` it will be the first - dimension and if ``axis=-1`` it will be the last dimension. - - .. versionadded:: 1.10.0 - - Parameters - ---------- - arrays : sequence of array_like - Each array must have the same shape. - - axis : int, optional - The axis in the result array along which the input arrays are stacked. - - out : ndarray, optional - If provided, the destination to place the result. The shape must be - correct, matching that of what stack would have returned if no - out argument were specified. - - Returns - ------- - stacked : ndarray - The stacked array has one more dimension than the input arrays. - - See Also - -------- - concatenate : Join a sequence of arrays along an existing axis. - split : Split array into a list of multiple sub-arrays of equal size. - block : Assemble arrays from blocks. 
-
-    Examples
-    --------
-    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
-    >>> np.stack(arrays, axis=0).shape
-    (10, 3, 4)
-
-    >>> np.stack(arrays, axis=1).shape
-    (3, 10, 4)
-
-    >>> np.stack(arrays, axis=2).shape
-    (3, 4, 10)
-
-    >>> a = np.array([1, 2, 3])
-    >>> b = np.array([2, 3, 4])
-    >>> np.stack((a, b))
-    array([[1, 2, 3],
-           [2, 3, 4]])
-
-    >>> np.stack((a, b), axis=-1)
-    array([[1, 2],
-           [2, 3],
-           [3, 4]])
-
-    """
-    if not overrides.ARRAY_FUNCTION_ENABLED:
-        # raise warning if necessary
-        _arrays_for_stack_dispatcher(arrays, stacklevel=2)
-
-    arrays = [asanyarray(arr) for arr in arrays]
-    if not arrays:
-        raise ValueError('need at least one array to stack')
-
-    shapes = {arr.shape for arr in arrays}
-    if len(shapes) != 1:
-        raise ValueError('all input arrays must have the same shape')
-
-    result_ndim = arrays[0].ndim + 1
-    axis = normalize_axis_index(axis, result_ndim)
-
-    sl = (slice(None),) * axis + (_nx.newaxis,)
-    expanded_arrays = [arr[sl] for arr in arrays]
-    return _nx.concatenate(expanded_arrays, axis=axis, out=out)
-
-
-# Internal functions to eliminate the overhead of repeated dispatch in one of
-# the two possible paths inside np.block.
-# Use getattr to protect against __array_function__ being disabled.
-_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
-_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
-_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
-
-
-def _block_format_index(index):
-    """
-    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
-    """
-    idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
-    return 'arrays' + idx_str
-
-
-def _block_check_depths_match(arrays, parent_index=[]):
-    """
-    Recursive function checking that the depths of nested lists in `arrays`
-    all match. Mismatch raises a ValueError as described in the block
-    docstring below.
-
-    The entire index (rather than just the depth) needs to be calculated
-    for each innermost list, in case an error needs to be raised, so that
-    the index of the offending list can be printed as part of the error.
-
-    Parameters
-    ----------
-    arrays : nested list of arrays
-        The arrays to check
-    parent_index : list of int
-        The full index of `arrays` within the nested lists passed to
-        `_block_check_depths_match` at the top of the recursion.
-
-    Returns
-    -------
-    first_index : list of int
-        The full index of an element from the bottom of the nesting in
-        `arrays`. If any element at the bottom is an empty list, this will
-        refer to it, and the last index along the empty axis will be None.
-    max_arr_ndim : int
-        The maximum of the ndims of the arrays nested in `arrays`.
-    final_size : int
-        The number of elements in the final array. This is used to motivate
-        the choice of algorithm, based on benchmarking wisdom.
-
-    """
-    if type(arrays) is tuple:
-        # not strictly necessary, but saves us from:
-        # - more than one way to do things - no point treating tuples like
-        #   lists
-        # - horribly confusing behaviour that results when tuples are
-        #   treated like ndarray
-        raise TypeError(
-            '{} is a tuple. '
-            'Only lists can be used to arrange blocks, and np.block does '
-            'not allow implicit conversion from tuple to ndarray.'.format(
-                _block_format_index(parent_index)
-            )
-        )
-    elif type(arrays) is list and len(arrays) > 0:
-        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
-                      for i, arr in enumerate(arrays))
-
-        first_index, max_arr_ndim, final_size = next(idxs_ndims)
-        for index, ndim, size in idxs_ndims:
-            final_size += size
-            if ndim > max_arr_ndim:
-                max_arr_ndim = ndim
-            if len(index) != len(first_index):
-                raise ValueError(
-                    "List depths are mismatched. First element was at depth "
-                    "{}, but there is an element at depth {} ({})".format(
-                        len(first_index),
-                        len(index),
-                        _block_format_index(index)
-                    )
-                )
-            # propagate our flag that indicates an empty list at the bottom
-            if index[-1] is None:
-                first_index = index
-
-        return first_index, max_arr_ndim, final_size
-    elif type(arrays) is list and len(arrays) == 0:
-        # We've 'bottomed out' on an empty list
-        return parent_index + [None], 0, 0
-    else:
-        # We've 'bottomed out' - arrays is either a scalar or an array
-        size = _size(arrays)
-        return parent_index, _ndim(arrays), size
-
-
-def _atleast_nd(a, ndim):
-    # Ensures `a` has at least `ndim` dimensions by prepending
-    # ones to `a.shape` as necessary
-    return array(a, ndmin=ndim, copy=False, subok=True)
-
-
-def _accumulate(values):
-    # Helper function because Python 2.7 doesn't have
-    # itertools.accumulate
-    value = 0
-    accumulated = []
-    for v in values:
-        value += v
-        accumulated.append(value)
-    return accumulated
-
-
-def _concatenate_shapes(shapes, axis):
-    """Given array shapes, return the resulting shape and slice prefixes.
-
-    These help in nested concatenation.
-
-    Returns
-    -------
-    shape: tuple of int
-        This tuple satisfies:
-        ```
-        shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
-        shape == concatenate(arrs, axis).shape
-        ```
-
-    slice_prefixes: tuple of (slice(start, end), )
-        For a list of arrays being concatenated, this returns the slice
-        in the larger array at axis that needs to be sliced into.
-
-        For example, the following holds:
-        ```
-        ret = concatenate([a, b, c], axis)
-        _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
-
-        ret[(slice(None),) * axis + sl_a] == a
-        ret[(slice(None),) * axis + sl_b] == b
-        ret[(slice(None),) * axis + sl_c] == c
-        ```
-
-        These are called slice prefixes since they are used in the recursive
-        blocking algorithm to compute the left-most slices during the
-        recursion. Therefore, they must be prepended to the rest of the slice
-        that was computed deeper in the recursion.
-
-        These are returned as tuples to ensure that they can quickly be added
-        to an existing slice tuple without creating a new tuple every time.
-    """
-    # Cache a result that will be reused.
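A small hand-worked example of what this function computes (the input shapes are hypothetical; the arithmetic mirrors the code below):

shapes = [(2, 4), (3, 4)]            # two blocks joined along axis 0
extents = [s[0] for s in shapes]     # shape_at_axis == [2, 3]
offsets, total = [], 0
for n in extents:                    # mirrors _accumulate(...) above
    total += n
    offsets.append(total)            # offsets == [2, 5]
slice_prefixes = [(slice(start, end),)
                  for start, end in zip([0] + offsets, offsets)]
assert slice_prefixes == [(slice(0, 2),), (slice(2, 5),)]
# the combined shape is (5, 4); each prefix selects the rows into which
# the corresponding block is later assigned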
-    shape_at_axis = [shape[axis] for shape in shapes]
-
-    # Take a shape, any shape
-    first_shape = shapes[0]
-    first_shape_pre = first_shape[:axis]
-    first_shape_post = first_shape[axis+1:]
-
-    if any(shape[:axis] != first_shape_pre or
-           shape[axis+1:] != first_shape_post for shape in shapes):
-        raise ValueError(
-            'Mismatched array shapes in block along axis {}.'.format(axis))
-
-    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
-
-    offsets_at_axis = _accumulate(shape_at_axis)
-    slice_prefixes = [(slice(start, end),)
-                      for start, end in zip([0] + offsets_at_axis,
-                                            offsets_at_axis)]
-    return shape, slice_prefixes
-
-
-def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
-    """
-    Returns the shape of the final array, along with a list
-    of slices and a list of arrays that can be used for assignment inside the
-    new array.
-
-    Parameters
-    ----------
-    arrays : nested list of arrays
-        The arrays to check
-    max_depth : int
-        The number of nested lists
-    result_ndim : int
-        The number of dimensions in the final array.
-
-    Returns
-    -------
-    shape : tuple of int
-        The shape that the final array will take on.
-    slices : list of tuple of slices
-        The slices into the full array required for assignment. These are
-        required to be prepended with ``(Ellipsis, )`` to obtain the correct
-        final index.
-    arrays : list of ndarray
-        The data to assign to each slice of the full array.
-
-    """
-    if depth < max_depth:
-        shapes, slices, arrays = zip(
-            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
-              for arr in arrays])
-
-        axis = result_ndim - max_depth + depth
-        shape, slice_prefixes = _concatenate_shapes(shapes, axis)
-
-        # Prepend the slice prefix and flatten the slices
-        slices = [slice_prefix + the_slice
-                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
-                  for the_slice in inner_slices]
-
-        # Flatten the array list
-        arrays = functools.reduce(operator.add, arrays)
-
-        return shape, slices, arrays
-    else:
-        # We've 'bottomed out' - arrays is either a scalar or an array
-        # type(arrays) is not list
-        # Return the slice and the array inside a list to be consistent with
-        # the recursive case.
-        arr = _atleast_nd(arrays, result_ndim)
-        return arr.shape, [()], [arr]
-
-
-def _block(arrays, max_depth, result_ndim, depth=0):
-    """
-    Internal implementation of block based on repeated concatenation.
-    `arrays` is the argument passed to
-    block. `max_depth` is the depth of nested lists within `arrays` and
-    `result_ndim` is the greatest of the dimensions of the arrays in
-    `arrays` and the depth of the lists in `arrays` (see block docstring
-    for details).
-    """
-    if depth < max_depth:
-        arrs = [_block(arr, max_depth, result_ndim, depth+1)
-                for arr in arrays]
-        return _concatenate(arrs, axis=-(max_depth-depth))
-    else:
-        # We've 'bottomed out' - arrays is either a scalar or an array
-        # type(arrays) is not list
-        return _atleast_nd(arrays, result_ndim)
-
-
-def _block_dispatcher(arrays):
-    # Use type(...) is list to match the behavior of np.block(), which special
-    # cases list specifically rather than allowing for generic iterables or
-    # tuple. Also, we know that list.__array_function__ will never exist.
-    if type(arrays) is list:
-        for subarrays in arrays:
-            for subarray in _block_dispatcher(subarrays):
-                yield subarray
-    else:
-        yield arrays
-
-
-@array_function_dispatch(_block_dispatcher)
-def block(arrays):
-    """
-    Assemble an nd-array from nested lists of blocks.
- - Blocks in the innermost lists are concatenated (see `concatenate`) along - the last dimension (-1), then these are concatenated along the - second-last dimension (-2), and so on until the outermost list is reached. - - Blocks can be of any dimension, but will not be broadcast using the normal - rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` - the same for all blocks. This is primarily useful for working with scalars, - and means that code like ``np.block([v, 1])`` is valid, where - ``v.ndim == 1``. - - When the nested list is two levels deep, this allows block matrices to be - constructed from their components. - - .. versionadded:: 1.13.0 - - Parameters - ---------- - arrays : nested list of array_like or scalars (but not tuples) - If passed a single ndarray or scalar (a nested list of depth 0), this - is returned unmodified (and not copied). - - Element shapes must match along the appropriate axes (without - broadcasting), but leading 1s will be prepended to the shape as - necessary to make the dimensions match. - - Returns - ------- - block_array : ndarray - The array assembled from the given blocks. - - The dimensionality of the output is equal to the greatest of: - * the dimensionality of all the inputs - * the depth to which the input list is nested - - Raises - ------ - ValueError - * If list depths are mismatched - for instance, ``[[a, b], c]`` is - illegal, and should be spelt ``[[a, b], [c]]`` - * If lists are empty - for instance, ``[[a, b], []]`` - - See Also - -------- - concatenate : Join a sequence of arrays together. - stack : Stack arrays in sequence along a new dimension. - hstack : Stack arrays in sequence horizontally (column wise). - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - vsplit : Split array into a list of multiple sub-arrays vertically. - - Notes - ----- - - When called with only scalars, ``np.block`` is equivalent to an ndarray - call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to - ``np.array([[1, 2], [3, 4]])``. - - This function does not enforce that the blocks lie on a fixed grid. - ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: - - AAAbb - AAAbb - cccDD - - But is also allowed to produce, for some ``a, b, c, d``:: - - AAAbb - AAAbb - cDDDD - - Since concatenation happens along the last axis first, `block` is *not* - capable of producing the following directly:: - - AAAbb - cccbb - cccDD - - MATLAB's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is - equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. - - Examples - -------- - The most common use of this function is to build a block matrix - - >>> A = np.eye(2) * 2 - >>> B = np.eye(3) * 3 - >>> np.block([ - ... [A, np.zeros((2, 3))], - ... [np.ones((3, 2)), B ] - ...
]) - array([[2., 0., 0., 0., 0.], - [0., 2., 0., 0., 0.], - [1., 1., 3., 0., 0.], - [1., 1., 0., 3., 0.], - [1., 1., 0., 0., 3.]]) - - With a list of depth 1, `block` can be used as `hstack` - - >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) - array([1, 2, 3]) - - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.block([a, b, 10]) # hstack([a, b, 10]) - array([ 1, 2, 3, 2, 3, 4, 10]) - - >>> A = np.ones((2, 2), int) - >>> B = 2 * A - >>> np.block([A, B]) # hstack([A, B]) - array([[1, 1, 2, 2], - [1, 1, 2, 2]]) - - With a list of depth 2, `block` can be used in place of `vstack`: - - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.block([[a], [b]]) # vstack([a, b]) - array([[1, 2, 3], - [2, 3, 4]]) - - >>> A = np.ones((2, 2), int) - >>> B = 2 * A - >>> np.block([[A], [B]]) # vstack([A, B]) - array([[1, 1], - [1, 1], - [2, 2], - [2, 2]]) - - It can also be used in place of `atleast_1d` and `atleast_2d` - - >>> a = np.array(0) - >>> b = np.array([1]) - >>> np.block([a]) # atleast_1d(a) - array([0]) - >>> np.block([b]) # atleast_1d(b) - array([1]) - - >>> np.block([[a]]) # atleast_2d(a) - array([[0]]) - >>> np.block([[b]]) # atleast_2d(b) - array([[1]]) - - - """ - arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) - - # It was found through benchmarking that making an array of final size - # around 256x256 was faster by straight concatenation on an - # i7-7700HQ processor with dual-channel RAM at 2400 MHz. - # The choice of dtype did not seem to matter much. - # - # A 2D array using repeated concatenation requires 2 copies of the array. - # - # The fastest algorithm will depend on the ratio of CPU power to memory - # speed. - # One can monitor the results of the benchmark - # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d - # to tune this parameter until a C version of the `_block_info_recursion` - # algorithm is implemented, which would likely be faster than the Python - # version. - if list_ndim * final_size > (2 * 512 * 512): - return _block_slicing(arrays, list_ndim, result_ndim) - else: - return _block_concatenate(arrays, list_ndim, result_ndim) - - -# These helper functions are mostly used for testing. - # They allow us to write tests that directly call `_block_slicing` - # or `_block_concatenate` without having to block large arrays just to - # force the size heuristic above to take the desired path. -def _block_setup(arrays): - """ - Returns - (`arrays`, list_ndim, result_ndim, final_size) - """ - bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) - list_ndim = len(bottom_index) - if bottom_index and bottom_index[-1] is None: - raise ValueError( - 'List at {} cannot be empty'.format( - _block_format_index(bottom_index) - ) - ) - result_ndim = max(arr_ndim, list_ndim) - return arrays, list_ndim, result_ndim, final_size - - -def _block_slicing(arrays, list_ndim, result_ndim): - shape, slices, arrays = _block_info_recursion( - arrays, list_ndim, result_ndim) - dtype = _nx.result_type(*[arr.dtype for arr in arrays]) - - # Prefer F order only in the case that all input arrays are F-contiguous - F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) - C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) - order = 'F' if F_order and not C_order else 'C' - result = _nx.empty(shape=shape, dtype=dtype, order=order) - # Note: In a C implementation, the function - # PyArray_CreateMultiSortedStridePerm could be used for more advanced - # guessing of the desired order.
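# --- Editor's aside (illustrative sketch, not part of the original file) ---
# The assignment loop below exploits the fact that Ellipsis expands to all
# leading axes a slice tuple does not mention, so each slice tuple only
# needs entries for the axes block() actually concatenated over:
#
#     result[(Ellipsis,) + (slice(0, 3),)] = arr   # same as result[..., 0:3] = arr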
- - for the_slice, arr in zip(slices, arrays): - result[(Ellipsis,) + the_slice] = arr - return result - - -def _block_concatenate(arrays, list_ndim, result_ndim): - result = _block(arrays, list_ndim, result_ndim) - if list_ndim == 0: - # Catch an edge case where _block returns a view because - # `arrays` is a single numpy array and not a list of numpy arrays. - # This might copy scalars or lists twice, but this isn't a likely - # use case for those interested in performance. - result = result.copy() - return result diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/core/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/_locales.py b/venv/lib/python3.7/site-packages/numpy/core/tests/_locales.py deleted file mode 100644 index 52e4ff3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/_locales.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Provide a class for testing in a French locale - -""" -from __future__ import division, absolute_import, print_function - -import sys -import locale - -import pytest - -__all__ = ['CommaDecimalPointLocale'] - - -def find_comma_decimal_point_locale(): - """See if the platform has a locale that uses a comma as the decimal point. - - Find a locale that uses a comma instead of a period as the - decimal point. - - Returns - ------- - old_locale: str - Locale when the function was called. - new_locale: {str, None} - First suitable locale found, None if none was found. - - """ - if sys.platform == 'win32': - locales = ['FRENCH'] - else: - locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] - - old_locale = locale.getlocale(locale.LC_NUMERIC) - new_locale = None - try: - for loc in locales: - try: - locale.setlocale(locale.LC_NUMERIC, loc) - new_locale = loc - break - except locale.Error: - pass - finally: - locale.setlocale(locale.LC_NUMERIC, locale=old_locale) - return old_locale, new_locale - - -class CommaDecimalPointLocale(object): - """Sets LC_NUMERIC to a locale with comma as the decimal point. - - Classes derived from this class have setup and teardown methods that run - tests with locale.LC_NUMERIC set to a locale where commas (',') are used as - the decimal point instead of periods ('.'). On exit the locale is restored - to the initial locale. It also serves as a context manager with the same - effect. If no such locale is available, the test is skipped. - - .. versionadded:: 1.15.0 - - """ - (cur_locale, tst_locale) = find_comma_decimal_point_locale() - - def setup(self): - if self.tst_locale is None: - pytest.skip("No French locale available") - locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) - - def teardown(self): - locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) - - def __enter__(self): - if self.tst_locale is None: - pytest.skip("No French locale available") - locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) - - def __exit__(self, type, value, traceback): - locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/astype_copy.pkl b/venv/lib/python3.7/site-packages/numpy/core/tests/data/astype_copy.pkl deleted file mode 100644 index 7397c97..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/tests/data/astype_copy.pkl and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/recarray_from_file.fits b/venv/lib/python3.7/site-packages/numpy/core/tests/data/recarray_from_file.fits deleted file mode 100644 index ca48ee8..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/core/tests/data/recarray_from_file.fits and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-README b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-README deleted file mode 100644 index 6561ca3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-README +++ /dev/null @@ -1,15 +0,0 @@ -Steps to validate transcendental functions: -1) Add a file 'umath-validation-set-<ufuncname>', where ufuncname is the name of - the function in NumPy you want to validate -2) The file should contain 4 columns: dtype,input,expected output,ulperror - a. dtype: one of np.float16, np.float32, np.float64 - b. input: floating point input to the ufunc in hex. Example: 0x414570a4 - represents 12.340000152587890625 - c. expected output: floating point output for the corresponding input in hex. - This should be computed using a high(er) precision library and then rounded to - the same format as the input. - d. ulperror: expected maximum ulp error of the function. This - should be the same across all rows of the same dtype. Otherwise, the function is - tested for the maximum ulp error among all entries of that dtype. -3) Add the file 'umath-validation-set-<ufuncname>' to the test file test_umath_accuracy.py, - which will then validate your ufunc.
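For illustration, a minimal sketch of how one row of the validation sets below can be decoded (an editor's addition, not part of the deleted test suite; `decode_row` is a hypothetical helper): the input and output columns are raw bit patterns that must be reinterpreted as the dtype named in the first column.

    import numpy as np

    def decode_row(line):
        # Parse a CSV row such as "np.float32,0x3f490fdb,0x3f3504f3,2".
        dtype_name, hex_in, hex_out, ulp = line.strip().split(',')
        dtype = np.dtype(dtype_name.replace('np.', ''))
        # Reinterpret the raw bit patterns as the stated floating-point type.
        bits = np.array([int(hex_in, 16), int(hex_out, 16)],
                        dtype='uint%d' % (dtype.itemsize * 8))
        value, expected = bits.view(dtype)
        return value, expected, int(ulp)

    # A row from the cos table below: the input is pi/4, the expected output
    # is cos(pi/4) ~= 0.70710677, with a 2 ulp tolerance.
    x, y, tol = decode_row("np.float32,0x3f490fdb,0x3f3504f3,2")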
diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-cos b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-cos deleted file mode 100644 index 360ebcd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-cos +++ /dev/null @@ -1,707 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0x3f800000,2 -np.float32,0x007b2490,0x3f800000,2 -np.float32,0x007c99fa,0x3f800000,2 -np.float32,0x00734a0c,0x3f800000,2 -np.float32,0x0070de24,0x3f800000,2 -np.float32,0x007fffff,0x3f800000,2 -np.float32,0x00000001,0x3f800000,2 -## -ve denormals ## -np.float32,0x80495d65,0x3f800000,2 -np.float32,0x806894f6,0x3f800000,2 -np.float32,0x80555a76,0x3f800000,2 -np.float32,0x804e1fb8,0x3f800000,2 -np.float32,0x80687de9,0x3f800000,2 -np.float32,0x807fffff,0x3f800000,2 -np.float32,0x80000001,0x3f800000,2 -## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## -np.float32,0x00000000,0x3f800000,2 -np.float32,0x80000000,0x3f800000,2 -np.float32,0x00800000,0x3f800000,2 -np.float32,0x7f7fffff,0x3f5a5f96,2 -np.float32,0x80800000,0x3f800000,2 -np.float32,0xff7fffff,0x3f5a5f96,2 -## 1.00f + 0x00000001 ## -np.float32,0x3f800000,0x3f0a5140,2 -np.float32,0x3f800001,0x3f0a513f,2 -np.float32,0x3f800002,0x3f0a513d,2 -np.float32,0xc090a8b0,0xbe4332ce,2 -np.float32,0x41ce3184,0x3f4d1de1,2 -np.float32,0xc1d85848,0xbeaa8980,2 -np.float32,0x402b8820,0xbf653aa3,2 -np.float32,0x42b4e454,0xbf4a338b,2 -np.float32,0x42a67a60,0x3c58202e,2 -np.float32,0x41d92388,0xbed987c7,2 -np.float32,0x422dd66c,0x3f5dcab3,2 -np.float32,0xc28f5be6,0xbf5688d8,2 -np.float32,0x41ab2674,0xbf53aa3b,2 -np.float32,0xd0102756,0x3f45d12d,2 -np.float32,0xcf99405e,0xbe9cf281,2 -np.float32,0xcfd83a12,0x3eaae4ca,2 -np.float32,0x4fb54db0,0xbf7b2894,2 -np.float32,0xcfcca29d,0x3f752e4e,2 -np.float32,0xceec2ac0,0xbf745303,2 -np.float32,0xcfdca97f,0x3ef554a7,2 -np.float32,0xcfe92b0a,0x3f4618f2,2 -np.float32,0x5014b0eb,0x3ee933e6,2 -np.float32,0xcfa7ee96,0xbeedeeb2,2 -np.float32,0x754c09a0,0xbef298de,2 -np.float32,0x77a731fb,0x3f24599f,2 -np.float32,0x76de2494,0x3f79576c,2 -np.float32,0xf74920dc,0xbf4d196e,2 -np.float32,0x7707a312,0xbeb5cb8e,2 -np.float32,0x75bf9790,0xbf7fd7fe,2 -np.float32,0xf4ca7c40,0xbe15107d,2 -np.float32,0x77e91899,0xbe8a968b,2 -np.float32,0xf74c9820,0xbf7f9677,2 -np.float32,0x7785ca29,0xbe6ef93b,2 -np.float32,0x3f490fdb,0x3f3504f3,2 -np.float32,0xbf490fdb,0x3f3504f3,2 -np.float32,0x3fc90fdb,0xb33bbd2e,2 -np.float32,0xbfc90fdb,0xb33bbd2e,2 -np.float32,0x40490fdb,0xbf800000,2 -np.float32,0xc0490fdb,0xbf800000,2 -np.float32,0x3fc90fdb,0xb33bbd2e,2 -np.float32,0xbfc90fdb,0xb33bbd2e,2 -np.float32,0x40490fdb,0xbf800000,2 -np.float32,0xc0490fdb,0xbf800000,2 -np.float32,0x40c90fdb,0x3f800000,2 -np.float32,0xc0c90fdb,0x3f800000,2 -np.float32,0x4016cbe4,0xbf3504f3,2 -np.float32,0xc016cbe4,0xbf3504f3,2 -np.float32,0x4096cbe4,0x324cde2e,2 -np.float32,0xc096cbe4,0x324cde2e,2 -np.float32,0x4116cbe4,0xbf800000,2 -np.float32,0xc116cbe4,0xbf800000,2 -np.float32,0x40490fdb,0xbf800000,2 -np.float32,0xc0490fdb,0xbf800000,2 -np.float32,0x40c90fdb,0x3f800000,2 -np.float32,0xc0c90fdb,0x3f800000,2 -np.float32,0x41490fdb,0x3f800000,2 -np.float32,0xc1490fdb,0x3f800000,2 -np.float32,0x407b53d2,0xbf3504f1,2 -np.float32,0xc07b53d2,0xbf3504f1,2 -np.float32,0x40fb53d2,0xb4b5563d,2 -np.float32,0xc0fb53d2,0xb4b5563d,2 -np.float32,0x417b53d2,0xbf800000,2 -np.float32,0xc17b53d2,0xbf800000,2 -np.float32,0x4096cbe4,0x324cde2e,2 -np.float32,0xc096cbe4,0x324cde2e,2 
-np.float32,0x4116cbe4,0xbf800000,2 -np.float32,0xc116cbe4,0xbf800000,2 -np.float32,0x4196cbe4,0x3f800000,2 -np.float32,0xc196cbe4,0x3f800000,2 -np.float32,0x40afede0,0x3f3504f7,2 -np.float32,0xc0afede0,0x3f3504f7,2 -np.float32,0x412fede0,0x353222c4,2 -np.float32,0xc12fede0,0x353222c4,2 -np.float32,0x41afede0,0xbf800000,2 -np.float32,0xc1afede0,0xbf800000,2 -np.float32,0x40c90fdb,0x3f800000,2 -np.float32,0xc0c90fdb,0x3f800000,2 -np.float32,0x41490fdb,0x3f800000,2 -np.float32,0xc1490fdb,0x3f800000,2 -np.float32,0x41c90fdb,0x3f800000,2 -np.float32,0xc1c90fdb,0x3f800000,2 -np.float32,0x40e231d6,0x3f3504f3,2 -np.float32,0xc0e231d6,0x3f3504f3,2 -np.float32,0x416231d6,0xb319a6a2,2 -np.float32,0xc16231d6,0xb319a6a2,2 -np.float32,0x41e231d6,0xbf800000,2 -np.float32,0xc1e231d6,0xbf800000,2 -np.float32,0x40fb53d2,0xb4b5563d,2 -np.float32,0xc0fb53d2,0xb4b5563d,2 -np.float32,0x417b53d2,0xbf800000,2 -np.float32,0xc17b53d2,0xbf800000,2 -np.float32,0x41fb53d2,0x3f800000,2 -np.float32,0xc1fb53d2,0x3f800000,2 -np.float32,0x410a3ae7,0xbf3504fb,2 -np.float32,0xc10a3ae7,0xbf3504fb,2 -np.float32,0x418a3ae7,0x35b08908,2 -np.float32,0xc18a3ae7,0x35b08908,2 -np.float32,0x420a3ae7,0xbf800000,2 -np.float32,0xc20a3ae7,0xbf800000,2 -np.float32,0x4116cbe4,0xbf800000,2 -np.float32,0xc116cbe4,0xbf800000,2 -np.float32,0x4196cbe4,0x3f800000,2 -np.float32,0xc196cbe4,0x3f800000,2 -np.float32,0x4216cbe4,0x3f800000,2 -np.float32,0xc216cbe4,0x3f800000,2 -np.float32,0x41235ce2,0xbf3504ef,2 -np.float32,0xc1235ce2,0xbf3504ef,2 -np.float32,0x41a35ce2,0xb53889b6,2 -np.float32,0xc1a35ce2,0xb53889b6,2 -np.float32,0x42235ce2,0xbf800000,2 -np.float32,0xc2235ce2,0xbf800000,2 -np.float32,0x412fede0,0x353222c4,2 -np.float32,0xc12fede0,0x353222c4,2 -np.float32,0x41afede0,0xbf800000,2 -np.float32,0xc1afede0,0xbf800000,2 -np.float32,0x422fede0,0x3f800000,2 -np.float32,0xc22fede0,0x3f800000,2 -np.float32,0x413c7edd,0x3f3504f4,2 -np.float32,0xc13c7edd,0x3f3504f4,2 -np.float32,0x41bc7edd,0x33800add,2 -np.float32,0xc1bc7edd,0x33800add,2 -np.float32,0x423c7edd,0xbf800000,2 -np.float32,0xc23c7edd,0xbf800000,2 -np.float32,0x41490fdb,0x3f800000,2 -np.float32,0xc1490fdb,0x3f800000,2 -np.float32,0x41c90fdb,0x3f800000,2 -np.float32,0xc1c90fdb,0x3f800000,2 -np.float32,0x42490fdb,0x3f800000,2 -np.float32,0xc2490fdb,0x3f800000,2 -np.float32,0x4155a0d9,0x3f3504eb,2 -np.float32,0xc155a0d9,0x3f3504eb,2 -np.float32,0x41d5a0d9,0xb5b3bc81,2 -np.float32,0xc1d5a0d9,0xb5b3bc81,2 -np.float32,0x4255a0d9,0xbf800000,2 -np.float32,0xc255a0d9,0xbf800000,2 -np.float32,0x416231d6,0xb319a6a2,2 -np.float32,0xc16231d6,0xb319a6a2,2 -np.float32,0x41e231d6,0xbf800000,2 -np.float32,0xc1e231d6,0xbf800000,2 -np.float32,0x426231d6,0x3f800000,2 -np.float32,0xc26231d6,0x3f800000,2 -np.float32,0x416ec2d4,0xbf3504f7,2 -np.float32,0xc16ec2d4,0xbf3504f7,2 -np.float32,0x41eec2d4,0x353ef0a7,2 -np.float32,0xc1eec2d4,0x353ef0a7,2 -np.float32,0x426ec2d4,0xbf800000,2 -np.float32,0xc26ec2d4,0xbf800000,2 -np.float32,0x417b53d2,0xbf800000,2 -np.float32,0xc17b53d2,0xbf800000,2 -np.float32,0x41fb53d2,0x3f800000,2 -np.float32,0xc1fb53d2,0x3f800000,2 -np.float32,0x427b53d2,0x3f800000,2 -np.float32,0xc27b53d2,0x3f800000,2 -np.float32,0x4183f268,0xbf3504e7,2 -np.float32,0xc183f268,0xbf3504e7,2 -np.float32,0x4203f268,0xb6059a13,2 -np.float32,0xc203f268,0xb6059a13,2 -np.float32,0x4283f268,0xbf800000,2 -np.float32,0xc283f268,0xbf800000,2 -np.float32,0x418a3ae7,0x35b08908,2 -np.float32,0xc18a3ae7,0x35b08908,2 -np.float32,0x420a3ae7,0xbf800000,2 -np.float32,0xc20a3ae7,0xbf800000,2 
-np.float32,0x428a3ae7,0x3f800000,2 -np.float32,0xc28a3ae7,0x3f800000,2 -np.float32,0x41908365,0x3f3504f0,2 -np.float32,0xc1908365,0x3f3504f0,2 -np.float32,0x42108365,0xb512200d,2 -np.float32,0xc2108365,0xb512200d,2 -np.float32,0x42908365,0xbf800000,2 -np.float32,0xc2908365,0xbf800000,2 -np.float32,0x4196cbe4,0x3f800000,2 -np.float32,0xc196cbe4,0x3f800000,2 -np.float32,0x4216cbe4,0x3f800000,2 -np.float32,0xc216cbe4,0x3f800000,2 -np.float32,0x4296cbe4,0x3f800000,2 -np.float32,0xc296cbe4,0x3f800000,2 -np.float32,0x419d1463,0x3f3504ef,2 -np.float32,0xc19d1463,0x3f3504ef,2 -np.float32,0x421d1463,0xb5455799,2 -np.float32,0xc21d1463,0xb5455799,2 -np.float32,0x429d1463,0xbf800000,2 -np.float32,0xc29d1463,0xbf800000,2 -np.float32,0x41a35ce2,0xb53889b6,2 -np.float32,0xc1a35ce2,0xb53889b6,2 -np.float32,0x42235ce2,0xbf800000,2 -np.float32,0xc2235ce2,0xbf800000,2 -np.float32,0x42a35ce2,0x3f800000,2 -np.float32,0xc2a35ce2,0x3f800000,2 -np.float32,0x41a9a561,0xbf3504ff,2 -np.float32,0xc1a9a561,0xbf3504ff,2 -np.float32,0x4229a561,0x360733d0,2 -np.float32,0xc229a561,0x360733d0,2 -np.float32,0x42a9a561,0xbf800000,2 -np.float32,0xc2a9a561,0xbf800000,2 -np.float32,0x41afede0,0xbf800000,2 -np.float32,0xc1afede0,0xbf800000,2 -np.float32,0x422fede0,0x3f800000,2 -np.float32,0xc22fede0,0x3f800000,2 -np.float32,0x42afede0,0x3f800000,2 -np.float32,0xc2afede0,0x3f800000,2 -np.float32,0x41b6365e,0xbf3504f6,2 -np.float32,0xc1b6365e,0xbf3504f6,2 -np.float32,0x4236365e,0x350bb91c,2 -np.float32,0xc236365e,0x350bb91c,2 -np.float32,0x42b6365e,0xbf800000,2 -np.float32,0xc2b6365e,0xbf800000,2 -np.float32,0x41bc7edd,0x33800add,2 -np.float32,0xc1bc7edd,0x33800add,2 -np.float32,0x423c7edd,0xbf800000,2 -np.float32,0xc23c7edd,0xbf800000,2 -np.float32,0x42bc7edd,0x3f800000,2 -np.float32,0xc2bc7edd,0x3f800000,2 -np.float32,0x41c2c75c,0x3f3504f8,2 -np.float32,0xc1c2c75c,0x3f3504f8,2 -np.float32,0x4242c75c,0x354bbe8a,2 -np.float32,0xc242c75c,0x354bbe8a,2 -np.float32,0x42c2c75c,0xbf800000,2 -np.float32,0xc2c2c75c,0xbf800000,2 -np.float32,0x41c90fdb,0x3f800000,2 -np.float32,0xc1c90fdb,0x3f800000,2 -np.float32,0x42490fdb,0x3f800000,2 -np.float32,0xc2490fdb,0x3f800000,2 -np.float32,0x42c90fdb,0x3f800000,2 -np.float32,0xc2c90fdb,0x3f800000,2 -np.float32,0x41cf585a,0x3f3504e7,2 -np.float32,0xc1cf585a,0x3f3504e7,2 -np.float32,0x424f585a,0xb608cd8c,2 -np.float32,0xc24f585a,0xb608cd8c,2 -np.float32,0x42cf585a,0xbf800000,2 -np.float32,0xc2cf585a,0xbf800000,2 -np.float32,0x41d5a0d9,0xb5b3bc81,2 -np.float32,0xc1d5a0d9,0xb5b3bc81,2 -np.float32,0x4255a0d9,0xbf800000,2 -np.float32,0xc255a0d9,0xbf800000,2 -np.float32,0x42d5a0d9,0x3f800000,2 -np.float32,0xc2d5a0d9,0x3f800000,2 -np.float32,0x41dbe958,0xbf350507,2 -np.float32,0xc1dbe958,0xbf350507,2 -np.float32,0x425be958,0x365eab75,2 -np.float32,0xc25be958,0x365eab75,2 -np.float32,0x42dbe958,0xbf800000,2 -np.float32,0xc2dbe958,0xbf800000,2 -np.float32,0x41e231d6,0xbf800000,2 -np.float32,0xc1e231d6,0xbf800000,2 -np.float32,0x426231d6,0x3f800000,2 -np.float32,0xc26231d6,0x3f800000,2 -np.float32,0x42e231d6,0x3f800000,2 -np.float32,0xc2e231d6,0x3f800000,2 -np.float32,0x41e87a55,0xbf3504ef,2 -np.float32,0xc1e87a55,0xbf3504ef,2 -np.float32,0x42687a55,0xb552257b,2 -np.float32,0xc2687a55,0xb552257b,2 -np.float32,0x42e87a55,0xbf800000,2 -np.float32,0xc2e87a55,0xbf800000,2 -np.float32,0x41eec2d4,0x353ef0a7,2 -np.float32,0xc1eec2d4,0x353ef0a7,2 -np.float32,0x426ec2d4,0xbf800000,2 -np.float32,0xc26ec2d4,0xbf800000,2 -np.float32,0x42eec2d4,0x3f800000,2 -np.float32,0xc2eec2d4,0x3f800000,2 
-np.float32,0x41f50b53,0x3f3504ff,2 -np.float32,0xc1f50b53,0x3f3504ff,2 -np.float32,0x42750b53,0x360a6748,2 -np.float32,0xc2750b53,0x360a6748,2 -np.float32,0x42f50b53,0xbf800000,2 -np.float32,0xc2f50b53,0xbf800000,2 -np.float32,0x41fb53d2,0x3f800000,2 -np.float32,0xc1fb53d2,0x3f800000,2 -np.float32,0x427b53d2,0x3f800000,2 -np.float32,0xc27b53d2,0x3f800000,2 -np.float32,0x42fb53d2,0x3f800000,2 -np.float32,0xc2fb53d2,0x3f800000,2 -np.float32,0x4200ce28,0x3f3504f6,2 -np.float32,0xc200ce28,0x3f3504f6,2 -np.float32,0x4280ce28,0x34fdd672,2 -np.float32,0xc280ce28,0x34fdd672,2 -np.float32,0x4300ce28,0xbf800000,2 -np.float32,0xc300ce28,0xbf800000,2 -np.float32,0x4203f268,0xb6059a13,2 -np.float32,0xc203f268,0xb6059a13,2 -np.float32,0x4283f268,0xbf800000,2 -np.float32,0xc283f268,0xbf800000,2 -np.float32,0x4303f268,0x3f800000,2 -np.float32,0xc303f268,0x3f800000,2 -np.float32,0x420716a7,0xbf3504f8,2 -np.float32,0xc20716a7,0xbf3504f8,2 -np.float32,0x428716a7,0x35588c6d,2 -np.float32,0xc28716a7,0x35588c6d,2 -np.float32,0x430716a7,0xbf800000,2 -np.float32,0xc30716a7,0xbf800000,2 -np.float32,0x420a3ae7,0xbf800000,2 -np.float32,0xc20a3ae7,0xbf800000,2 -np.float32,0x428a3ae7,0x3f800000,2 -np.float32,0xc28a3ae7,0x3f800000,2 -np.float32,0x430a3ae7,0x3f800000,2 -np.float32,0xc30a3ae7,0x3f800000,2 -np.float32,0x420d5f26,0xbf3504e7,2 -np.float32,0xc20d5f26,0xbf3504e7,2 -np.float32,0x428d5f26,0xb60c0105,2 -np.float32,0xc28d5f26,0xb60c0105,2 -np.float32,0x430d5f26,0xbf800000,2 -np.float32,0xc30d5f26,0xbf800000,2 -np.float32,0x42108365,0xb512200d,2 -np.float32,0xc2108365,0xb512200d,2 -np.float32,0x42908365,0xbf800000,2 -np.float32,0xc2908365,0xbf800000,2 -np.float32,0x43108365,0x3f800000,2 -np.float32,0xc3108365,0x3f800000,2 -np.float32,0x4213a7a5,0x3f350507,2 -np.float32,0xc213a7a5,0x3f350507,2 -np.float32,0x4293a7a5,0x3661deee,2 -np.float32,0xc293a7a5,0x3661deee,2 -np.float32,0x4313a7a5,0xbf800000,2 -np.float32,0xc313a7a5,0xbf800000,2 -np.float32,0x4216cbe4,0x3f800000,2 -np.float32,0xc216cbe4,0x3f800000,2 -np.float32,0x4296cbe4,0x3f800000,2 -np.float32,0xc296cbe4,0x3f800000,2 -np.float32,0x4316cbe4,0x3f800000,2 -np.float32,0xc316cbe4,0x3f800000,2 -np.float32,0x4219f024,0x3f3504d8,2 -np.float32,0xc219f024,0x3f3504d8,2 -np.float32,0x4299f024,0xb69bde6c,2 -np.float32,0xc299f024,0xb69bde6c,2 -np.float32,0x4319f024,0xbf800000,2 -np.float32,0xc319f024,0xbf800000,2 -np.float32,0x421d1463,0xb5455799,2 -np.float32,0xc21d1463,0xb5455799,2 -np.float32,0x429d1463,0xbf800000,2 -np.float32,0xc29d1463,0xbf800000,2 -np.float32,0x431d1463,0x3f800000,2 -np.float32,0xc31d1463,0x3f800000,2 -np.float32,0x422038a3,0xbf350516,2 -np.float32,0xc22038a3,0xbf350516,2 -np.float32,0x42a038a3,0x36c6cd61,2 -np.float32,0xc2a038a3,0x36c6cd61,2 -np.float32,0x432038a3,0xbf800000,2 -np.float32,0xc32038a3,0xbf800000,2 -np.float32,0x42235ce2,0xbf800000,2 -np.float32,0xc2235ce2,0xbf800000,2 -np.float32,0x42a35ce2,0x3f800000,2 -np.float32,0xc2a35ce2,0x3f800000,2 -np.float32,0x43235ce2,0x3f800000,2 -np.float32,0xc3235ce2,0x3f800000,2 -np.float32,0x42268121,0xbf3504f6,2 -np.float32,0xc2268121,0xbf3504f6,2 -np.float32,0x42a68121,0x34e43aac,2 -np.float32,0xc2a68121,0x34e43aac,2 -np.float32,0x43268121,0xbf800000,2 -np.float32,0xc3268121,0xbf800000,2 -np.float32,0x4229a561,0x360733d0,2 -np.float32,0xc229a561,0x360733d0,2 -np.float32,0x42a9a561,0xbf800000,2 -np.float32,0xc2a9a561,0xbf800000,2 -np.float32,0x4329a561,0x3f800000,2 -np.float32,0xc329a561,0x3f800000,2 -np.float32,0x422cc9a0,0x3f3504f8,2 -np.float32,0xc22cc9a0,0x3f3504f8,2 
-np.float32,0x42acc9a0,0x35655a50,2 -np.float32,0xc2acc9a0,0x35655a50,2 -np.float32,0x432cc9a0,0xbf800000,2 -np.float32,0xc32cc9a0,0xbf800000,2 -np.float32,0x422fede0,0x3f800000,2 -np.float32,0xc22fede0,0x3f800000,2 -np.float32,0x42afede0,0x3f800000,2 -np.float32,0xc2afede0,0x3f800000,2 -np.float32,0x432fede0,0x3f800000,2 -np.float32,0xc32fede0,0x3f800000,2 -np.float32,0x4233121f,0x3f3504e7,2 -np.float32,0xc233121f,0x3f3504e7,2 -np.float32,0x42b3121f,0xb60f347d,2 -np.float32,0xc2b3121f,0xb60f347d,2 -np.float32,0x4333121f,0xbf800000,2 -np.float32,0xc333121f,0xbf800000,2 -np.float32,0x4236365e,0x350bb91c,2 -np.float32,0xc236365e,0x350bb91c,2 -np.float32,0x42b6365e,0xbf800000,2 -np.float32,0xc2b6365e,0xbf800000,2 -np.float32,0x4336365e,0x3f800000,2 -np.float32,0xc336365e,0x3f800000,2 -np.float32,0x42395a9e,0xbf350507,2 -np.float32,0xc2395a9e,0xbf350507,2 -np.float32,0x42b95a9e,0x36651267,2 -np.float32,0xc2b95a9e,0x36651267,2 -np.float32,0x43395a9e,0xbf800000,2 -np.float32,0xc3395a9e,0xbf800000,2 -np.float32,0x423c7edd,0xbf800000,2 -np.float32,0xc23c7edd,0xbf800000,2 -np.float32,0x42bc7edd,0x3f800000,2 -np.float32,0xc2bc7edd,0x3f800000,2 -np.float32,0x433c7edd,0x3f800000,2 -np.float32,0xc33c7edd,0x3f800000,2 -np.float32,0x423fa31d,0xbf3504d7,2 -np.float32,0xc23fa31d,0xbf3504d7,2 -np.float32,0x42bfa31d,0xb69d7828,2 -np.float32,0xc2bfa31d,0xb69d7828,2 -np.float32,0x433fa31d,0xbf800000,2 -np.float32,0xc33fa31d,0xbf800000,2 -np.float32,0x4242c75c,0x354bbe8a,2 -np.float32,0xc242c75c,0x354bbe8a,2 -np.float32,0x42c2c75c,0xbf800000,2 -np.float32,0xc2c2c75c,0xbf800000,2 -np.float32,0x4342c75c,0x3f800000,2 -np.float32,0xc342c75c,0x3f800000,2 -np.float32,0x4245eb9c,0x3f350517,2 -np.float32,0xc245eb9c,0x3f350517,2 -np.float32,0x42c5eb9c,0x36c8671d,2 -np.float32,0xc2c5eb9c,0x36c8671d,2 -np.float32,0x4345eb9c,0xbf800000,2 -np.float32,0xc345eb9c,0xbf800000,2 -np.float32,0x42490fdb,0x3f800000,2 -np.float32,0xc2490fdb,0x3f800000,2 -np.float32,0x42c90fdb,0x3f800000,2 -np.float32,0xc2c90fdb,0x3f800000,2 -np.float32,0x43490fdb,0x3f800000,2 -np.float32,0xc3490fdb,0x3f800000,2 -np.float32,0x424c341a,0x3f3504f5,2 -np.float32,0xc24c341a,0x3f3504f5,2 -np.float32,0x42cc341a,0x34ca9ee6,2 -np.float32,0xc2cc341a,0x34ca9ee6,2 -np.float32,0x434c341a,0xbf800000,2 -np.float32,0xc34c341a,0xbf800000,2 -np.float32,0x424f585a,0xb608cd8c,2 -np.float32,0xc24f585a,0xb608cd8c,2 -np.float32,0x42cf585a,0xbf800000,2 -np.float32,0xc2cf585a,0xbf800000,2 -np.float32,0x434f585a,0x3f800000,2 -np.float32,0xc34f585a,0x3f800000,2 -np.float32,0x42527c99,0xbf3504f9,2 -np.float32,0xc2527c99,0xbf3504f9,2 -np.float32,0x42d27c99,0x35722833,2 -np.float32,0xc2d27c99,0x35722833,2 -np.float32,0x43527c99,0xbf800000,2 -np.float32,0xc3527c99,0xbf800000,2 -np.float32,0x4255a0d9,0xbf800000,2 -np.float32,0xc255a0d9,0xbf800000,2 -np.float32,0x42d5a0d9,0x3f800000,2 -np.float32,0xc2d5a0d9,0x3f800000,2 -np.float32,0x4355a0d9,0x3f800000,2 -np.float32,0xc355a0d9,0x3f800000,2 -np.float32,0x4258c518,0xbf3504e6,2 -np.float32,0xc258c518,0xbf3504e6,2 -np.float32,0x42d8c518,0xb61267f6,2 -np.float32,0xc2d8c518,0xb61267f6,2 -np.float32,0x4358c518,0xbf800000,2 -np.float32,0xc358c518,0xbf800000,2 -np.float32,0x425be958,0x365eab75,2 -np.float32,0xc25be958,0x365eab75,2 -np.float32,0x42dbe958,0xbf800000,2 -np.float32,0xc2dbe958,0xbf800000,2 -np.float32,0x435be958,0x3f800000,2 -np.float32,0xc35be958,0x3f800000,2 -np.float32,0x425f0d97,0x3f350508,2 -np.float32,0xc25f0d97,0x3f350508,2 -np.float32,0x42df0d97,0x366845e0,2 -np.float32,0xc2df0d97,0x366845e0,2 
-np.float32,0x435f0d97,0xbf800000,2 -np.float32,0xc35f0d97,0xbf800000,2 -np.float32,0x426231d6,0x3f800000,2 -np.float32,0xc26231d6,0x3f800000,2 -np.float32,0x42e231d6,0x3f800000,2 -np.float32,0xc2e231d6,0x3f800000,2 -np.float32,0x436231d6,0x3f800000,2 -np.float32,0xc36231d6,0x3f800000,2 -np.float32,0x42655616,0x3f3504d7,2 -np.float32,0xc2655616,0x3f3504d7,2 -np.float32,0x42e55616,0xb69f11e5,2 -np.float32,0xc2e55616,0xb69f11e5,2 -np.float32,0x43655616,0xbf800000,2 -np.float32,0xc3655616,0xbf800000,2 -np.float32,0x42687a55,0xb552257b,2 -np.float32,0xc2687a55,0xb552257b,2 -np.float32,0x42e87a55,0xbf800000,2 -np.float32,0xc2e87a55,0xbf800000,2 -np.float32,0x43687a55,0x3f800000,2 -np.float32,0xc3687a55,0x3f800000,2 -np.float32,0x426b9e95,0xbf350517,2 -np.float32,0xc26b9e95,0xbf350517,2 -np.float32,0x42eb9e95,0x36ca00d9,2 -np.float32,0xc2eb9e95,0x36ca00d9,2 -np.float32,0x436b9e95,0xbf800000,2 -np.float32,0xc36b9e95,0xbf800000,2 -np.float32,0x426ec2d4,0xbf800000,2 -np.float32,0xc26ec2d4,0xbf800000,2 -np.float32,0x42eec2d4,0x3f800000,2 -np.float32,0xc2eec2d4,0x3f800000,2 -np.float32,0x436ec2d4,0x3f800000,2 -np.float32,0xc36ec2d4,0x3f800000,2 -np.float32,0x4271e713,0xbf3504f5,2 -np.float32,0xc271e713,0xbf3504f5,2 -np.float32,0x42f1e713,0x34b10321,2 -np.float32,0xc2f1e713,0x34b10321,2 -np.float32,0x4371e713,0xbf800000,2 -np.float32,0xc371e713,0xbf800000,2 -np.float32,0x42750b53,0x360a6748,2 -np.float32,0xc2750b53,0x360a6748,2 -np.float32,0x42f50b53,0xbf800000,2 -np.float32,0xc2f50b53,0xbf800000,2 -np.float32,0x43750b53,0x3f800000,2 -np.float32,0xc3750b53,0x3f800000,2 -np.float32,0x42782f92,0x3f3504f9,2 -np.float32,0xc2782f92,0x3f3504f9,2 -np.float32,0x42f82f92,0x357ef616,2 -np.float32,0xc2f82f92,0x357ef616,2 -np.float32,0x43782f92,0xbf800000,2 -np.float32,0xc3782f92,0xbf800000,2 -np.float32,0x427b53d2,0x3f800000,2 -np.float32,0xc27b53d2,0x3f800000,2 -np.float32,0x42fb53d2,0x3f800000,2 -np.float32,0xc2fb53d2,0x3f800000,2 -np.float32,0x437b53d2,0x3f800000,2 -np.float32,0xc37b53d2,0x3f800000,2 -np.float32,0x427e7811,0x3f3504e6,2 -np.float32,0xc27e7811,0x3f3504e6,2 -np.float32,0x42fe7811,0xb6159b6f,2 -np.float32,0xc2fe7811,0xb6159b6f,2 -np.float32,0x437e7811,0xbf800000,2 -np.float32,0xc37e7811,0xbf800000,2 -np.float32,0x4280ce28,0x34fdd672,2 -np.float32,0xc280ce28,0x34fdd672,2 -np.float32,0x4300ce28,0xbf800000,2 -np.float32,0xc300ce28,0xbf800000,2 -np.float32,0x4380ce28,0x3f800000,2 -np.float32,0xc380ce28,0x3f800000,2 -np.float32,0x42826048,0xbf350508,2 -np.float32,0xc2826048,0xbf350508,2 -np.float32,0x43026048,0x366b7958,2 -np.float32,0xc3026048,0x366b7958,2 -np.float32,0x43826048,0xbf800000,2 -np.float32,0xc3826048,0xbf800000,2 -np.float32,0x4283f268,0xbf800000,2 -np.float32,0xc283f268,0xbf800000,2 -np.float32,0x4303f268,0x3f800000,2 -np.float32,0xc303f268,0x3f800000,2 -np.float32,0x4383f268,0x3f800000,2 -np.float32,0xc383f268,0x3f800000,2 -np.float32,0x42858487,0xbf350504,2 -np.float32,0xc2858487,0xbf350504,2 -np.float32,0x43058487,0x363ea8be,2 -np.float32,0xc3058487,0x363ea8be,2 -np.float32,0x43858487,0xbf800000,2 -np.float32,0xc3858487,0xbf800000,2 -np.float32,0x428716a7,0x35588c6d,2 -np.float32,0xc28716a7,0x35588c6d,2 -np.float32,0x430716a7,0xbf800000,2 -np.float32,0xc30716a7,0xbf800000,2 -np.float32,0x438716a7,0x3f800000,2 -np.float32,0xc38716a7,0x3f800000,2 -np.float32,0x4288a8c7,0x3f350517,2 -np.float32,0xc288a8c7,0x3f350517,2 -np.float32,0x4308a8c7,0x36cb9a96,2 -np.float32,0xc308a8c7,0x36cb9a96,2 -np.float32,0x4388a8c7,0xbf800000,2 -np.float32,0xc388a8c7,0xbf800000,2 
-np.float32,0x428a3ae7,0x3f800000,2 -np.float32,0xc28a3ae7,0x3f800000,2 -np.float32,0x430a3ae7,0x3f800000,2 -np.float32,0xc30a3ae7,0x3f800000,2 -np.float32,0x438a3ae7,0x3f800000,2 -np.float32,0xc38a3ae7,0x3f800000,2 -np.float32,0x428bcd06,0x3f3504f5,2 -np.float32,0xc28bcd06,0x3f3504f5,2 -np.float32,0x430bcd06,0x3497675b,2 -np.float32,0xc30bcd06,0x3497675b,2 -np.float32,0x438bcd06,0xbf800000,2 -np.float32,0xc38bcd06,0xbf800000,2 -np.float32,0x428d5f26,0xb60c0105,2 -np.float32,0xc28d5f26,0xb60c0105,2 -np.float32,0x430d5f26,0xbf800000,2 -np.float32,0xc30d5f26,0xbf800000,2 -np.float32,0x438d5f26,0x3f800000,2 -np.float32,0xc38d5f26,0x3f800000,2 -np.float32,0x428ef146,0xbf350526,2 -np.float32,0xc28ef146,0xbf350526,2 -np.float32,0x430ef146,0x3710bc40,2 -np.float32,0xc30ef146,0x3710bc40,2 -np.float32,0x438ef146,0xbf800000,2 -np.float32,0xc38ef146,0xbf800000,2 -np.float32,0x42908365,0xbf800000,2 -np.float32,0xc2908365,0xbf800000,2 -np.float32,0x43108365,0x3f800000,2 -np.float32,0xc3108365,0x3f800000,2 -np.float32,0x43908365,0x3f800000,2 -np.float32,0xc3908365,0x3f800000,2 -np.float32,0x42921585,0xbf3504e6,2 -np.float32,0xc2921585,0xbf3504e6,2 -np.float32,0x43121585,0xb618cee8,2 -np.float32,0xc3121585,0xb618cee8,2 -np.float32,0x43921585,0xbf800000,2 -np.float32,0xc3921585,0xbf800000,2 -np.float32,0x4293a7a5,0x3661deee,2 -np.float32,0xc293a7a5,0x3661deee,2 -np.float32,0x4313a7a5,0xbf800000,2 -np.float32,0xc313a7a5,0xbf800000,2 -np.float32,0x4393a7a5,0x3f800000,2 -np.float32,0xc393a7a5,0x3f800000,2 -np.float32,0x429539c5,0x3f350536,2 -np.float32,0xc29539c5,0x3f350536,2 -np.float32,0x431539c5,0x373bab34,2 -np.float32,0xc31539c5,0x373bab34,2 -np.float32,0x439539c5,0xbf800000,2 -np.float32,0xc39539c5,0xbf800000,2 -np.float32,0x4296cbe4,0x3f800000,2 -np.float32,0xc296cbe4,0x3f800000,2 -np.float32,0x4316cbe4,0x3f800000,2 -np.float32,0xc316cbe4,0x3f800000,2 -np.float32,0x4396cbe4,0x3f800000,2 -np.float32,0xc396cbe4,0x3f800000,2 -np.float32,0x42985e04,0x3f3504d7,2 -np.float32,0xc2985e04,0x3f3504d7,2 -np.float32,0x43185e04,0xb6a2455d,2 -np.float32,0xc3185e04,0xb6a2455d,2 -np.float32,0x43985e04,0xbf800000,2 -np.float32,0xc3985e04,0xbf800000,2 -np.float32,0x4299f024,0xb69bde6c,2 -np.float32,0xc299f024,0xb69bde6c,2 -np.float32,0x4319f024,0xbf800000,2 -np.float32,0xc319f024,0xbf800000,2 -np.float32,0x4399f024,0x3f800000,2 -np.float32,0xc399f024,0x3f800000,2 -np.float32,0x429b8243,0xbf3504ea,2 -np.float32,0xc29b8243,0xbf3504ea,2 -np.float32,0x431b8243,0xb5cb2eb8,2 -np.float32,0xc31b8243,0xb5cb2eb8,2 -np.float32,0x439b8243,0xbf800000,2 -np.float32,0xc39b8243,0xbf800000,2 -np.float32,0x435b2047,0x3f3504c1,2 -np.float32,0x42a038a2,0xb5e4ca7e,2 -np.float32,0x432038a2,0xbf800000,2 -np.float32,0x4345eb9b,0xbf800000,2 -np.float32,0x42c5eb9b,0xb5de638c,2 -np.float32,0x42eb9e94,0xb5d7fc9b,2 -np.float32,0x4350ea79,0x3631dadb,2 -np.float32,0x42dbe957,0xbf800000,2 -np.float32,0x425be957,0xb505522a,2 -np.float32,0x435be957,0x3f800000,2 -np.float32,0x487fe5ab,0xba140185,2 -np.float32,0x497fe5ab,0x3f7fffd5,2 -np.float32,0x49ffe5ab,0x3f7fff55,2 -np.float32,0x49ffeb37,0x3b9382f5,2 -np.float32,0x497ff0c3,0x3b13049f,2 -np.float32,0x49fff0c3,0xbf7fff57,2 -np.float32,0x49fff64f,0xbb928618,2 -np.float32,0x497ffbdb,0xbf7fffd6,2 -np.float32,0x49fffbdb,0x3f7fff59,2 -np.float32,0x48fffbdb,0xba9207c6,2 -np.float32,0x4e736e56,0xbf800000,2 -np.float32,0x4d4da377,0xbf800000,2 -np.float32,0x4ece58c3,0xbf800000,2 -np.float32,0x4ee0db9c,0xbf800000,2 -np.float32,0x4dee7002,0x3f800000,2 -np.float32,0x4ee86afc,0x38857a23,2 
-np.float32,0x4dca4f3f,0xbf800000,2 -np.float32,0x4ecb48af,0xb95d1e10,2 -np.float32,0x4e51e33f,0xbf800000,2 -np.float32,0x4ef5f421,0xbf800000,2 -np.float32,0x46027eb2,0x3e7d94c9,2 -np.float32,0x4477baed,0xbe7f1824,2 -np.float32,0x454b8024,0x3e7f5268,2 -np.float32,0x455d2c09,0x3e7f40cb,2 -np.float32,0x4768d3de,0xba14b4af,2 -np.float32,0x46c1e7cd,0x3e7fb102,2 -np.float32,0x44a52949,0xbe7dc9d5,2 -np.float32,0x4454633a,0x3e7dbc7d,2 -np.float32,0x4689810b,0x3e7eb02b,2 -np.float32,0x473473cd,0xbe7eef6f,2 -np.float32,0x44a5193f,0x3e7e1b1f,2 -np.float32,0x46004b36,0x3e7dac59,2 -np.float32,0x467f604b,0x3d7ffd3a,2 -np.float32,0x45ea1805,0x3dffd2e0,2 -np.float32,0x457b6af3,0x3dff7831,2 -np.float32,0x44996159,0xbe7d85f4,2 -np.float32,0x47883553,0xbb80584e,2 -np.float32,0x44e19f0c,0xbdffcfe6,2 -np.float32,0x472b3bf6,0xbe7f7a82,2 -np.float32,0x4600bb4e,0x3a135e33,2 -np.float32,0x449f4556,0x3e7e42e5,2 -np.float32,0x474e9420,0x3dff77b2,2 -np.float32,0x45cbdb23,0x3dff7240,2 -np.float32,0x44222747,0x3dffb039,2 -np.float32,0x4772e419,0xbdff74b8,2 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-exp b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-exp deleted file mode 100644 index 1b2cc9c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-exp +++ /dev/null @@ -1,135 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0x3f800000,3 -np.float32,0x007b2490,0x3f800000,3 -np.float32,0x007c99fa,0x3f800000,3 -np.float32,0x00734a0c,0x3f800000,3 -np.float32,0x0070de24,0x3f800000,3 -np.float32,0x00495d65,0x3f800000,3 -np.float32,0x006894f6,0x3f800000,3 -np.float32,0x00555a76,0x3f800000,3 -np.float32,0x004e1fb8,0x3f800000,3 -np.float32,0x00687de9,0x3f800000,3 -## -ve denormals ## -np.float32,0x805b59af,0x3f800000,3 -np.float32,0x807ed8ed,0x3f800000,3 -np.float32,0x807142ad,0x3f800000,3 -np.float32,0x80772002,0x3f800000,3 -np.float32,0x8062abcb,0x3f800000,3 -np.float32,0x8045e31c,0x3f800000,3 -np.float32,0x805f01c2,0x3f800000,3 -np.float32,0x80506432,0x3f800000,3 -np.float32,0x8060089d,0x3f800000,3 -np.float32,0x8071292f,0x3f800000,3 -## floats that output a denormal ## -np.float32,0xc2cf3fc1,0x00000001,3 -np.float32,0xc2c79726,0x00000021,3 -np.float32,0xc2cb295d,0x00000005,3 -np.float32,0xc2b49e6b,0x00068c4c,3 -np.float32,0xc2ca8116,0x00000008,3 -np.float32,0xc2c23f82,0x000001d7,3 -np.float32,0xc2cb69c0,0x00000005,3 -np.float32,0xc2cc1f4d,0x00000003,3 -np.float32,0xc2ae094e,0x00affc4c,3 -np.float32,0xc2c86c44,0x00000015,3 -## random floats between -87.0f and 88.0f ## -np.float32,0x4030d7e0,0x417d9a05,3 -np.float32,0x426f60e8,0x6aa1be2c,3 -np.float32,0x41a1b220,0x4e0efc11,3 -np.float32,0xc20cc722,0x26159da7,3 -np.float32,0x41c492bc,0x512ec79d,3 -np.float32,0x40980210,0x42e73a0e,3 -np.float32,0xbf1f7b80,0x3f094de3,3 -np.float32,0x42a678a4,0x7b87a383,3 -np.float32,0xc20f3cfd,0x25a1c304,3 -np.float32,0x423ff34c,0x6216467f,3 -np.float32,0x00000000,0x3f800000,3 -## floats that cause an overflow ## -np.float32,0x7f06d8c1,0x7f800000,3 -np.float32,0x7f451912,0x7f800000,3 -np.float32,0x7ecceac3,0x7f800000,3 -np.float32,0x7f643b45,0x7f800000,3 -np.float32,0x7e910ea0,0x7f800000,3 -np.float32,0x7eb4756b,0x7f800000,3 -np.float32,0x7f4ec708,0x7f800000,3 -np.float32,0x7f6b4551,0x7f800000,3 -np.float32,0x7d8edbda,0x7f800000,3 -np.float32,0x7f730718,0x7f800000,3 -np.float32,0x42b17217,0x7f7fff84,3 -np.float32,0x42b17218,0x7f800000,3 -np.float32,0x42b17219,0x7f800000,3 
-np.float32,0xfef2b0bc,0x00000000,3 -np.float32,0xff69f83e,0x00000000,3 -np.float32,0xff4ecb12,0x00000000,3 -np.float32,0xfeac6d86,0x00000000,3 -np.float32,0xfde0cdb8,0x00000000,3 -np.float32,0xff26aef4,0x00000000,3 -np.float32,0xff6f9277,0x00000000,3 -np.float32,0xff7adfc4,0x00000000,3 -np.float32,0xff0ad40e,0x00000000,3 -np.float32,0xff6fd8f3,0x00000000,3 -np.float32,0xc2cff1b4,0x00000001,3 -np.float32,0xc2cff1b5,0x00000000,3 -np.float32,0xc2cff1b6,0x00000000,3 -np.float32,0x7f800000,0x7f800000,3 -np.float32,0xff800000,0x00000000,3 -np.float32,0x4292f27c,0x7480000a,3 -np.float32,0x42a920be,0x7c7fff94,3 -np.float32,0x41c214c9,0x50ffffd9,3 -np.float32,0x41abe686,0x4effffd9,3 -np.float32,0x4287db5a,0x707fffd3,3 -np.float32,0x41902cbb,0x4c800078,3 -np.float32,0x42609466,0x67ffffeb,3 -np.float32,0x41a65af5,0x4e7fffd1,3 -np.float32,0x417f13ff,0x4affffc9,3 -np.float32,0x426d0e6c,0x6a3504f2,3 -np.float32,0x41bc8934,0x507fff51,3 -np.float32,0x42a7bdde,0x7c0000d6,3 -np.float32,0x4120cf66,0x46b504f6,3 -np.float32,0x4244da8f,0x62ffff1a,3 -np.float32,0x41a0cf69,0x4e000034,3 -np.float32,0x41cd2bec,0x52000005,3 -np.float32,0x42893e41,0x7100009e,3 -np.float32,0x41b437e1,0x4fb50502,3 -np.float32,0x41d8430f,0x5300001d,3 -np.float32,0x4244da92,0x62ffffda,3 -np.float32,0x41a0cf63,0x4dffffa9,3 -np.float32,0x3eb17218,0x3fb504f3,3 -np.float32,0x428729e8,0x703504dc,3 -np.float32,0x41a0cf67,0x4e000014,3 -np.float32,0x4252b77d,0x65800011,3 -np.float32,0x41902cb9,0x4c800058,3 -np.float32,0x42a0cf67,0x79800052,3 -np.float32,0x4152b77b,0x48ffffe9,3 -np.float32,0x41265af3,0x46ffffc8,3 -np.float32,0x42187e0b,0x5affff9a,3 -np.float32,0xc0d2b77c,0x3ab504f6,3 -np.float32,0xc283b2ac,0x10000072,3 -np.float32,0xc1cff1b4,0x2cb504f5,3 -np.float32,0xc05dce9e,0x3d000000,3 -np.float32,0xc28ec9d2,0x0bfffea5,3 -np.float32,0xc23c893a,0x1d7fffde,3 -np.float32,0xc2a920c0,0x027fff6c,3 -np.float32,0xc1f9886f,0x2900002b,3 -np.float32,0xc2c42920,0x000000b5,3 -np.float32,0xc2893e41,0x0dfffec5,3 -np.float32,0xc2c4da93,0x00000080,3 -np.float32,0xc17f1401,0x3400000c,3 -np.float32,0xc1902cb6,0x327fffaf,3 -np.float32,0xc27c4e3b,0x11ffffc5,3 -np.float32,0xc268e5c5,0x157ffe9d,3 -np.float32,0xc2b4e953,0x0005a826,3 -np.float32,0xc287db5a,0x0e800016,3 -np.float32,0xc207db5a,0x2700000b,3 -np.float32,0xc2b2d4fe,0x000ffff1,3 -np.float32,0xc268e5c0,0x157fffdd,3 -np.float32,0xc22920bd,0x2100003b,3 -np.float32,0xc2902caf,0x0b80011e,3 -np.float32,0xc1902cba,0x327fff2f,3 -np.float32,0xc2ca6625,0x00000008,3 -np.float32,0xc280ece8,0x10fffeb5,3 -np.float32,0xc2918f94,0x0b0000ea,3 -np.float32,0xc29b43d5,0x077ffffc,3 -np.float32,0xc1e61ff7,0x2ab504f5,3 -np.float32,0xc2867878,0x0effff15,3 -np.float32,0xc2a2324a,0x04fffff4,3 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-log b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-log deleted file mode 100644 index a7bd984..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-log +++ /dev/null @@ -1,118 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0xc2afbc1b,4 -np.float32,0x007b2490,0xc2aec01e,4 -np.float32,0x007c99fa,0xc2aeba17,4 -np.float32,0x00734a0c,0xc2aee1dc,4 -np.float32,0x0070de24,0xc2aeecba,4 -np.float32,0x007fffff,0xc2aeac50,4 -np.float32,0x00000001,0xc2ce8ed0,4 -## -ve denormals ## -np.float32,0x80495d65,0xffc00000,4 -np.float32,0x806894f6,0xffc00000,4 -np.float32,0x80555a76,0xffc00000,4 -np.float32,0x804e1fb8,0xffc00000,4 -np.float32,0x80687de9,0xffc00000,4 
-np.float32,0x807fffff,0xffc00000,4 -np.float32,0x80000001,0xffc00000,4 -## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## -np.float32,0x00000000,0xff800000,4 -np.float32,0x80000000,0xff800000,4 -np.float32,0x7f7fffff,0x42b17218,4 -np.float32,0x80800000,0xffc00000,4 -np.float32,0xff7fffff,0xffc00000,4 -## 1.00f + 0x00000001 ## -np.float32,0x3f800000,0x00000000,4 -np.float32,0x3f800001,0x33ffffff,4 -np.float32,0x3f800002,0x347ffffe,4 -np.float32,0x3f7fffff,0xb3800000,4 -np.float32,0x3f7ffffe,0xb4000000,4 -np.float32,0x3f7ffffd,0xb4400001,4 -np.float32,0x402df853,0x3f7ffffe,4 -np.float32,0x402df854,0x3f7fffff,4 -np.float32,0x402df855,0x3f800000,4 -np.float32,0x402df856,0x3f800001,4 -np.float32,0x3ebc5ab0,0xbf800001,4 -np.float32,0x3ebc5ab1,0xbf800000,4 -np.float32,0x3ebc5ab2,0xbf800000,4 -np.float32,0x3ebc5ab3,0xbf7ffffe,4 -np.float32,0x423ef575,0x407768ab,4 -np.float32,0x427b8c61,0x408485dd,4 -np.float32,0x4211e9ee,0x406630b0,4 -np.float32,0x424d5c41,0x407c0fed,4 -np.float32,0x42be722a,0x4091cc91,4 -np.float32,0x42b73d30,0x4090908b,4 -np.float32,0x427e48e2,0x4084de7f,4 -np.float32,0x428f759b,0x4088bba3,4 -np.float32,0x41629069,0x4029a0cc,4 -np.float32,0x4272c99d,0x40836379,4 -np.float32,0x4d1b7458,0x4197463d,4 -np.float32,0x4f10c594,0x41ace2b2,4 -np.float32,0x4ea397c2,0x41a85171,4 -np.float32,0x4fefa9d1,0x41b6769c,4 -np.float32,0x4ebac6ab,0x41a960dc,4 -np.float32,0x4f6efb42,0x41b0e535,4 -np.float32,0x4e9ab8e7,0x41a7df44,4 -np.float32,0x4e81b5d1,0x41a67625,4 -np.float32,0x5014d9f2,0x41b832bd,4 -np.float32,0x4f02175c,0x41ac07b8,4 -np.float32,0x7f034f89,0x42b01c47,4 -np.float32,0x7f56d00e,0x42b11849,4 -np.float32,0x7f1cd5f6,0x42b0773a,4 -np.float32,0x7e979174,0x42af02d7,4 -np.float32,0x7f23369f,0x42b08ba2,4 -np.float32,0x7f0637ae,0x42b0277d,4 -np.float32,0x7efcb6e8,0x42b00897,4 -np.float32,0x7f7907c8,0x42b163f6,4 -np.float32,0x7e95c4c2,0x42aefcba,4 -np.float32,0x7f4577b2,0x42b0ed2d,4 -np.float32,0x3f49c92e,0xbe73ae84,4 -np.float32,0x3f4a23d1,0xbe71e2f8,4 -np.float32,0x3f4abb67,0xbe6ee430,4 -np.float32,0x3f48169a,0xbe7c5532,4 -np.float32,0x3f47f5fa,0xbe7cfc37,4 -np.float32,0x3f488309,0xbe7a2ad8,4 -np.float32,0x3f479df4,0xbe7ebf5f,4 -np.float32,0x3f47cfff,0xbe7dbec9,4 -np.float32,0x3f496704,0xbe75a125,4 -np.float32,0x3f478ee8,0xbe7f0c92,4 -np.float32,0x3f4a763b,0xbe7041ce,4 -np.float32,0x3f47a108,0xbe7eaf94,4 -np.float32,0x3f48136c,0xbe7c6578,4 -np.float32,0x3f481c17,0xbe7c391c,4 -np.float32,0x3f47cd28,0xbe7dcd56,4 -np.float32,0x3f478be8,0xbe7f1bf7,4 -np.float32,0x3f4c1f8e,0xbe67e367,4 -np.float32,0x3f489b0c,0xbe79b03f,4 -np.float32,0x3f4934cf,0xbe76a08a,4 -np.float32,0x3f4954df,0xbe75fd6a,4 -np.float32,0x3f47a3f5,0xbe7ea093,4 -np.float32,0x3f4ba4fc,0xbe6a4b02,4 -np.float32,0x3f47a0e1,0xbe7eb05c,4 -np.float32,0x3f48c30a,0xbe78e42f,4 -np.float32,0x3f48cab8,0xbe78bd05,4 -np.float32,0x3f4b0569,0xbe6d6ea4,4 -np.float32,0x3f47de32,0xbe7d7607,4 -np.float32,0x3f477328,0xbe7f9b00,4 -np.float32,0x3f496dab,0xbe757f52,4 -np.float32,0x3f47662c,0xbe7fddac,4 -np.float32,0x3f48ddd8,0xbe785b80,4 -np.float32,0x3f481866,0xbe7c4bff,4 -np.float32,0x3f48b119,0xbe793fb6,4 -np.float32,0x3f48c7e8,0xbe78cb5c,4 -np.float32,0x3f4985f6,0xbe7503da,4 -np.float32,0x3f483fdf,0xbe7b8212,4 -np.float32,0x3f4b1c76,0xbe6cfa67,4 -np.float32,0x3f480b2e,0xbe7c8fa8,4 -np.float32,0x3f48745f,0xbe7a75bf,4 -np.float32,0x3f485bda,0xbe7af308,4 -np.float32,0x3f47a660,0xbe7e942c,4 -np.float32,0x3f47d4d5,0xbe7da600,4 -np.float32,0x3f4b0a26,0xbe6d56be,4 -np.float32,0x3f4a4883,0xbe712924,4 -np.float32,0x3f4769e7,0xbe7fca84,4 
-np.float32,0x3f499702,0xbe74ad3f,4 -np.float32,0x3f494ab1,0xbe763131,4 -np.float32,0x3f476b69,0xbe7fc2c6,4 -np.float32,0x3f4884e8,0xbe7a214a,4 -np.float32,0x3f486945,0xbe7aae76,4 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-sin b/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-sin deleted file mode 100644 index a562731..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/data/umath-validation-set-sin +++ /dev/null @@ -1,707 +0,0 @@ -dtype,input,output,ulperrortol -## +ve denormals ## -np.float32,0x004b4716,0x004b4716,2 -np.float32,0x007b2490,0x007b2490,2 -np.float32,0x007c99fa,0x007c99fa,2 -np.float32,0x00734a0c,0x00734a0c,2 -np.float32,0x0070de24,0x0070de24,2 -np.float32,0x007fffff,0x007fffff,2 -np.float32,0x00000001,0x00000001,2 -## -ve denormals ## -np.float32,0x80495d65,0x80495d65,2 -np.float32,0x806894f6,0x806894f6,2 -np.float32,0x80555a76,0x80555a76,2 -np.float32,0x804e1fb8,0x804e1fb8,2 -np.float32,0x80687de9,0x80687de9,2 -np.float32,0x807fffff,0x807fffff,2 -np.float32,0x80000001,0x80000001,2 -## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## -np.float32,0x00000000,0x00000000,2 -np.float32,0x80000000,0x80000000,2 -np.float32,0x00800000,0x00800000,2 -np.float32,0x7f7fffff,0xbf0599b3,2 -np.float32,0x80800000,0x80800000,2 -np.float32,0xff7fffff,0x3f0599b3,2 -## 1.00f ## -np.float32,0x3f800000,0x3f576aa4,2 -np.float32,0x3f800001,0x3f576aa6,2 -np.float32,0x3f800002,0x3f576aa7,2 -np.float32,0xc090a8b0,0x3f7b4e48,2 -np.float32,0x41ce3184,0x3f192d43,2 -np.float32,0xc1d85848,0xbf7161cb,2 -np.float32,0x402b8820,0x3ee3f29f,2 -np.float32,0x42b4e454,0x3f1d0151,2 -np.float32,0x42a67a60,0x3f7ffa4c,2 -np.float32,0x41d92388,0x3f67beef,2 -np.float32,0x422dd66c,0xbeffb0c1,2 -np.float32,0xc28f5be6,0xbf0bae79,2 -np.float32,0x41ab2674,0x3f0ffe2b,2 -np.float32,0xd0102756,0x3f227e8a,2 -np.float32,0xcf99405e,0x3f73ad00,2 -np.float32,0xcfd83a12,0xbf7151a7,2 -np.float32,0x4fb54db0,0xbe46354b,2 -np.float32,0xcfcca29d,0xbe9345e6,2 -np.float32,0xceec2ac0,0x3e98dc89,2 -np.float32,0xcfdca97f,0xbf60b2b4,2 -np.float32,0xcfe92b0a,0xbf222705,2 -np.float32,0x5014b0eb,0x3f63e75c,2 -np.float32,0xcfa7ee96,0x3f62ada4,2 -np.float32,0x754c09a0,0xbf617056,2 -np.float32,0x77a731fb,0x3f44472b,2 -np.float32,0x76de2494,0xbe680739,2 -np.float32,0xf74920dc,0xbf193338,2 -np.float32,0x7707a312,0xbf6f51b1,2 -np.float32,0x75bf9790,0xbd0f1a47,2 -np.float32,0xf4ca7c40,0xbf7d45e7,2 -np.float32,0x77e91899,0x3f767181,2 -np.float32,0xf74c9820,0xbd685b75,2 -np.float32,0x7785ca29,0x3f78ee61,2 -np.float32,0x3f490fdb,0x3f3504f3,2 -np.float32,0xbf490fdb,0xbf3504f3,2 -np.float32,0x3fc90fdb,0x3f800000,2 -np.float32,0xbfc90fdb,0xbf800000,2 -np.float32,0x40490fdb,0xb3bbbd2e,2 -np.float32,0xc0490fdb,0x33bbbd2e,2 -np.float32,0x3fc90fdb,0x3f800000,2 -np.float32,0xbfc90fdb,0xbf800000,2 -np.float32,0x40490fdb,0xb3bbbd2e,2 -np.float32,0xc0490fdb,0x33bbbd2e,2 -np.float32,0x40c90fdb,0x343bbd2e,2 -np.float32,0xc0c90fdb,0xb43bbd2e,2 -np.float32,0x4016cbe4,0x3f3504f3,2 -np.float32,0xc016cbe4,0xbf3504f3,2 -np.float32,0x4096cbe4,0xbf800000,2 -np.float32,0xc096cbe4,0x3f800000,2 -np.float32,0x4116cbe4,0xb2ccde2e,2 -np.float32,0xc116cbe4,0x32ccde2e,2 -np.float32,0x40490fdb,0xb3bbbd2e,2 -np.float32,0xc0490fdb,0x33bbbd2e,2 -np.float32,0x40c90fdb,0x343bbd2e,2 -np.float32,0xc0c90fdb,0xb43bbd2e,2 -np.float32,0x41490fdb,0x34bbbd2e,2 -np.float32,0xc1490fdb,0xb4bbbd2e,2 -np.float32,0x407b53d2,0xbf3504f5,2 -np.float32,0xc07b53d2,0x3f3504f5,2 -np.float32,0x40fb53d2,0x3f800000,2 
-np.float32,0xc0fb53d2,0xbf800000,2
-np.float32,0x417b53d2,0xb535563d,2
[... several hundred further deleted np.float32 rows elided: float32 ufunc validation vectors, one per line, in the format "dtype,input bits,expected output bits,allowed ulp error (2 throughout)"; the sweep covers arguments near multiples of pi/4 plus assorted large- and small-magnitude inputs, consistent with a sin() validation set ...]
-np.float32,0x441c4351,0x3dff937b,2
diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test__exceptions.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test__exceptions.py
deleted file mode 100644
index 494b51f..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/tests/test__exceptions.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
-"""
-import numpy as np
-
-_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
-
-class TestArrayMemoryError:
-    def test_str(self):
-        e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
-        str(e)  # not crashing is enough
-
-    # testing these properties is easier than testing the full string repr
-    def test__size_to_string(self):
-        """ Test e._size_to_string """
-        f = _ArrayMemoryError._size_to_string
-        Ki = 1024
-        assert f(0) == '0 bytes'
-        assert f(1) == '1 bytes'
-        assert f(1023) == '1023 bytes'
-        assert f(Ki) == '1.00 KiB'
-        assert f(Ki+1) == '1.00 KiB'
-        assert f(10*Ki) == '10.0 KiB'
-        assert f(int(999.4*Ki)) == '999. KiB'
-        assert f(int(1023.4*Ki)) == '1023. KiB'
-        assert f(int(1023.5*Ki)) == '1.00 MiB'
-        assert f(Ki*Ki) == '1.00 MiB'
-
-        # 1023.9999 MiB should round to 1 GiB
-        assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
-        assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
-        # larger than sys.maxsize, adding larger prefixes isn't going to help
-        # anyway.
-        assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456.
EiB' - - def test__total_size(self): - """ Test e._total_size """ - e = _ArrayMemoryError((1,), np.dtype(np.uint8)) - assert e._total_size == 1 - - e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) - assert e._total_size == 1024 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_abc.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_abc.py deleted file mode 100644 index d9c61b0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_abc.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import assert_ - -import numbers - -import numpy as np -from numpy.core.numerictypes import sctypes - -class TestABC(object): - def test_abstract(self): - assert_(issubclass(np.number, numbers.Number)) - - assert_(issubclass(np.inexact, numbers.Complex)) - assert_(issubclass(np.complexfloating, numbers.Complex)) - assert_(issubclass(np.floating, numbers.Real)) - - assert_(issubclass(np.integer, numbers.Integral)) - assert_(issubclass(np.signedinteger, numbers.Integral)) - assert_(issubclass(np.unsignedinteger, numbers.Integral)) - - def test_floats(self): - for t in sctypes['float']: - assert_(isinstance(t(), numbers.Real), - "{0} is not instance of Real".format(t.__name__)) - assert_(issubclass(t, numbers.Real), - "{0} is not subclass of Real".format(t.__name__)) - assert_(not isinstance(t(), numbers.Rational), - "{0} is instance of Rational".format(t.__name__)) - assert_(not issubclass(t, numbers.Rational), - "{0} is subclass of Rational".format(t.__name__)) - - def test_complex(self): - for t in sctypes['complex']: - assert_(isinstance(t(), numbers.Complex), - "{0} is not instance of Complex".format(t.__name__)) - assert_(issubclass(t, numbers.Complex), - "{0} is not subclass of Complex".format(t.__name__)) - assert_(not isinstance(t(), numbers.Real), - "{0} is instance of Real".format(t.__name__)) - assert_(not issubclass(t, numbers.Real), - "{0} is subclass of Real".format(t.__name__)) - - def test_int(self): - for t in sctypes['int']: - assert_(isinstance(t(), numbers.Integral), - "{0} is not instance of Integral".format(t.__name__)) - assert_(issubclass(t, numbers.Integral), - "{0} is not subclass of Integral".format(t.__name__)) - - def test_uint(self): - for t in sctypes['uint']: - assert_(isinstance(t(), numbers.Integral), - "{0} is not instance of Integral".format(t.__name__)) - assert_(issubclass(t, numbers.Integral), - "{0} is not subclass of Integral".format(t.__name__)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_api.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_api.py deleted file mode 100644 index 89fc2b0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_api.py +++ /dev/null @@ -1,526 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -import pytest -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, assert_warns, - HAS_REFCOUNT - ) - -# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set. 
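# A minimal standalone sketch (not from the deleted file) of the probe used
# on the next line: on NPY_RELAXED_STRIDES_CHECKING builds of NumPy, the
# stride of a length-1 axis is ignored when deciding contiguity, so a
# C-ordered (10, 1) array is reported F-contiguous as well.
import numpy as np

probe = np.ones((10, 1), order='C')
print(probe.flags.c_contiguous)  # True on any build
print(probe.flags.f_contiguous)  # True only on relaxed-strides builds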
-NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous - - -def test_array_array(): - tobj = type(object) - ones11 = np.ones((1, 1), np.float64) - tndarray = type(ones11) - # Test is_ndarray - assert_equal(np.array(ones11, dtype=np.float64), ones11) - if HAS_REFCOUNT: - old_refcount = sys.getrefcount(tndarray) - np.array(ones11) - assert_equal(old_refcount, sys.getrefcount(tndarray)) - - # test None - assert_equal(np.array(None, dtype=np.float64), - np.array(np.nan, dtype=np.float64)) - if HAS_REFCOUNT: - old_refcount = sys.getrefcount(tobj) - np.array(None, dtype=np.float64) - assert_equal(old_refcount, sys.getrefcount(tobj)) - - # test scalar - assert_equal(np.array(1.0, dtype=np.float64), - np.ones((), dtype=np.float64)) - if HAS_REFCOUNT: - old_refcount = sys.getrefcount(np.float64) - np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) - assert_equal(old_refcount, sys.getrefcount(np.float64)) - - # test string - S2 = np.dtype((str, 2)) - S3 = np.dtype((str, 3)) - S5 = np.dtype((str, 5)) - assert_equal(np.array("1.0", dtype=np.float64), - np.ones((), dtype=np.float64)) - assert_equal(np.array("1.0").dtype, S3) - assert_equal(np.array("1.0", dtype=str).dtype, S3) - assert_equal(np.array("1.0", dtype=S2), np.array("1.")) - assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5)) - - # test unicode - _unicode = globals().get("unicode") - if _unicode: - U2 = np.dtype((_unicode, 2)) - U3 = np.dtype((_unicode, 3)) - U5 = np.dtype((_unicode, 5)) - assert_equal(np.array(_unicode("1.0"), dtype=np.float64), - np.ones((), dtype=np.float64)) - assert_equal(np.array(_unicode("1.0")).dtype, U3) - assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3) - assert_equal(np.array(_unicode("1.0"), dtype=U2), - np.array(_unicode("1."))) - assert_equal(np.array(_unicode("1"), dtype=U5), - np.ones((), dtype=U5)) - - builtins = getattr(__builtins__, '__dict__', __builtins__) - assert_(hasattr(builtins, 'get')) - - # test buffer - _buffer = builtins.get("buffer") - if _buffer and sys.version_info[:3] >= (2, 7, 5): - # This test fails for earlier versions of Python. - # Evidently a bug got fixed in 2.7.5. - dat = np.array(_buffer('1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_buffer(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) - - # test memoryview, new version of buffer - _memoryview = builtins.get("memoryview") - if _memoryview: - dat = np.array(_memoryview(b'1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_memoryview(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) - - # test array interface - a = np.array(100.0, dtype=np.float64) - o = type("o", (object,), - dict(__array_interface__=a.__array_interface__)) - assert_equal(np.array(o, dtype=np.float64), a) - - # test array_struct interface - a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], - dtype=[('f0', int), ('f1', float), ('f2', str)]) - o = type("o", (object,), - dict(__array_struct__=a.__array_struct__)) - ## wasn't what I expected... is np.array(o) supposed to equal a ? 
- ## instead we get a array([...], dtype=">V18") - assert_equal(bytes(np.array(o).data), bytes(a.data)) - - # test array - o = type("o", (object,), - dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))() - assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) - - # test recursion - nested = 1.5 - for i in range(np.MAXDIMS): - nested = [nested] - - # no error - np.array(nested) - - # Exceeds recursion limit - assert_raises(ValueError, np.array, [nested], dtype=np.float64) - - # Try with lists... - assert_equal(np.array([None] * 10, dtype=np.float64), - np.full((10,), np.nan, dtype=np.float64)) - assert_equal(np.array([[None]] * 10, dtype=np.float64), - np.full((10, 1), np.nan, dtype=np.float64)) - assert_equal(np.array([[None] * 10], dtype=np.float64), - np.full((1, 10), np.nan, dtype=np.float64)) - assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), - np.full((10, 10), np.nan, dtype=np.float64)) - - assert_equal(np.array([1.0] * 10, dtype=np.float64), - np.ones((10,), dtype=np.float64)) - assert_equal(np.array([[1.0]] * 10, dtype=np.float64), - np.ones((10, 1), dtype=np.float64)) - assert_equal(np.array([[1.0] * 10], dtype=np.float64), - np.ones((1, 10), dtype=np.float64)) - assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), - np.ones((10, 10), dtype=np.float64)) - - # Try with tuples - assert_equal(np.array((None,) * 10, dtype=np.float64), - np.full((10,), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,)] * 10, dtype=np.float64), - np.full((10, 1), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,) * 10], dtype=np.float64), - np.full((1, 10), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), - np.full((10, 10), np.nan, dtype=np.float64)) - - assert_equal(np.array((1.0,) * 10, dtype=np.float64), - np.ones((10,), dtype=np.float64)) - assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), - np.ones((10, 1), dtype=np.float64)) - assert_equal(np.array([(1.0,) * 10], dtype=np.float64), - np.ones((1, 10), dtype=np.float64)) - assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), - np.ones((10, 10), dtype=np.float64)) - - -def test_fastCopyAndTranspose(): - # 0D array - a = np.array(2) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 1D array - a = np.array([3, 2, 7, 0]) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 2D array - a = np.arange(6).reshape(2, 3) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - -def test_array_astype(): - a = np.arange(6, dtype='f4').reshape(2, 3) - # Default behavior: allows unsafe casts, keeps memory layout, - # always copies. 
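# An illustrative aside, assuming current NumPy semantics for ndarray.astype
# as exercised below: the default always returns a new array, copy=False may
# return the input itself when dtype, order and subclass already match, and
# order='F' forces a fresh F-contiguous buffer.
import numpy as np

arr = np.arange(6, dtype='f4').reshape(2, 3)
assert arr.astype('f4') is not arr          # default: always copies
assert arr.astype('f4', copy=False) is arr  # matching cast may skip the copy
fortran = arr.astype('f4', order='F', copy=False)
assert fortran is not arr and fortran.flags.f_contiguous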
- b = a.astype('i4') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('i4')) - assert_equal(a.strides, b.strides) - b = a.T.astype('i4') - assert_equal(a.T, b) - assert_equal(b.dtype, np.dtype('i4')) - assert_equal(a.T.strides, b.strides) - b = a.astype('f4') - assert_equal(a, b) - assert_(not (a is b)) - - # copy=False parameter can sometimes skip a copy - b = a.astype('f4', copy=False) - assert_(a is b) - - # order parameter allows overriding of the memory layout, - # forcing a copy if the layout is wrong - b = a.astype('f4', order='F', copy=False) - assert_equal(a, b) - assert_(not (a is b)) - assert_(b.flags.f_contiguous) - - b = a.astype('f4', order='C', copy=False) - assert_equal(a, b) - assert_(a is b) - assert_(b.flags.c_contiguous) - - # casting parameter allows catching bad casts - b = a.astype('c8', casting='safe') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('c8')) - - assert_raises(TypeError, a.astype, 'i4', casting='safe') - - # subok=False passes through a non-subclassed array - b = a.astype('f4', subok=0, copy=False) - assert_(a is b) - - class MyNDArray(np.ndarray): - pass - - a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) - - # subok=True passes through a subclass - b = a.astype('f4', subok=True, copy=False) - assert_(a is b) - - # subok=True is default, and creates a subtype on a cast - b = a.astype('i4', copy=False) - assert_equal(a, b) - assert_equal(type(b), MyNDArray) - - # subok=False never returns a subclass - b = a.astype('f4', subok=False, copy=False) - assert_equal(a, b) - assert_(not (a is b)) - assert_(type(b) is not MyNDArray) - - # Make sure converting from string object to fixed length string - # does not truncate. - a = np.array([b'a'*100], dtype='O') - b = a.astype('S') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('S100')) - a = np.array([u'a'*100], dtype='O') - b = a.astype('U') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('U100')) - - # Same test as above but for strings shorter than 64 characters - a = np.array([b'a'*10], dtype='O') - b = a.astype('S') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('S10')) - a = np.array([u'a'*10], dtype='O') - b = a.astype('U') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('U10')) - - a = np.array(123456789012345678901234567890, dtype='O').astype('S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array(123456789012345678901234567890, dtype='O').astype('U') - assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) - - a = np.array([123456789012345678901234567890], dtype='O').astype('S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array([123456789012345678901234567890], dtype='O').astype('U') - assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) - - a = np.array(123456789012345678901234567890, dtype='S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array(123456789012345678901234567890, dtype='U') - assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) - - a = np.array(u'a\u0140', dtype='U') - b = np.ndarray(buffer=a, dtype='uint32', shape=2) - assert_(b.size == 2) - - a = np.array([1000], dtype='i4') - assert_raises(TypeError, a.astype, 'S1', casting='safe') - - a = np.array(1000, dtype='i4') - assert_raises(TypeError, a.astype, 'U1', casting='safe') - -@pytest.mark.parametrize("t", - np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float'] -) -def test_array_astype_warning(t): - # test ComplexWarning when casting from complex to 
float or int - a = np.array(10, dtype=np.complex_) - assert_warns(np.ComplexWarning, a.astype, t) - -def test_copyto_fromscalar(): - a = np.arange(6, dtype='f4').reshape(2, 3) - - # Simple copy - np.copyto(a, 1.5) - assert_equal(a, 1.5) - np.copyto(a.T, 2.5) - assert_equal(a, 2.5) - - # Where-masked copy - mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') - np.copyto(a, 3.5, where=mask) - assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) - mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') - np.copyto(a.T, 4.5, where=mask) - assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) - -def test_copyto(): - a = np.arange(6, dtype='i4').reshape(2, 3) - - # Simple copy - np.copyto(a, [[3, 1, 5], [6, 2, 1]]) - assert_equal(a, [[3, 1, 5], [6, 2, 1]]) - - # Overlapping copy should work - np.copyto(a[:, :2], a[::-1, 1::-1]) - assert_equal(a, [[2, 6, 5], [1, 3, 1]]) - - # Defaults to 'same_kind' casting - assert_raises(TypeError, np.copyto, a, 1.5) - - # Force a copy with 'unsafe' casting, truncating 1.5 to 1 - np.copyto(a, 1.5, casting='unsafe') - assert_equal(a, 1) - - # Copying with a mask - np.copyto(a, 3, where=[True, False, True]) - assert_equal(a, [[3, 1, 3], [3, 1, 3]]) - - # Casting rule still applies with a mask - assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) - - # Lists of integer 0's and 1's is ok too - np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) - assert_equal(a, [[3, 4, 4], [4, 1, 3]]) - - # Overlapping copy with mask should work - np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) - assert_equal(a, [[3, 4, 4], [4, 3, 3]]) - - # 'dst' must be an array - assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) - -def test_copyto_permut(): - # test explicit overflow case - pad = 500 - l = [True] * pad + [True, True, True, True] - r = np.zeros(len(l)-pad) - d = np.ones(len(l)-pad) - mask = np.array(l)[pad:] - np.copyto(r, d, where=mask[::-1]) - - # test all permutation of possible masks, 9 should be sufficient for - # current 4 byte unrolled code - power = 9 - d = np.ones(power) - for i in range(2**power): - r = np.zeros(power) - l = [(i & x) != 0 for x in range(power)] - mask = np.array(l) - np.copyto(r, d, where=mask) - assert_array_equal(r == 1, l) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r, d, where=mask[::-1]) - assert_array_equal(r == 1, l[::-1]) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r[::2], d[::2], where=mask[::2]) - assert_array_equal(r[::2] == 1, l[::2]) - assert_equal(r[::2].sum(), sum(l[::2])) - - r = np.zeros(power) - np.copyto(r[::2], d[::2], where=mask[::-2]) - assert_array_equal(r[::2] == 1, l[::-2]) - assert_equal(r[::2].sum(), sum(l[::-2])) - - for c in [0xFF, 0x7F, 0x02, 0x10]: - r = np.zeros(power) - mask = np.array(l) - imask = np.array(l).view(np.uint8) - imask[mask != 0] = c - np.copyto(r, d, where=mask) - assert_array_equal(r == 1, l) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r, d, where=True) - assert_equal(r.sum(), r.size) - r = np.ones(power) - d = np.zeros(power) - np.copyto(r, d, where=False) - assert_equal(r.sum(), r.size) - -def test_copy_order(): - a = np.arange(24).reshape(2, 1, 3, 4) - b = a.copy(order='F') - c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) - - def check_copy_result(x, y, ccontig, fcontig, strides=False): - assert_(not (x is y)) - assert_equal(x, y) - assert_equal(res.flags.c_contiguous, ccontig) - assert_equal(res.flags.f_contiguous, fcontig) - # This check is impossible only because - # 
NPY_RELAXED_STRIDES_CHECKING changes the strides actively - if not NPY_RELAXED_STRIDES_CHECKING: - if strides: - assert_equal(x.strides, y.strides) - else: - assert_(x.strides != y.strides) - - # Validate the initial state of a, b, and c - assert_(a.flags.c_contiguous) - assert_(not a.flags.f_contiguous) - assert_(not b.flags.c_contiguous) - assert_(b.flags.f_contiguous) - assert_(not c.flags.c_contiguous) - assert_(not c.flags.f_contiguous) - - # Copy with order='C' - res = a.copy(order='C') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = b.copy(order='C') - check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) - res = c.copy(order='C') - check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) - res = np.copy(a, order='C') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = np.copy(b, order='C') - check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) - res = np.copy(c, order='C') - check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) - - # Copy with order='F' - res = a.copy(order='F') - check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) - res = b.copy(order='F') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = c.copy(order='F') - check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) - res = np.copy(a, order='F') - check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) - res = np.copy(b, order='F') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = np.copy(c, order='F') - check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) - - # Copy with order='K' - res = a.copy(order='K') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = b.copy(order='K') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = c.copy(order='K') - check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) - res = np.copy(a, order='K') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = np.copy(b, order='K') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = np.copy(c, order='K') - check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) - -def test_contiguous_flags(): - a = np.ones((4, 4, 1))[::2,:,:] - if NPY_RELAXED_STRIDES_CHECKING: - a.strides = a.strides[:2] + (-123,) - b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) - - def check_contig(a, ccontig, fcontig): - assert_(a.flags.c_contiguous == ccontig) - assert_(a.flags.f_contiguous == fcontig) - - # Check if new arrays are correct: - check_contig(a, False, False) - check_contig(b, False, False) - if NPY_RELAXED_STRIDES_CHECKING: - check_contig(np.empty((2, 2, 0, 2, 2)), True, True) - check_contig(np.array([[[1], [2]]], order='F'), True, True) - else: - check_contig(np.empty((2, 2, 0, 2, 2)), True, False) - check_contig(np.array([[[1], [2]]], order='F'), False, True) - check_contig(np.empty((2, 2)), True, False) - check_contig(np.empty((2, 2), order='F'), False, True) - - # Check that np.array creates correct contiguous flags: - check_contig(np.array(a, copy=False), False, False) - check_contig(np.array(a, copy=False, order='C'), True, False) - check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True) - - if NPY_RELAXED_STRIDES_CHECKING: - # Check slicing update of flags and : - check_contig(a[0], True, True) - check_contig(a[None, ::4, ..., None], True, True) - check_contig(b[0, 0, ...], 
False, True) - check_contig(b[:,:, 0:0,:,:], True, True) - else: - # Check slicing update of flags: - check_contig(a[0], True, False) - # Would be nice if this was C-Contiguous: - check_contig(a[None, 0, ..., None], False, False) - check_contig(b[0, 0, 0, ...], False, True) - - # Test ravel and squeeze. - check_contig(a.ravel(), True, True) - check_contig(np.ones((1, 3, 1)).squeeze(), True, True) - -def test_broadcast_arrays(): - # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') - result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_arrayprint.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_arrayprint.py deleted file mode 100644 index 702e68e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_arrayprint.py +++ /dev/null @@ -1,888 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import division, absolute_import, print_function - -import sys -import gc -import pytest - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, - assert_raises_regex, - ) -import textwrap - -class TestArrayRepr(object): - def test_nan_inf(self): - x = np.array([np.nan, np.inf]) - assert_equal(repr(x), 'array([nan, inf])') - - def test_subclass(self): - class sub(np.ndarray): pass - - # one dimensional - x1d = np.array([1, 2]).view(sub) - assert_equal(repr(x1d), 'sub([1, 2])') - - # two dimensional - x2d = np.array([[1, 2], [3, 4]]).view(sub) - assert_equal(repr(x2d), - 'sub([[1, 2],\n' - ' [3, 4]])') - - # two dimensional with flexible dtype - xstruct = np.ones((2,2), dtype=[('a', ' 1) - y = sub(None) - x[()] = y - y[()] = x - assert_equal(repr(x), - 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)') - assert_equal(str(x), '...') - x[()] = 0 # resolve circular references for garbage collector - - # nested 0d-subclass-object - x = sub(None) - x[()] = sub(None) - assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)') - assert_equal(str(x), 'None') - - # gh-10663 - class DuckCounter(np.ndarray): - def __getitem__(self, item): - result = super(DuckCounter, self).__getitem__(item) - if not isinstance(result, DuckCounter): - result = result[...].view(DuckCounter) - return result - - def to_string(self): - return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many') - - def __str__(self): - if self.shape == (): - return self.to_string() - else: - fmt = {'all': lambda x: x.to_string()} - return np.array2string(self, formatter=fmt) - - dc = np.arange(5).view(DuckCounter) - assert_equal(str(dc), "[zero one two many many]") - assert_equal(str(dc[0]), "zero") - - def test_self_containing(self): - arr0d = np.array(None) - arr0d[()] = arr0d - assert_equal(repr(arr0d), - 'array(array(..., dtype=object), dtype=object)') - arr0d[()] = 0 # resolve recursion for garbage collector - - arr1d = np.array([None, None]) - arr1d[1] = arr1d - assert_equal(repr(arr1d), - 'array([None, array(..., dtype=object)], dtype=object)') - arr1d[1] = 0 # resolve recursion for garbage collector - - first = np.array(None) - second = np.array(None) - first[()] = second - second[()] = first - assert_equal(repr(first), - 'array(array(array(..., dtype=object), dtype=object), dtype=object)') - first[()] = 0 # resolve circular 
references for garbage collector - - def test_containing_list(self): - # printing square brackets directly would be ambiguuous - arr1d = np.array([None, None]) - arr1d[0] = [1, 2] - arr1d[1] = [3] - assert_equal(repr(arr1d), - 'array([list([1, 2]), list([3])], dtype=object)') - - def test_void_scalar_recursion(self): - # gh-9345 - repr(np.void(b'test')) # RecursionError ? - - def test_fieldless_structured(self): - # gh-10366 - no_fields = np.dtype([]) - arr_no_fields = np.empty(4, dtype=no_fields) - assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') - - -class TestComplexArray(object): - def test_str(self): - rvals = [0, 1, -1, np.inf, -np.inf, np.nan] - cvals = [complex(rp, ip) for rp in rvals for ip in rvals] - dtypes = [np.complex64, np.cdouble, np.clongdouble] - actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] - wanted = [ - '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', - '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', - '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', - '[0.+infj]', '[0.+infj]', '[0.+infj]', - '[0.-infj]', '[0.-infj]', '[0.-infj]', - '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', - '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', - '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', - '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', - '[1.+infj]', '[1.+infj]', '[1.+infj]', - '[1.-infj]', '[1.-infj]', '[1.-infj]', - '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', - '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', - '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', - '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', - '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', - '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', - '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', - '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', - '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', - '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', - '[inf+infj]', '[inf+infj]', '[inf+infj]', - '[inf-infj]', '[inf-infj]', '[inf-infj]', - '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', - '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', - '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', - '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', - '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', - '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', - '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', - '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', - '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', - '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', - '[nan+infj]', '[nan+infj]', '[nan+infj]', - '[nan-infj]', '[nan-infj]', '[nan-infj]', - '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] - - for res, val in zip(actual, wanted): - assert_equal(res, val) - -class TestArray2String(object): - def test_basic(self): - """Basic test of array2string.""" - a = np.arange(3) - assert_(np.array2string(a) == '[0 1 2]') - assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') - assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') - - def test_unexpected_kwarg(self): - # ensure than an appropriate TypeError - # is raised when array2string receives - # an unexpected kwarg - - with assert_raises_regex(TypeError, 'nonsense'): - np.array2string(np.array([1, 2, 3]), - nonsense=None) - - def test_format_function(self): - """Test custom format function for each element in array.""" - def _format_function(x): - if np.abs(x) < 1: - return '.' - elif np.abs(x) < 2: - return 'o' - else: - return 'O' - - x = np.arange(3) - if sys.version_info[0] >= 3: - x_hex = "[0x0 0x1 0x2]" - x_oct = "[0o0 0o1 0o2]" - else: - x_hex = "[0x0L 0x1L 0x2L]" - x_oct = "[0L 01L 02L]" - assert_(np.array2string(x, formatter={'all':_format_function}) == - "[. o O]") - assert_(np.array2string(x, formatter={'int_kind':_format_function}) == - "[. 
o O]") - assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == - "[0.0000 1.0000 2.0000]") - assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), - x_hex) - assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), - x_oct) - - x = np.arange(3.) - assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == - "[0.00 1.00 2.00]") - assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == - "[0.00 1.00 2.00]") - - s = np.array(['abc', 'def']) - assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == - '[abcabc defdef]') - - - def test_structure_format(self): - dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - assert_equal(np.array2string(x), - "[('Sarah', [8., 7.]) ('John', [6., 7.])]") - - np.set_printoptions(legacy='1.13') - try: - # for issue #5692 - A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) - A[5:].fill(np.datetime64('NaT')) - assert_equal( - np.array2string(A), - textwrap.dedent("""\ - [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) - ('NaT',) ('NaT',) ('NaT',)]""") - ) - finally: - np.set_printoptions(legacy=False) - - # same again, but with non-legacy behavior - assert_equal( - np.array2string(A), - textwrap.dedent("""\ - [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ( 'NaT',) - ( 'NaT',) ( 'NaT',) - ( 'NaT',) ( 'NaT',)]""") - ) - - # and again, with timedeltas - A = np.full(10, 123456, dtype=[("A", "m8[s]")]) - A[5:].fill(np.datetime64('NaT')) - assert_equal( - np.array2string(A), - textwrap.dedent("""\ - [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) - ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") - ) - - # See #8160 - struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) - assert_equal(np.array2string(struct_int), - "[([ 1, -1],) ([123, 1],)]") - struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], - dtype=[('B', 'i4', (2, 2))]) - assert_equal(np.array2string(struct_2dint), - "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") - - # See #8172 - array_scalar = np.array( - (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) - assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") - - def test_unstructured_void_repr(self): - a = np.array([27, 91, 50, 75, 7, 65, 10, 8, - 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8') - assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") - assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") - assert_equal(repr(a), - r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" - r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") - - assert_equal(eval(repr(a), vars(np)), a) - assert_equal(eval(repr(a[0]), vars(np)), a[0]) - - def test_edgeitems_kwarg(self): - # previously the global print options would be taken over the kwarg - arr = np.zeros(3, int) - assert_equal( - np.array2string(arr, edgeitems=1, threshold=0), - "[0 ... 0]" - ) - - def test_summarize_1d(self): - A = np.arange(1001) - strA = '[ 0 1 2 ... 998 999 1000]' - assert_equal(str(A), strA) - - reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' - assert_equal(repr(A), reprA) - - def test_summarize_2d(self): - A = np.arange(1002).reshape(2, 501) - strA = '[[ 0 1 2 ... 498 499 500]\n' \ - ' [ 501 502 503 ... 
999 1000 1001]]' - assert_equal(str(A), strA) - - reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ - ' [ 501, 502, 503, ..., 999, 1000, 1001]])' - assert_equal(repr(A), reprA) - - def test_linewidth(self): - a = np.full(6, 1) - - def make_str(a, width, **kw): - return np.array2string(a, separator="", max_line_width=width, **kw) - - assert_equal(make_str(a, 8, legacy='1.13'), '[111111]') - assert_equal(make_str(a, 7, legacy='1.13'), '[111111]') - assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n' - ' 11]') - - assert_equal(make_str(a, 8), '[111111]') - assert_equal(make_str(a, 7), '[11111\n' - ' 1]') - assert_equal(make_str(a, 5), '[111\n' - ' 111]') - - b = a[None,None,:] - - assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]') - assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]') - assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n' - ' 1]]]') - - assert_equal(make_str(b, 12), '[[[111111]]]') - assert_equal(make_str(b, 9), '[[[111\n' - ' 111]]]') - assert_equal(make_str(b, 8), '[[[11\n' - ' 11\n' - ' 11]]]') - - def test_wide_element(self): - a = np.array(['xxxxx']) - assert_equal( - np.array2string(a, max_line_width=5), - "['xxxxx']" - ) - assert_equal( - np.array2string(a, max_line_width=5, legacy='1.13'), - "[ 'xxxxx']" - ) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_refcount(self): - # make sure we do not hold references to the array due to a recursive - # closure (gh-10620) - gc.disable() - a = np.arange(2) - r1 = sys.getrefcount(a) - np.array2string(a) - np.array2string(a) - r2 = sys.getrefcount(a) - gc.collect() - gc.enable() - assert_(r1 == r2) - -class TestPrintOptions(object): - """Test getting and setting global print options.""" - - def setup(self): - self.oldopts = np.get_printoptions() - - def teardown(self): - np.set_printoptions(**self.oldopts) - - def test_basic(self): - x = np.array([1.5, 0, 1.234567890]) - assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])") - np.set_printoptions(precision=4) - assert_equal(repr(x), "array([1.5 , 0. , 1.2346])") - - def test_precision_zero(self): - np.set_printoptions(precision=0) - for values, string in ( - ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."), - ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."), - ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."), - ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")): - x = np.array(values) - assert_equal(repr(x), "array([%s])" % string) - - def test_formatter(self): - x = np.arange(3) - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - - def test_formatter_reset(self): - x = np.arange(3) - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'int':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'all':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - np.set_printoptions(formatter={'int':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'int_kind':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - x = np.arange(3.) 
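# A compact standalone illustration of the formatter option exercised around
# this point, assuming documented np.set_printoptions behavior: a
# 'float_kind' entry overrides float formatting, and setting it back to None
# restores the default.
import numpy as np

vals = np.arange(3.)
np.set_printoptions(formatter={'float_kind': lambda v: '%.1f' % v})
print(repr(vals))   # array([0.0, 1.0, 2.0])
np.set_printoptions(formatter={'float_kind': None})
print(repr(vals))   # array([0., 1., 2.])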
- np.set_printoptions(formatter={'float':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") - np.set_printoptions(formatter={'float_kind':None}) - assert_equal(repr(x), "array([0., 1., 2.])") - - def test_0d_arrays(self): - unicode = type(u'') - - assert_equal(unicode(np.array(u'café', '= 3: - assert_equal(repr(np.array('café', '= 3 else '|S4' - assert_equal(repr(np.ones(3, dtype=styp)), - "array(['1', '1', '1'], dtype='{}')".format(styp)) - assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\ - array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'], - dtype='{}')""".format(styp))) - - def test_linewidth_repr(self): - a = np.full(7, fill_value=2) - np.set_printoptions(linewidth=17) - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, - 2, 2, 2, - 2])""") - ) - np.set_printoptions(linewidth=17, legacy='1.13') - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, - 2, 2, 2, 2])""") - ) - - a = np.full(8, fill_value=2) - - np.set_printoptions(linewidth=18, legacy=False) - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, - 2, 2, 2, - 2, 2])""") - ) - - np.set_printoptions(linewidth=18, legacy='1.13') - assert_equal( - repr(a), - textwrap.dedent("""\ - array([2, 2, 2, 2, - 2, 2, 2, 2])""") - ) - - def test_linewidth_str(self): - a = np.full(18, fill_value=2) - np.set_printoptions(linewidth=18) - assert_equal( - str(a), - textwrap.dedent("""\ - [2 2 2 2 2 2 2 2 - 2 2 2 2 2 2 2 2 - 2 2]""") - ) - np.set_printoptions(linewidth=18, legacy='1.13') - assert_equal( - str(a), - textwrap.dedent("""\ - [2 2 2 2 2 2 2 2 2 - 2 2 2 2 2 2 2 2 2]""") - ) - - def test_edgeitems(self): - np.set_printoptions(edgeitems=1, threshold=1) - a = np.arange(27).reshape((3, 3, 3)) - assert_equal( - repr(a), - textwrap.dedent("""\ - array([[[ 0, ..., 2], - ..., - [ 6, ..., 8]], - - ..., - - [[18, ..., 20], - ..., - [24, ..., 26]]])""") - ) - - b = np.zeros((3, 3, 1, 1)) - assert_equal( - repr(b), - textwrap.dedent("""\ - array([[[[0.]], - - ..., - - [[0.]]], - - - ..., - - - [[[0.]], - - ..., - - [[0.]]]])""") - ) - - # 1.13 had extra trailing spaces, and was missing newlines - np.set_printoptions(legacy='1.13') - - assert_equal( - repr(a), - textwrap.dedent("""\ - array([[[ 0, ..., 2], - ..., - [ 6, ..., 8]], - - ..., - [[18, ..., 20], - ..., - [24, ..., 26]]])""") - ) - - assert_equal( - repr(b), - textwrap.dedent("""\ - array([[[[ 0.]], - - ..., - [[ 0.]]], - - - ..., - [[[ 0.]], - - ..., - [[ 0.]]]])""") - ) - - def test_bad_args(self): - assert_raises(ValueError, np.set_printoptions, threshold=float('nan')) - assert_raises(TypeError, np.set_printoptions, threshold='1') - assert_raises(TypeError, np.set_printoptions, threshold=b'1') - -def test_unicode_object_array(): - import sys - if sys.version_info[0] >= 3: - expected = "array(['é'], dtype=object)" - else: - expected = "array([u'\\xe9'], dtype=object)" - x = np.array([u'\xe9'], dtype=object) - assert_equal(repr(x), expected) - - -class TestContextManager(object): - def test_ctx_mgr(self): - # test that context manager actuall works - with np.printoptions(precision=2): - s = str(np.array([2.0]) / 3) - assert_equal(s, '[0.67]') - - def test_ctx_mgr_restores(self): - # test that print options are actually restrored - opts = np.get_printoptions() - with np.printoptions(precision=opts['precision'] - 1, - linewidth=opts['linewidth'] - 4): - pass - assert_equal(np.get_printoptions(), opts) - - def test_ctx_mgr_exceptions(self): - # test that print options are restored even if an 
exception is raised - opts = np.get_printoptions() - try: - with np.printoptions(precision=2, linewidth=11): - raise ValueError - except ValueError: - pass - assert_equal(np.get_printoptions(), opts) - - def test_ctx_mgr_as_smth(self): - opts = {"precision": 2} - with np.printoptions(**opts) as ctx: - saved_opts = ctx.copy() - assert_equal({k: saved_opts[k] for k in opts}, opts) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_datetime.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_datetime.py deleted file mode 100644 index d38444e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_datetime.py +++ /dev/null @@ -1,2375 +0,0 @@ -from __future__ import division, absolute_import, print_function - - -import numpy -import numpy as np -import datetime -import pytest -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, - assert_raises_regex, - ) -from numpy.compat import pickle - -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False - -try: - RecursionError -except NameError: - RecursionError = RuntimeError # python < 3.5 - - -class TestDateTime(object): - def test_datetime_dtype_creation(self): - for unit in ['Y', 'M', 'W', 'D', - 'h', 'm', 's', 'ms', 'us', - 'ns', 'ps', 'fs', 'as']: - dt1 = np.dtype('M8[750%s]' % unit) - assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) - dt2 = np.dtype('m8[%s]' % unit) - assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) - - # Generic units shouldn't add [] to the end - assert_equal(str(np.dtype("M8")), "datetime64") - - # Should be possible to specify the endianness - assert_equal(np.dtype("=M8"), np.dtype("M8")) - assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) - assert_(np.dtype(">M8") == np.dtype("M8") or - np.dtype("M8[D]") == np.dtype("M8[D]") or - np.dtype("M8") != np.dtype("m8") == np.dtype("m8") or - np.dtype("m8[D]") == np.dtype("m8[D]") or - np.dtype("m8") != np.dtype(" Scalars - assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) - assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) - assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) - assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) - assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]')) - - # Arrays -> Scalars - assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) - assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) - assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) - assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) - assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) - - # NaN -> NaT - nan = np.array([np.nan] * 8) - fnan = nan.astype('f') - lnan = nan.astype('g') - cnan = nan.astype('D') - cfnan = nan.astype('F') - clnan = nan.astype('G') - - nat = np.array([np.datetime64('NaT')] * 8) - assert_equal(nan.astype('M8[ns]'), nat) - assert_equal(fnan.astype('M8[ns]'), nat) - assert_equal(lnan.astype('M8[ns]'), nat) - assert_equal(cnan.astype('M8[ns]'), nat) - assert_equal(cfnan.astype('M8[ns]'), nat) - assert_equal(clnan.astype('M8[ns]'), nat) - - nat = np.array([np.timedelta64('NaT')] * 8) - assert_equal(nan.astype('timedelta64[ns]'), nat) - assert_equal(fnan.astype('timedelta64[ns]'), nat) - assert_equal(lnan.astype('timedelta64[ns]'), nat) - assert_equal(cnan.astype('timedelta64[ns]'), nat) - 
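# A short standalone sketch of the NaN -> NaT casting checked above, assuming
# NumPy >= 1.13 semantics: float NaN converts to NaT, and NaT, like NaN, does
# not compare equal to itself.
import numpy as np

nat = np.array([np.nan]).astype('datetime64[ns]')[0]
print(nat)            # NaT
print(np.isnat(nat))  # True
print(nat == nat)     # False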
assert_equal(cfnan.astype('timedelta64[ns]'), nat) - assert_equal(clnan.astype('timedelta64[ns]'), nat) - - def test_days_creation(self): - assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 - 365) - assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3) - assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 + 366) - assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4) - assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4 + 365) - assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) - assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) - assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) - assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) - assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) - assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) - assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) - assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) - assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4) - assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366) - assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3) - assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) - - assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) - assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) - assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) - assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) - assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) - - def test_days_to_pydate(self): - assert_equal(np.array('1599', dtype='M8[D]').astype('O'), - datetime.date(1599, 1, 1)) - assert_equal(np.array('1600', dtype='M8[D]').astype('O'), - datetime.date(1600, 1, 1)) - assert_equal(np.array('1601', dtype='M8[D]').astype('O'), - datetime.date(1601, 1, 1)) - assert_equal(np.array('1900', dtype='M8[D]').astype('O'), - datetime.date(1900, 1, 1)) - assert_equal(np.array('1901', dtype='M8[D]').astype('O'), - datetime.date(1901, 1, 1)) - assert_equal(np.array('2000', dtype='M8[D]').astype('O'), - datetime.date(2000, 1, 1)) - assert_equal(np.array('2001', dtype='M8[D]').astype('O'), - datetime.date(2001, 1, 1)) - assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), - datetime.date(1600, 2, 29)) - assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), - datetime.date(1600, 3, 1)) - assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), - datetime.date(2001, 3, 22)) - - def test_dtype_comparison(self): - assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) - assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) - assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) - assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) - - def test_pydatetime_creation(self): - a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') - 
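The day-count arithmetic asserted in test_days_creation follows from 'M8[D]' storing an integer count of days since the Unix epoch; a small illustration of the same encoding:

import numpy as np
import datetime

print(np.array('1970-01-01', dtype='M8[D]').astype('i8'))  # 0
print(np.array('1971-01-01', dtype='M8[D]').astype('i8'))  # 365
# Day-unit values round-trip to datetime.date via astype('O')
assert np.array('2000-02-29', dtype='M8[D]').astype('O') == datetime.date(2000, 2, 29)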
assert_equal(a[0], a[1]) - a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') - assert_equal(a[0], a[1]) - a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') - assert_equal(a[0], a[1]) - # Will fail if the date changes during the exact right moment - a = np.array(['today', datetime.date.today()], dtype='M8[D]') - assert_equal(a[0], a[1]) - # datetime.datetime.now() returns local time, not UTC - #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') - #assert_equal(a[0], a[1]) - - # we can give a datetime.date time units - assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), - np.array(np.datetime64('1960-03-12T00:00:00'))) - - def test_datetime_string_conversion(self): - a = ['2011-03-16', '1920-01-01', '2013-05-19'] - str_a = np.array(a, dtype='S') - uni_a = np.array(a, dtype='U') - dt_a = np.array(a, dtype='M') - - # String to datetime - assert_equal(dt_a, str_a.astype('M')) - assert_equal(dt_a.dtype, str_a.astype('M').dtype) - dt_b = np.empty_like(dt_a) - dt_b[...] = str_a - assert_equal(dt_a, dt_b) - - # Datetime to string - assert_equal(str_a, dt_a.astype('S0')) - str_b = np.empty_like(str_a) - str_b[...] = dt_a - assert_equal(str_a, str_b) - - # Unicode to datetime - assert_equal(dt_a, uni_a.astype('M')) - assert_equal(dt_a.dtype, uni_a.astype('M').dtype) - dt_b = np.empty_like(dt_a) - dt_b[...] = uni_a - assert_equal(dt_a, dt_b) - - # Datetime to unicode - assert_equal(uni_a, dt_a.astype('U')) - uni_b = np.empty_like(uni_a) - uni_b[...] = dt_a - assert_equal(uni_a, uni_b) - - # Datetime to long string - gh-9712 - assert_equal(str_a, dt_a.astype((np.string_, 128))) - str_b = np.empty(str_a.shape, dtype=(np.string_, 128)) - str_b[...] = dt_a - assert_equal(str_a, str_b) - - def test_datetime_array_str(self): - a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') - assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") - - a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') - assert_equal(np.array2string(a, separator=', ', - formatter={'datetime': lambda x: - "'%s'" % np.datetime_as_string(x, timezone='UTC')}), - "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") - - # Check that one NaT doesn't corrupt subsequent entries - a = np.array(['2010', 'NaT', '2030']).astype('M') - assert_equal(str(a), "['2010' 'NaT' '2030']") - - def test_timedelta_array_str(self): - a = np.array([-1, 0, 100], dtype='m') - assert_equal(str(a), "[ -1 0 100]") - a = np.array(['NaT', 'NaT'], dtype='m') - assert_equal(str(a), "['NaT' 'NaT']") - # Check right-alignment with NaTs - a = np.array([-1, 'NaT', 0], dtype='m') - assert_equal(str(a), "[ -1 'NaT' 0]") - a = np.array([-1, 'NaT', 1234567], dtype='m') - assert_equal(str(a), "[ -1 'NaT' 1234567]") - - # Test with other byteorder: - a = np.array([-1, 'NaT', 1234567], dtype='>m') - assert_equal(str(a), "[ -1 'NaT' 1234567]") - a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." 
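The string/datetime conversions tested above can be sketched standalone; the generic 'M' dtype lets NumPy infer the unit from the parsed strings:

import numpy as np

s = np.array(['2011-03-16', '1920-01-01'], dtype='S')
d = s.astype('M')                   # parse byte strings to datetime64
print(d.dtype)                      # datetime64[D]
print(d.astype('U'))                # back to unicode strings
print(np.datetime_as_string(d[0]))  # 2011-03-16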
- assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) - - def test_setstate(self): - "Verify that datetime dtype __setstate__ can handle bad arguments" - dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) - assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) - assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - - def test_dtype_promotion(self): - # datetime datetime computes the metadata gcd - # timedelta timedelta computes the metadata gcd - for mM in ['m', 'M']: - assert_equal( - np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), - np.dtype(mM+'8[2Y]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), - np.dtype(mM+'8[3Y]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), - np.dtype(mM+'8[2M]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), - np.dtype(mM+'8[1D]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), - np.dtype(mM+'8[s]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), - np.dtype(mM+'8[7s]')) - # timedelta timedelta raises when there is no reasonable gcd - assert_raises(TypeError, np.promote_types, - np.dtype('m8[Y]'), np.dtype('m8[D]')) - assert_raises(TypeError, np.promote_types, - np.dtype('m8[M]'), np.dtype('m8[W]')) - # timedelta timedelta may overflow with big unit ranges - assert_raises(OverflowError, np.promote_types, - np.dtype('m8[W]'), np.dtype('m8[fs]')) - assert_raises(OverflowError, np.promote_types, - np.dtype('m8[s]'), np.dtype('m8[as]')) - - def test_cast_overflow(self): - # gh-4486 - def cast(): - numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("datetime64[%s]', - 'timedelta64[%s]']) - def test_isfinite_isinf_isnan_units(self, unit, dstr): - '''check isfinite, isinf, isnan for all units of M, m dtypes - ''' - arr_val = [123, -321, "NaT"] - arr = np.array(arr_val, dtype= dstr % unit) - pos = np.array([True, True, False]) - neg = np.array([False, False, True]) - false = np.array([False, False, False]) - assert_equal(np.isfinite(arr), pos) - assert_equal(np.isinf(arr), false) - assert_equal(np.isnan(arr), neg) - - def test_assert_equal(self): - assert_raises(AssertionError, assert_equal, - np.datetime64('nat'), np.timedelta64('nat')) - - def test_corecursive_input(self): - # construct a co-recursive list - a, b = [], [] - a.append(b) - b.append(a) - obj_arr = np.array([None]) - obj_arr[0] = a - - # gh-11154: This shouldn't cause a C stack overflow - assert_raises(RecursionError, obj_arr.astype, 'M8') - assert_raises(RecursionError, obj_arr.astype, 'm8') - - @pytest.mark.parametrize("time_unit", [ - "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", - # compound units - "10D", "2M", - ]) - def test_limit_symmetry(self, time_unit): - """ - Dates should have symmetric limits around the unix epoch at +/-np.int64 - """ - epoch = np.datetime64(0, time_unit) - latest = np.datetime64(np.iinfo(np.int64).max, time_unit) - earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) - - # above should not have overflowed - assert earliest < epoch < latest - - @pytest.mark.parametrize("time_unit", [ - "Y", "M", - pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), - "D", "h", "m", - "s", "ms", "us", "ns", "ps", "fs", "as", - pytest.param("10D", 
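The promotion rules asserted in test_dtype_promotion above amount to taking the gcd of the unit metadata; a quick standalone check:

import numpy as np

# gcd(12Y, 15Y) = 3Y; 1 week and 2 days promote to their gcd, 1 day
print(np.promote_types('m8[12Y]', 'm8[15Y]'))                     # timedelta64[3Y]
print(np.promote_types('m8[1W]', 'm8[2D]') == np.dtype('m8[D]'))  # True
# No sensible gcd across the year/day boundary, so this raises
try:
    np.promote_types('m8[Y]', 'm8[D]')
except TypeError as exc:
    print(exc)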
marks=pytest.mark.xfail(reason="similar to gh-13197")), - ]) - @pytest.mark.parametrize("sign", [-1, 1]) - def test_limit_str_roundtrip(self, time_unit, sign): - """ - Limits should roundtrip when converted to strings. - - This tests the conversion to and from npy_datetimestruct. - """ - # TODO: add absolute (gold standard) time span limit strings - limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) - - # Convert to string and back. Explicit unit needed since the day and - # week reprs are not distinguishable. - limit_via_str = np.datetime64(str(limit), time_unit) - assert limit_via_str == limit - - -class TestDateTimeData(object): - - def test_basic(self): - a = np.array(['1980-03-23'], dtype=np.datetime64) - assert_equal(np.datetime_data(a.dtype), ('D', 1)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_defchararray.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_defchararray.py deleted file mode 100644 index 7b0e6f8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_defchararray.py +++ /dev/null @@ -1,692 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -from numpy.core.multiarray import _vec_string -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - assert_raises_regex, suppress_warnings, - ) - -kw_unicode_true = {'unicode': True} # make 2to3 work properly -kw_unicode_false = {'unicode': False} - -class TestBasic(object): - def test_from_object_array(self): - A = np.array([['abc', 2], - ['long ', '0123456789']], dtype='O') - B = np.char.array(A) - assert_equal(B.dtype.itemsize, 10) - assert_array_equal(B, [[b'abc', b'2'], - [b'long', b'0123456789']]) - - def test_from_object_array_unicode(self): - A = np.array([['abc', u'Sigma \u03a3'], - ['long ', '0123456789']], dtype='O') - assert_raises(ValueError, np.char.array, (A,)) - B = np.char.array(A, **kw_unicode_true) - assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) - assert_array_equal(B, [['abc', u'Sigma \u03a3'], - ['long', '0123456789']]) - - def test_from_string_array(self): - A = np.array([[b'abc', b'foo'], - [b'long ', b'0123456789']]) - assert_equal(A.dtype.type, np.string_) - B = np.char.array(A) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - B[0, 0] = 'changed' - assert_(B[0, 0] != A[0, 0]) - C = np.char.asarray(A) - assert_array_equal(C, A) - assert_equal(C.dtype, A.dtype) - C[0, 0] = 'changed again' - assert_(C[0, 0] != B[0, 0]) - assert_(C[0, 0] == A[0, 0]) - - def test_from_unicode_array(self): - A = np.array([['abc', u'Sigma \u03a3'], - ['long ', '0123456789']]) - assert_equal(A.dtype.type, np.unicode_) - B = np.char.array(A) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - B = np.char.array(A, **kw_unicode_true) - assert_array_equal(B, A) - assert_equal(B.dtype, A.dtype) - assert_equal(B.shape, A.shape) - - def fail(): - np.char.array(A, **kw_unicode_false) - - assert_raises(UnicodeEncodeError, fail) - - def test_unicode_upconvert(self): - A = np.char.array(['abc']) - B = np.char.array([u'\u03a3']) - assert_(issubclass((A + B).dtype.type, np.unicode_)) - - def test_from_string(self): - A = np.char.array(b'abc') - assert_equal(len(A), 1) - assert_equal(len(A[0]), 3) - assert_(issubclass(A.dtype.type, np.string_)) - - def test_from_unicode(self): - A = np.char.array(u'\u03a3') - assert_equal(len(A), 1) - assert_equal(len(A[0]), 1) - 
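For the chararray construction tests above, the essential behavior is that np.char.array sizes the dtype to the longest element and exposes vectorized str methods; a minimal sketch:

import numpy as np

A = np.char.array(['abc', 'long 0123456789'])
print(A.dtype)    # <U15 (itemsize grows to fit the longest element)
print(A.upper())  # ['ABC' 'LONG 0123456789']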
assert_equal(A.itemsize, 4) - assert_(issubclass(A.dtype.type, np.unicode_)) - -class TestVecString(object): - def test_non_existent_method(self): - - def fail(): - _vec_string('a', np.string_, 'bogus') - - assert_raises(AttributeError, fail) - - def test_non_string_array(self): - - def fail(): - _vec_string(1, np.string_, 'strip') - - assert_raises(TypeError, fail) - - def test_invalid_args_tuple(self): - - def fail(): - _vec_string(['a'], np.string_, 'strip', 1) - - assert_raises(TypeError, fail) - - def test_invalid_type_descr(self): - - def fail(): - _vec_string(['a'], 'BOGUS', 'strip') - - assert_raises(TypeError, fail) - - def test_invalid_function_args(self): - - def fail(): - _vec_string(['a'], np.string_, 'strip', (1,)) - - assert_raises(TypeError, fail) - - def test_invalid_result_type(self): - - def fail(): - _vec_string(['a'], np.integer, 'strip') - - assert_raises(TypeError, fail) - - def test_broadcast_error(self): - - def fail(): - _vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],)) - - assert_raises(ValueError, fail) - - -class TestWhitespace(object): - def setup(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - - def test1(self): - assert_(np.all(self.A == self.B)) - assert_(np.all(self.A >= self.B)) - assert_(np.all(self.A <= self.B)) - assert_(not np.any(self.A > self.B)) - assert_(not np.any(self.A < self.B)) - assert_(not np.any(self.A != self.B)) - -class TestChar(object): - def setup(self): - self.A = np.array('abc1', dtype='c').view(np.chararray) - - def test_it(self): - assert_equal(self.A.shape, (4,)) - assert_equal(self.A.upper()[:2].tobytes(), b'AB') - -class TestComparisons(object): - def setup(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']]).view(np.chararray) - - def test_not_equal(self): - assert_array_equal((self.A != self.B), [[True, False], [True, True]]) - - def test_equal(self): - assert_array_equal((self.A == self.B), [[False, True], [False, False]]) - - def test_greater_equal(self): - assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) - - def test_less_equal(self): - assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) - - def test_greater(self): - assert_array_equal((self.A > self.B), [[False, False], [True, True]]) - - def test_less(self): - assert_array_equal((self.A < self.B), [[True, False], [False, False]]) - -class TestComparisonsMixed1(TestComparisons): - """Ticket #1276""" - - def setup(self): - TestComparisons.setup(self) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']], np.unicode_).view(np.chararray) - -class TestComparisonsMixed2(TestComparisons): - """Ticket #1276""" - - def setup(self): - TestComparisons.setup(self) - self.A = np.array([['abc', '123'], - ['789', 'xyz']], np.unicode_).view(np.chararray) - -class TestInformation(object): - def setup(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) - self.B = np.array([[u' \u03a3 ', u''], - [u'12345', u'MixedCase'], - [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) - - def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) - - def test_count(self): - 
assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) - # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) - - def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool_)) - assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) - - def fail(): - self.A.endswith('3', 'fdjk') - - assert_raises(TypeError, fail) - - def test_find(self): - assert_(issubclass(self.A.find('a').dtype.type, np.integer)) - assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) - assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) - - def test_index(self): - - def fail(): - self.A.index('a') - - assert_raises(ValueError, fail) - assert_(np.char.index('abcba', 'b') == 1) - assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) - - def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool_)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) - - def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool_)) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) - - def test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool_)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) - - def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool_)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) - - def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool_)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) - - def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool_)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) - - def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool_)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) - - def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) - - def test_rindex(self): - - def fail(): - self.A.rindex('a') - - assert_raises(ValueError, fail) - assert_(np.char.rindex('abcba', 'b') == 3) - assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) - - def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool_)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - 
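The query methods exercised above (count, find, endswith, ...) are elementwise versions of the corresponding str builtins, also reachable through the np.char module functions:

import numpy as np

A = [' abc ', '12345']
print(np.char.find(A, 'a'))        # [ 1 -1]
print(np.char.count(A, '1'))       # [0 1]
print(np.char.startswith(A, '1'))  # [False  True]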
assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) - - def fail(): - self.A.startswith('3', 'fdjk') - - assert_raises(TypeError, fail) - - -class TestMethods(object): - def setup(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.chararray) - self.B = np.array([[u' \u03a3 ', u''], - [u'12345', u'MixedCase'], - [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) - - def test_capitalize(self): - tgt = [[b' abc ', b''], - [b'12345', b'Mixedcase'], - [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.capitalize().dtype.type, np.string_)) - assert_array_equal(self.A.capitalize(), tgt) - - tgt = [[u' \u03c3 ', ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_)) - assert_array_equal(self.B.capitalize(), tgt) - - def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.string_)) - C = self.A.center([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - - C = self.A.center(20, b'#') - assert_(np.all(C.startswith(b'#'))) - assert_(np.all(C.endswith(b'#'))) - - C = np.char.center(b'FOO', [[10, 20], [15, 8]]) - tgt = [[b' FOO ', b' FOO '], - [b' FOO ', b' FOO ']] - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, tgt) - - def test_decode(self): - if sys.version_info[0] >= 3: - A = np.char.array([b'\\u03a3']) - assert_(A.decode('unicode-escape')[0] == '\u03a3') - else: - with suppress_warnings() as sup: - if sys.py3kwarning: - sup.filter(DeprecationWarning, "'hex_codec'") - A = np.char.array(['736563726574206d657373616765']) - assert_(A.decode('hex_codec')[0] == 'secret message') - - def test_encode(self): - B = self.B.encode('unicode_escape') - assert_(B[0][0] == str(' \\u03a3 ').encode('latin1')) - - def test_expandtabs(self): - T = self.A.expandtabs() - assert_(T[2, 0] == b'123 345 \0') - - def test_join(self): - if sys.version_info[0] >= 3: - # NOTE: list(b'123') == [49, 50, 51] - # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') - else: - A0 = self.A - - A = np.char.join([',', '#'], A0) - if sys.version_info[0] >= 3: - assert_(issubclass(A.dtype.type, np.unicode_)) - else: - assert_(issubclass(A.dtype.type, np.string_)) - tgt = np.array([[' ,a,b,c, ', ''], - ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], - ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) - assert_array_equal(np.char.join([',', '#'], A0), tgt) - - def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.string_)) - - C = self.A.ljust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - - C = self.A.ljust(20, b'#') - assert_array_equal(C.startswith(b'#'), [ - [False, True], [False, False], [False, False]]) - assert_(np.all(C.endswith(b'#'))) - - C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) - tgt = [[b'FOO ', b'FOO '], - [b'FOO ', b'FOO ']] - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, tgt) - - def test_lower(self): - tgt = [[b' abc ', b''], - [b'12345', b'mixedcase'], - [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.lower().dtype.type, np.string_)) - assert_array_equal(self.A.lower(), tgt) - - tgt = [[u' \u03c3 ', u''], - [u'12345', u'mixedcase'], - [u'123 \t 345 \0 ', u'upper']] - assert_(issubclass(self.B.lower().dtype.type, np.unicode_)) - assert_array_equal(self.B.lower(), tgt) - - def test_lstrip(self): - tgt = [[b'abc ', b''], - [b'12345', b'MixedCase'], - [b'123 \t 345 
\0 ', b'UPPER']] - assert_(issubclass(self.A.lstrip().dtype.type, np.string_)) - assert_array_equal(self.A.lstrip(), tgt) - - tgt = [[b' abc', b''], - [b'2345', b'ixedCase'], - [b'23 \t 345 \x00', b'UPPER']] - assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) - - tgt = [[u'\u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_)) - assert_array_equal(self.B.lstrip(), tgt) - - def test_partition(self): - P = self.A.partition([b'3', b'M']) - tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], - [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], - [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] - assert_(issubclass(P.dtype.type, np.string_)) - assert_array_equal(P, tgt) - - def test_replace(self): - R = self.A.replace([b'3', b'a'], - [b'##########', b'@']) - tgt = [[b' abc ', b''], - [b'12##########45', b'MixedC@se'], - [b'12########## \t ##########45 \x00', b'UPPER']] - assert_(issubclass(R.dtype.type, np.string_)) - assert_array_equal(R, tgt) - - if sys.version_info[0] < 3: - # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3 - R = self.A.replace(b'a', u'\u03a3') - tgt = [[u' \u03a3bc ', ''], - ['12345', u'MixedC\u03a3se'], - ['123 \t 345 \x00', 'UPPER']] - assert_(issubclass(R.dtype.type, np.unicode_)) - assert_array_equal(R, tgt) - - def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.string_)) - - C = self.A.rjust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - - C = self.A.rjust(20, b'#') - assert_(np.all(C.startswith(b'#'))) - assert_array_equal(C.endswith(b'#'), - [[False, True], [False, False], [False, False]]) - - C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) - tgt = [[b' FOO', b' FOO'], - [b' FOO', b' FOO']] - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, tgt) - - def test_rpartition(self): - P = self.A.rpartition([b'3', b'M']) - tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], - [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], - [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] - assert_(issubclass(P.dtype.type, np.string_)) - assert_array_equal(P, tgt) - - def test_rsplit(self): - A = self.A.rsplit(b'3') - tgt = [[[b' abc '], [b'']], - [[b'12', b'45'], [b'MixedCase']], - [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] - assert_(issubclass(A.dtype.type, np.object_)) - assert_equal(A.tolist(), tgt) - - def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.string_)) - - tgt = [[b' abc', b''], - [b'12345', b'MixedCase'], - [b'123 \t 345', b'UPPER']] - assert_array_equal(self.A.rstrip(), tgt) - - tgt = [[b' abc ', b''], - [b'1234', b'MixedCase'], - [b'123 \t 345 \x00', b'UPP'] - ] - assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) - - tgt = [[u' \u03a3', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_)) - assert_array_equal(self.B.rstrip(), tgt) - - def test_strip(self): - tgt = [[b'abc', b''], - [b'12345', b'MixedCase'], - [b'123 \t 345', b'UPPER']] - assert_(issubclass(self.A.strip().dtype.type, np.string_)) - assert_array_equal(self.A.strip(), tgt) - - tgt = [[b' abc ', b''], - [b'234', b'ixedCas'], - [b'23 \t 345 \x00', b'UPP']] - assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) - - tgt = [[u'\u03a3', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.strip().dtype.type, np.unicode_)) - assert_array_equal(self.B.strip(), tgt) - - def test_split(self): - A = 
self.A.split(b'3') - tgt = [ - [[b' abc '], [b'']], - [[b'12', b'45'], [b'MixedCase']], - [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] - assert_(issubclass(A.dtype.type, np.object_)) - assert_equal(A.tolist(), tgt) - - def test_splitlines(self): - A = np.char.array(['abc\nfds\nwer']).splitlines() - assert_(issubclass(A.dtype.type, np.object_)) - assert_(A.shape == (1,)) - assert_(len(A[0]) == 3) - - def test_swapcase(self): - tgt = [[b' ABC ', b''], - [b'12345', b'mIXEDcASE'], - [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.swapcase().dtype.type, np.string_)) - assert_array_equal(self.A.swapcase(), tgt) - - tgt = [[u' \u03c3 ', u''], - [u'12345', u'mIXEDcASE'], - [u'123 \t 345 \0 ', u'upper']] - assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_)) - assert_array_equal(self.B.swapcase(), tgt) - - def test_title(self): - tgt = [[b' Abc ', b''], - [b'12345', b'Mixedcase'], - [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.title().dtype.type, np.string_)) - assert_array_equal(self.A.title(), tgt) - - tgt = [[u' \u03a3 ', u''], - [u'12345', u'Mixedcase'], - [u'123 \t 345 \0 ', u'Upper']] - assert_(issubclass(self.B.title().dtype.type, np.unicode_)) - assert_array_equal(self.B.title(), tgt) - - def test_upper(self): - tgt = [[b' ABC ', b''], - [b'12345', b'MIXEDCASE'], - [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.upper().dtype.type, np.string_)) - assert_array_equal(self.A.upper(), tgt) - - tgt = [[u' \u03a3 ', u''], - [u'12345', u'MIXEDCASE'], - [u'123 \t 345 \0 ', u'UPPER']] - assert_(issubclass(self.B.upper().dtype.type, np.unicode_)) - assert_array_equal(self.B.upper(), tgt) - - def test_isnumeric(self): - - def fail(): - self.A.isnumeric() - - assert_raises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) - assert_array_equal(self.B.isnumeric(), [ - [False, False], [True, False], [False, False]]) - - def test_isdecimal(self): - - def fail(): - self.A.isdecimal() - - assert_raises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) - assert_array_equal(self.B.isdecimal(), [ - [False, False], [True, False], [False, False]]) - - -class TestOperations(object): - def setup(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '456'], - ['051', 'tuv']]).view(np.chararray) - - def test_add(self): - AB = np.array([['abcefg', '123456'], - ['789051', 'xyztuv']]).view(np.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) - - def test_radd(self): - QA = np.array([['qabc', 'q123'], - ['q789', 'qxyz']]).view(np.chararray) - assert_array_equal(QA, ('q' + self.A)) - - def test_mul(self): - A = self.A - for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) - - assert_array_equal(Ar, (self.A * r)) - - for ob in [object(), 'qrs']: - with assert_raises_regex(ValueError, - 'Can only multiply by integers'): - A*ob - - def test_rmul(self): - A = self.A - for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) - assert_array_equal(Ar, (r * self.A)) - - for ob in [object(), 'qrs']: - with assert_raises_regex(ValueError, - 'Can only multiply by integers'): - ob * A - - def test_mod(self): - """Ticket #856""" - F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) - C = np.array([[3, 7], [19, 1]]) - FC = np.array([['3', '7.000000'], - ['19', '1']]).view(np.chararray) - 
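The mod test below (ticket #856) relies on chararray.__mod__ applying printf-style formatting elementwise; sketched standalone with an illustrative format array:

import numpy as np

F = np.array([['%d', '%.2f']]).view(np.chararray)
C = np.array([[3, 7]])
print(F % C)  # [['3' '7.00']], formatting applied element by element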
assert_array_equal(FC, F % C) - - A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray) - A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) - assert_array_equal(A1, (A % 1)) - - A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) - assert_array_equal(A2, (A % [[1, 2], [3, 4]])) - - def test_rmod(self): - assert_(("%s" % self.A) == str(self.A)) - assert_(("%r" % self.A) == repr(self.A)) - - for ob in [42, object()]: - with assert_raises_regex( - TypeError, "unsupported operand type.* and 'chararray'"): - ob % self.A - - def test_slice(self): - """Regression test for https://github.com/numpy/numpy/issues/5982""" - - arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], - dtype='S4').view(np.chararray) - sl1 = arr[:] - assert_array_equal(sl1, arr) - assert_(sl1.base is arr) - assert_(sl1.base.base is arr.base) - - sl2 = arr[:, :] - assert_array_equal(sl2, arr) - assert_(sl2.base is arr) - assert_(sl2.base.base is arr.base) - - assert_(arr[0, 0] == b'abc') - - -def test_empty_indexing(): - """Regression test for ticket 1948.""" - # Check that indexing a chararray with an empty list/array returns an - # empty chararray instead of a chararray with a single empty string in it. - s = np.chararray((4,)) - assert_(s[[]].size == 0) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py deleted file mode 100644 index 363ff26..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_deprecations.py +++ /dev/null @@ -1,570 +0,0 @@ -""" -Tests related to deprecation warnings. Also a convenient place -to document how deprecations should eventually be turned into errors. - -""" -from __future__ import division, absolute_import, print_function - -import datetime -import sys -import operator -import warnings -import pytest -import shutil -import tempfile - -import numpy as np -from numpy.testing import ( - assert_raises, assert_warns, assert_, assert_array_equal - ) - -from numpy.core._multiarray_tests import fromstring_null_term_c_api - -try: - import pytz - _has_pytz = True -except ImportError: - _has_pytz = False - - -class _DeprecationTestCase(object): - # Just as warning: warnings uses re.match, so the start of this message - # must match. - message = '' - warning_cls = DeprecationWarning - - def setup(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. Ignoring warnings - # can give very confusing results because of - # https://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. - # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=self.warning_cls) - warnings.filterwarnings("always", message=self.message, - category=self.warning_cls) - - def teardown(self): - self.warn_ctx.__exit__() - - def assert_deprecated(self, function, num=1, ignore_others=False, - function_fails=False, - exceptions=np._NoValue, - args=(), kwargs={}): - """Test if DeprecationWarnings are given and raised. - - This first checks if the function when called gives `num` - DeprecationWarnings, after that it tries to raise these - DeprecationWarnings and compares them with `exceptions`. 
- The exceptions can be different for cases where this code path - is simply not anticipated and the exception is replaced. - - Parameters - ---------- - function : callable - The function to test - num : int - Number of DeprecationWarnings to expect. This should normally be 1. - ignore_others : bool - Whether warnings of the wrong type should be ignored (note that - the message is not checked) - function_fails : bool - If the function would normally fail, setting this will check for - warnings inside a try/except block. - exceptions : Exception or tuple of Exceptions - Exception to expect when turning the warnings into an error. - The default checks for DeprecationWarnings. If exceptions is - empty the function is expected to run successfully. - args : tuple - Arguments for `function` - kwargs : dict - Keyword arguments for `function` - """ - # reset the log - self.log[:] = [] - - if exceptions is np._NoValue: - exceptions = (self.warning_cls,) - - try: - function(*args, **kwargs) - except (Exception if function_fails else tuple()): - pass - - # just in case, clear the registry - num_found = 0 - for warning in self.log: - if warning.category is self.warning_cls: - num_found += 1 - elif not ignore_others: - raise AssertionError( - "expected %s but got: %s" % - (self.warning_cls.__name__, warning.category)) - if num is not None and num_found != num: - msg = "%i warnings found but %i expected." % (len(self.log), num) - lst = [str(w) for w in self.log] - raise AssertionError("\n".join([msg] + lst)) - - with warnings.catch_warnings(): - warnings.filterwarnings("error", message=self.message, - category=self.warning_cls) - try: - function(*args, **kwargs) - if exceptions != tuple(): - raise AssertionError( - "No error raised during function call") - except exceptions: - if exceptions == tuple(): - raise AssertionError( - "Error raised during function call") - - def assert_not_deprecated(self, function, args=(), kwargs={}): - """Test that warnings are not raised. - - This is just a shorthand for: - - self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) - """ - self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) - - -class _VisibleDeprecationTestCase(_DeprecationTestCase): - warning_cls = np.VisibleDeprecationWarning - - -class TestNonTupleNDIndexDeprecation(object): - def test_basic(self): - a = np.zeros((5, 5)) - with warnings.catch_warnings(): - warnings.filterwarnings('always') - assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]]) - assert_warns(FutureWarning, a.__getitem__, [slice(None)]) - - warnings.filterwarnings('error') - assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]]) - assert_raises(FutureWarning, a.__getitem__, [slice(None)]) - - # a a[[0, 1]] always was advanced indexing, so no error/warning - a[[0, 1]] - - -class TestComparisonDeprecations(_DeprecationTestCase): - """This tests the deprecation, for non-element-wise comparison logic. - This used to mean that when an error occurred during element-wise comparison - (i.e. broadcasting) NotImplemented was returned, but also in the comparison - itself, False was given instead of the error. - - Also test FutureWarning for the None comparison. 
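The comparison deprecation described in the docstring above is easy to observe directly: on this NumPy vintage, a failed elementwise comparison returns a scalar and warns instead of raising (a sketch, with the warning category as the deleted tests assert it):

import warnings
import numpy as np

with warnings.catch_warnings(record=True) as log:
    warnings.simplefilter("always")
    result = np.arange(2) == "a"  # dtype mismatch: elementwise comparison fails
print(result)                     # False (a scalar, not an array)
print(log[-1].category)           # FutureWarning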
- """ - - message = "elementwise.* comparison failed; .*" - - def test_normal_types(self): - for op in (operator.eq, operator.ne): - # Broadcasting errors: - self.assert_deprecated(op, args=(np.zeros(3), [])) - a = np.zeros(3, dtype='i,i') - # (warning is issued a couple of times here) - self.assert_deprecated(op, args=(a, a[:-1]), num=None) - - # ragged array comparison returns True/False - a = np.array([1, np.array([1,2,3])], dtype=object) - b = np.array([1, np.array([1,2,3])], dtype=object) - self.assert_deprecated(op, args=(a, b), num=None) - - def test_string(self): - # For two string arrays, strings always raised the broadcasting error: - a = np.array(['a', 'b']) - b = np.array(['a', 'b', 'c']) - assert_raises(ValueError, lambda x, y: x == y, a, b) - - # The empty list is not cast to string, and this used to pass due - # to dtype mismatch; now (2018-06-21) it correctly leads to a - # FutureWarning. - assert_warns(FutureWarning, lambda: a == []) - - def test_void_dtype_equality_failures(self): - class NotArray(object): - def __array__(self): - raise TypeError - - # Needed so Python 3 does not raise DeprecationWarning twice. - def __ne__(self, other): - return NotImplemented - - self.assert_deprecated(lambda: np.arange(2) == NotArray()) - self.assert_deprecated(lambda: np.arange(2) != NotArray()) - - struct1 = np.zeros(2, dtype="i4,i4") - struct2 = np.zeros(2, dtype="i4,i4,i4") - - assert_warns(FutureWarning, lambda: struct1 == 1) - assert_warns(FutureWarning, lambda: struct1 == struct2) - assert_warns(FutureWarning, lambda: struct1 != 1) - assert_warns(FutureWarning, lambda: struct1 != struct2) - - def test_array_richcompare_legacy_weirdness(self): - # It doesn't really work to use assert_deprecated here, b/c part of - # the point of assert_deprecated is to check that when warnings are - # set to "error" mode then the error is propagated -- which is good! - # But here we are testing a bunch of code that is deprecated *because* - # it has the habit of swallowing up errors and converting them into - # different warnings. So assert_warns will have to be sufficient. - assert_warns(FutureWarning, lambda: np.arange(2) == "a") - assert_warns(FutureWarning, lambda: np.arange(2) != "a") - # No warning for scalar comparisons - with warnings.catch_warnings(): - warnings.filterwarnings("error") - assert_(not (np.array(0) == "a")) - assert_(np.array(0) != "a") - assert_(not (np.int16(0) == "a")) - assert_(np.int16(0) != "a") - - for arg1 in [np.asarray(0), np.int16(0)]: - struct = np.zeros(2, dtype="i4,i4") - for arg2 in [struct, "a"]: - for f in [operator.lt, operator.le, operator.gt, operator.ge]: - if sys.version_info[0] >= 3: - # py3 - with warnings.catch_warnings() as l: - warnings.filterwarnings("always") - assert_raises(TypeError, f, arg1, arg2) - assert_(not l) - else: - # py2 - assert_warns(DeprecationWarning, f, arg1, arg2) - - -class TestDatetime64Timezone(_DeprecationTestCase): - """Parsing of datetime64 with timezones deprecated in 1.11.0, because - datetime64 is now timezone naive rather than UTC only. - - It will be quite a while before we can remove this, because, at the very - least, a lot of existing code uses the 'Z' modifier to avoid conversion - from local time to UTC, even if otherwise it handles time in a timezone - naive fashion. 
- """ - def test_string(self): - self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',)) - self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',)) - - @pytest.mark.skipif(not _has_pytz, - reason="The pytz module is not available.") - def test_datetime(self): - tz = pytz.timezone('US/Eastern') - dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz) - self.assert_deprecated(np.datetime64, args=(dt,)) - - -class TestNonCContiguousViewDeprecation(_DeprecationTestCase): - """View of non-C-contiguous arrays deprecated in 1.11.0. - - The deprecation will not be raised for arrays that are both C and F - contiguous, as C contiguous is dominant. There are more such arrays - with relaxed stride checking than without so the deprecation is not - as visible with relaxed stride checking in force. - """ - - def test_fortran_contiguous(self): - self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,)) - self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) - - -class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): - """Assigning the 'data' attribute of an ndarray is unsafe as pointed - out in gh-7093. Eventually, such assignment should NOT be allowed, but - in the interests of maintaining backwards compatibility, only a Deprecation- - Warning will be raised instead for the time being to give developers time to - refactor relevant code. - """ - - def test_data_attr_assignment(self): - a = np.arange(10) - b = np.linspace(0, 1, 10) - - self.message = ("Assigning the 'data' attribute is an " - "inherently unsafe operation and will " - "be removed in the future.") - self.assert_deprecated(a.__setattr__, args=('data', b.data)) - - -class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase): - """ - If a 'width' parameter is passed into ``binary_repr`` that is insufficient to - represent the number in base 2 (positive) or 2's complement (negative) form, - the function used to silently ignore the parameter and return a representation - using the minimal number of bits needed for the form in question. Such behavior - is now considered unsafe from a user perspective and will raise an error in the future. - """ - - def test_insufficient_width_positive(self): - args = (10,) - kwargs = {'width': 2} - - self.message = ("Insufficient bit width provided. This behavior " - "will raise an error in the future.") - self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) - - def test_insufficient_width_negative(self): - args = (-5,) - kwargs = {'width': 2} - - self.message = ("Insufficient bit width provided. This behavior " - "will raise an error in the future.") - self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) - - -class TestNumericStyleTypecodes(_DeprecationTestCase): - """ - Deprecate the old numeric-style dtypes, which are especially - confusing for complex types, e.g. Complex32 -> complex64. When the - deprecation cycle is complete, the check for the strings should be - removed from PyArray_DescrConverter in descriptor.c, and the - deprecated keys should not be added as capitalized aliases in - _add_aliases in numerictypes.py. 
- """ - def test_all_dtypes(self): - deprecated_types = [ - 'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64', - 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64', - 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0' - ] - if sys.version_info[0] < 3: - deprecated_types.extend(['Unicode0', 'String0']) - - for dt in deprecated_types: - self.assert_deprecated(np.dtype, exceptions=(TypeError,), - args=(dt,)) - - -class TestTestDeprecated(object): - def test_assert_deprecated(self): - test_case_instance = _DeprecationTestCase() - test_case_instance.setup() - assert_raises(AssertionError, - test_case_instance.assert_deprecated, - lambda: None) - - def foo(): - warnings.warn("foo", category=DeprecationWarning, stacklevel=2) - - test_case_instance.assert_deprecated(foo) - test_case_instance.teardown() - - -class TestClassicIntDivision(_DeprecationTestCase): - """ - See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2 - if used for division - List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html - """ - def test_int_dtypes(self): - #scramble types and do some mix and match testing - deprecated_types = [ - 'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16', - 'intp', 'int64', 'uint32', 'int16' - ] - if sys.version_info[0] < 3 and sys.py3kwarning: - import operator as op - dt2 = 'bool_' - for dt1 in deprecated_types: - a = np.array([1,2,3], dtype=dt1) - b = np.array([1,2,3], dtype=dt2) - self.assert_deprecated(op.div, args=(a,b)) - dt2 = dt1 - - -class TestNonNumericConjugate(_DeprecationTestCase): - """ - Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, - which conflicts with the error behavior of np.conjugate. - """ - def test_conjugate(self): - for a in np.array(5), np.array(5j): - self.assert_not_deprecated(a.conjugate) - for a in (np.array('s'), np.array('2016', 'M'), - np.array((1, 2), [('a', int), ('b', int)])): - self.assert_deprecated(a.conjugate) - - -class TestNPY_CHAR(_DeprecationTestCase): - # 2017-05-03, 1.13.0 - def test_npy_char_deprecation(self): - from numpy.core._multiarray_tests import npy_char_deprecation - self.assert_deprecated(npy_char_deprecation) - assert_(npy_char_deprecation() == 'S1') - - -class TestPyArray_AS1D(_DeprecationTestCase): - def test_npy_pyarrayas1d_deprecation(self): - from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation - assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation) - - -class TestPyArray_AS2D(_DeprecationTestCase): - def test_npy_pyarrayas2d_deprecation(self): - from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation - assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation) - - -class Test_UPDATEIFCOPY(_DeprecationTestCase): - """ - v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use - WRITEBACKIFCOPY instead - """ - def test_npy_updateifcopy_deprecation(self): - from numpy.core._multiarray_tests import npy_updateifcopy_deprecation - arr = np.arange(9).reshape(3, 3) - v = arr.T - self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,)) - - -class TestDatetimeEvent(_DeprecationTestCase): - # 2017-08-11, 1.14.0 - def test_3_tuple(self): - for cls in (np.datetime64, np.timedelta64): - # two valid uses - (unit, num) and (unit, num, den, None) - self.assert_not_deprecated(cls, args=(1, ('ms', 2))) - self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) - - # trying to use the event argument, removed in 1.7.0, is deprecated - # it used to be a uint8 - self.assert_deprecated(cls, 
args=(1, ('ms', 2, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) - - -class TestTruthTestingEmptyArrays(_DeprecationTestCase): - # 2017-09-25, 1.14.0 - message = '.*truth value of an empty array is ambiguous.*' - - def test_1d(self): - self.assert_deprecated(bool, args=(np.array([]),)) - - def test_2d(self): - self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) - - -class TestBincount(_DeprecationTestCase): - # 2017-06-01, 1.14.0 - def test_bincount_minlength(self): - self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) - - -class TestAlen(_DeprecationTestCase): - # 2019-08-02, 1.18.0 - def test_alen(self): - self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3]))) - - -class TestGeneratorSum(_DeprecationTestCase): - # 2018-02-25, 1.15.0 - def test_generator_sum(self): - self.assert_deprecated(np.sum, args=((i for i in range(5)),)) - - -class TestSctypeNA(_VisibleDeprecationTestCase): - # 2018-06-24, 1.16 - def test_sctypeNA(self): - self.assert_deprecated(lambda: np.sctypeNA['?']) - self.assert_deprecated(lambda: np.typeNA['?']) - self.assert_deprecated(lambda: np.typeNA.get('?')) - - -class TestPositiveOnNonNumerical(_DeprecationTestCase): - # 2018-06-28, 1.16.0 - def test_positive_on_non_number(self): - self.assert_deprecated(operator.pos, args=(np.array('foo'),)) - - -class TestFromstring(_DeprecationTestCase): - # 2017-10-19, 1.14 - def test_fromstring(self): - self.assert_deprecated(np.fromstring, args=('\x00'*80,)) - - -class TestFromStringAndFileInvalidData(_DeprecationTestCase): - # 2019-06-08, 1.17.0 - # Tests should be moved to real tests when deprecation is done. - message = "string or file could not be read to its end" - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_data_file(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - - with tempfile.TemporaryFile(mode="w") as f: - x.tofile(f, sep=',', format='%.2f') - f.write(invalid_str) - - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",")) - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5)) - # Should not raise: - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - f.seek(0) - res = np.fromfile(f, sep=",", count=4) - assert_array_equal(res, x) - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_string(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - x_str = "1.51,2,3.51,4{}".format(invalid_str) - - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",")) - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5)) - - # The C-level API can use not fixed size, but 0 terminated strings, - # so test that as well: - bytestr = x_str.encode("ascii") - self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr)) - - with assert_warns(DeprecationWarning): - # this is slightly strange, in that fromstring leaves data - # potentially uninitialized (would be good to error when all is - # read, but count is larger then actual data maybe). 
- res = np.fromstring(x_str, sep=",", count=5) - assert_array_equal(res[:-1], x) - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - - # Should not raise: - res = np.fromstring(x_str, sep=",", count=4) - assert_array_equal(res, x) - - -class Test_GetSet_NumericOps(_DeprecationTestCase): - # 2018-09-20, 1.16.0 - def test_get_numeric_ops(self): - from numpy.core._multiarray_tests import getset_numericops - self.assert_deprecated(getset_numericops, num=2) - - # empty kwargs prevents any state actually changing which would break - # other tests. - self.assert_deprecated(np.set_numeric_ops, kwargs={}) - assert_raises(ValueError, np.set_numeric_ops, add='abc') - - -class TestShape1Fields(_DeprecationTestCase): - warning_cls = FutureWarning - - # 2019-05-20, 1.17.0 - def test_shape_1_fields(self): - self.assert_deprecated(np.dtype, args=([('a', int, 1)],)) - - -class TestNonZero(_DeprecationTestCase): - # 2019-05-26, 1.17.0 - def test_zerod(self): - self.assert_deprecated(lambda: np.nonzero(np.array(0))) - self.assert_deprecated(lambda: np.nonzero(np.array(1))) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_dtype.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_dtype.py deleted file mode 100644 index e18e66c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_dtype.py +++ /dev/null @@ -1,1300 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import operator -import pytest -import ctypes -import gc - -import numpy as np -from numpy.core._rational_tests import rational -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT) -from numpy.compat import pickle -from itertools import permutations - -def assert_dtype_equal(a, b): - assert_equal(a, b) - assert_equal(hash(a), hash(b), - "two equivalent types do not hash to the same value !") - -def assert_dtype_not_equal(a, b): - assert_(a != b) - assert_(hash(a) != hash(b), - "two different types hash to the same value !") - -class TestBuiltin(object): - @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, - np.compat.unicode]) - def test_run(self, t): - """Only test hash runs at all.""" - dt = np.dtype(t) - hash(dt) - - @pytest.mark.parametrize('t', [int, float]) - def test_dtype(self, t): - # Make sure equivalent byte order char hash the same (e.g. 
< and = on - # little endian) - dt = np.dtype(t) - dt2 = dt.newbyteorder("<") - dt3 = dt.newbyteorder(">") - if dt == dt2: - assert_(dt.byteorder != dt2.byteorder, "bogus test") - assert_dtype_equal(dt, dt2) - else: - assert_(dt.byteorder != dt3.byteorder, "bogus test") - assert_dtype_equal(dt, dt3) - - def test_equivalent_dtype_hashing(self): - # Make sure equivalent dtypes with different type num hash equal - uintp = np.dtype(np.uintp) - if uintp.itemsize == 4: - left = uintp - right = np.dtype(np.uint32) - else: - left = uintp - right = np.dtype(np.ulonglong) - assert_(left == right) - assert_(hash(left) == hash(right)) - - def test_invalid_types(self): - # Make sure invalid type strings raise an error - - assert_raises(TypeError, np.dtype, 'O3') - assert_raises(TypeError, np.dtype, 'O5') - assert_raises(TypeError, np.dtype, 'O7') - assert_raises(TypeError, np.dtype, 'b3') - assert_raises(TypeError, np.dtype, 'h4') - assert_raises(TypeError, np.dtype, 'I5') - assert_raises(TypeError, np.dtype, 'e3') - assert_raises(TypeError, np.dtype, 'f5') - - if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: - assert_raises(TypeError, np.dtype, 'g12') - elif np.dtype('g').itemsize == 12: - assert_raises(TypeError, np.dtype, 'g16') - - if np.dtype('l').itemsize == 8: - assert_raises(TypeError, np.dtype, 'l4') - assert_raises(TypeError, np.dtype, 'L4') - else: - assert_raises(TypeError, np.dtype, 'l8') - assert_raises(TypeError, np.dtype, 'L8') - - if np.dtype('q').itemsize == 8: - assert_raises(TypeError, np.dtype, 'q4') - assert_raises(TypeError, np.dtype, 'Q4') - else: - assert_raises(TypeError, np.dtype, 'q8') - assert_raises(TypeError, np.dtype, 'Q8') - - @pytest.mark.parametrize( - 'value', - ['m8', 'M8', 'datetime64', 'timedelta64', - 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10', - '>f', 'f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])]) - assert_equal(str(dt), - "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,)), " - "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))])]") - - # If the sticky aligned flag is set to True, it makes the - # str() function use a dict representation with an 'aligned' flag - dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], - (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])], - align=True) - assert_equal(str(dt), - "{'names':['top','bottom'], " - "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,))," - "[('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))]], " - "'offsets':[0,76800], " - "'itemsize':80000, " - "'aligned':True}") - assert_equal(np.dtype(eval(str(dt))), dt) - - dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], - 'offsets': [0, 1, 2], - 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) - assert_equal(str(dt), - "[(('Red pixel', 'r'), 'u1'), " - "(('Green pixel', 'g'), 'u1'), " - "(('Blue pixel', 'b'), 'u1')]") - - dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], - 'formats': ['f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])]) - assert_equal(repr(dt), - "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,)), " - "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))])])") - - dt = 
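The hashing invariant tested above (equivalent dtypes must compare and hash equal, across byte-order spellings) can be checked directly:

import numpy as np

dt = np.dtype('i4')
native = dt.newbyteorder('=')  # explicit native-order spelling
print(dt == native, hash(dt) == hash(native))  # True True
print(dt == dt.newbyteorder('S'))              # False: byte-swapped i4 differs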
np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], - 'offsets': [0, 1, 2], - 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, - align=True) - assert_equal(repr(dt), - "dtype([(('Red pixel', 'r'), 'u1'), " - "(('Green pixel', 'g'), 'u1'), " - "(('Blue pixel', 'b'), 'u1')], align=True)") - - def test_repr_structured_not_packed(self): - dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], - 'formats': ['= 3, reason="Python 2 only") - def test_dtype_str_with_long_in_shape(self): - # Pull request #376, should not error - np.dtype('(1L,)i4') - - def test_base_dtype_with_object_type(self): - # Issue gh-2798, should not error. - np.array(['a'], dtype="O").astype(("O", [("name", "O")])) - - def test_empty_string_to_object(self): - # Pull request #4722 - np.array(["", ""]).astype(object) - - def test_void_subclass_unsized(self): - dt = np.dtype(np.record) - assert_equal(repr(dt), "dtype('V')") - assert_equal(str(dt), '|V0') - assert_equal(dt.name, 'record') - - def test_void_subclass_sized(self): - dt = np.dtype((np.record, 2)) - assert_equal(repr(dt), "dtype('V2')") - assert_equal(str(dt), '|V2') - assert_equal(dt.name, 'record16') - - def test_void_subclass_fields(self): - dt = np.dtype((np.record, [('a', 'f4', (2, 1)), ('b', 'u4')]) - self.check(BigEndStruct, expected) - - def test_little_endian_structure_packed(self): - class LittleEndStruct(ctypes.LittleEndianStructure): - _fields_ = [ - ('one', ctypes.c_uint8), - ('two', ctypes.c_uint32) - ] - _pack_ = 1 - expected = np.dtype([('one', 'u1'), ('two', 'B'), - ('b', '>H') - ], align=True) - self.check(PaddedStruct, expected) - - def test_simple_endian_types(self): - self.check(ctypes.c_uint16.__ctype_le__, np.dtype('u2')) - self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) - self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) - - all_types = set(np.typecodes['All']) - all_pairs = permutations(all_types, 2) - - @pytest.mark.parametrize("pair", all_pairs) - def test_pairs(self, pair): - """ - Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')] - Example: np.dtype('d,I') -> dtype([('f0', '..j", [0, 0], optimize=do_opt) - assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt) - - # invalid subscript character - assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt) - assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt) - assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt) - - # output subscripts must appear in input - assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt) - - # output subscripts may only be specified once - assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]], - optimize=do_opt) - - # dimensions much match when being collapsed - assert_raises(ValueError, np.einsum, "ii", - np.arange(6).reshape(2, 3), optimize=do_opt) - assert_raises(ValueError, np.einsum, "ii->i", - np.arange(6).reshape(2, 3), optimize=do_opt) - - # broadcasting to new dimensions must be enabled explicitly - assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3), - optimize=do_opt) - assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], - out=np.arange(4).reshape(2, 2), optimize=do_opt) - with assert_raises_regex(ValueError, "'b'"): - # gh-11221 - 'c' erroneously appeared in the error message - a = np.ones((3, 3, 4, 5, 6)) - b = np.ones((3, 4, 5)) - np.einsum('aabcb,abc', a, b) - - def test_einsum_views(self): - # pass-through - for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) - - b = 
np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) - - b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) - - b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a) - - b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a) - - # output is writeable whenever input is writeable - b = np.einsum("...", a, optimize=do_opt) - assert_(b.flags['WRITEABLE']) - a.flags['WRITEABLE'] = False - b = np.einsum("...", a, optimize=do_opt) - assert_(not b.flags['WRITEABLE']) - - # transpose - a = np.arange(6) - a.shape = (2, 3) - - b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.T) - - b = np.einsum(a, [1, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.T) - - # diagonal - a = np.arange(9) - a.shape = (3, 3) - - b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[i, i] for i in range(3)]) - - # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) - - b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) - - b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) - - b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(2, 0, 1)]) - - b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(2, 0, 1)]) - - b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum("jii->ij", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) - - b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) - - b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) - - b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(1, 0, 2)]) - - b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(1, 0, 2)]) - - # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) - - b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, [a[i, i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) - 
assert_equal(b, [a[i, i, i] for i in range(3)]) - - # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) - - b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0, 1)) - - b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0, 1)) - - def check_einsum_sums(self, dtype, do_opt=False): - # Check various sums. Does many sizes to exercise unrolled loops. - - # sum(a, axis=-1) - for n in range(1, 17): - a = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i->", a, optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [0], [], optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - - for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("...i->...", a, optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), - np.sum(a, axis=-1).astype(dtype)) - - # sum(a, axis=0) - for n in range(1, 17): - a = np.arange(2*n, dtype=dtype).reshape(2, n) - assert_equal(np.einsum("i...->...", a, optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - - for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("i...->...", a, optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), - np.sum(a, axis=0).astype(dtype)) - - # trace(a) - for n in range(1, 17): - a = np.arange(n*n, dtype=dtype).reshape(n, n) - assert_equal(np.einsum("ii", a, optimize=do_opt), - np.trace(a).astype(dtype)) - assert_equal(np.einsum(a, [0, 0], optimize=do_opt), - np.trace(a).astype(dtype)) - - # multiply(a, b) - assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case - for n in range(1, 17): - a = np.arange(3 * n, dtype=dtype).reshape(3, n) - b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), - np.multiply(a, b)) - assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), - np.multiply(a, b)) - - # inner(a,b) - for n in range(1, 17): - a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) - assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), - np.inner(a, b)) - - for n in range(1, 11): - a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), - np.inner(a.T, b.T).T) - assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), - np.inner(a.T, b.T).T) - - # outer(a,b) - for n in range(1, 17): - a = np.arange(3, dtype=dtype)+1 - b = np.arange(n, dtype=dtype)+1 - assert_equal(np.einsum("i,j", a, b, optimize=do_opt), - np.outer(a, b)) - assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), - np.outer(a, b)) - - # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning) - - # matvec(a,b) / a.dot(b) where a is matrix, b is vector - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), - np.dot(a, b)) - assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), - np.dot(a, b)) - - c = 
np.arange(4, dtype=dtype) - np.einsum("ij,j", a, b, out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0, 1], b, [1], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), - np.dot(b.T, a.T)) - assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), - np.dot(b.T, a.T)) - - c = np.arange(4, dtype=dtype) - np.einsum("ji,j", a.T, b.T, out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a.T, [1, 0], b.T, [1], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - - # matmat(a,b) / a.dot(b) where a is matrix, b is matrix - for n in range(1, 17): - if n < 8 or dtype != 'f2': - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) - assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), - np.dot(a, b)) - assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), - np.dot(a, b)) - - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) - c = np.arange(24, dtype=dtype).reshape(4, 6) - np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', - optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0, 1], b, [1, 2], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - # matrix triple product (note this is not currently an efficient - # way to multiply 3 matrices) - a = np.arange(12, dtype=dtype).reshape(3, 4) - b = np.arange(20, dtype=dtype).reshape(4, 5) - c = np.arange(30, dtype=dtype).reshape(5, 6) - if dtype != 'f2': - assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), - a.dot(b).dot(c)) - assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], - optimize=do_opt), a.dot(b).dot(c)) - - d = np.arange(18, dtype=dtype).reshape(3, 6) - np.einsum("ij,jk,kl", a, b, c, out=d, - dtype='f8', casting='unsafe', optimize=do_opt) - tgt = a.astype('f8').dot(b.astype('f8')) - tgt = tgt.dot(c.astype('f8')).astype(dtype) - assert_equal(d, tgt) - - d[...] = 0 - np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, - dtype='f8', casting='unsafe', optimize=do_opt) - tgt = a.astype('f8').dot(b.astype('f8')) - tgt = tgt.dot(c.astype('f8')).astype(dtype) - assert_equal(d, tgt) - - # tensordot(a, b) - if np.dtype(dtype) != np.dtype('f2'): - a = np.arange(60, dtype=dtype).reshape(3, 4, 5) - b = np.arange(24, dtype=dtype).reshape(4, 3, 2) - assert_equal(np.einsum("ijk, jil -> kl", a, b), - np.tensordot(a, b, axes=([1, 0], [0, 1]))) - assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), - np.tensordot(a, b, axes=([1, 0], [0, 1]))) - - c = np.arange(10, dtype=dtype).reshape(5, 2) - np.einsum("ijk,jil->kl", a, b, out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1, 0], [0, 1])).astype(dtype)) - c[...] 
= 0 - np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, - dtype='f8', casting='unsafe', optimize=do_opt) - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1, 0], [0, 1])).astype(dtype)) - - # logical_and(logical_and(a!=0, b!=0), c!=0) - a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) - b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) - c = np.array([True, True, False, True, True, False, True, True]) - assert_equal(np.einsum("i,i,i->i", a, b, c, - dtype='?', casting='unsafe', optimize=do_opt), - np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) - assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], - dtype='?', casting='unsafe'), - np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) - - a = np.arange(9, dtype=dtype) - assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) - assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) - assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) - assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) - - # Various stride0, contiguous, and SSE aligned variants - for n in range(1, 25): - a = np.arange(n, dtype=dtype) - if np.dtype(dtype).itemsize > 1: - assert_equal(np.einsum("...,...", a, a, optimize=do_opt), - np.multiply(a, a)) - assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) - assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) - assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) - assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) - assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) - - assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), - np.multiply(a[1:], a[:-1])) - assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), - np.dot(a[1:], a[:-1])) - assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) - assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) - assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), - 2*np.sum(a[1:])) - assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), - 2*np.sum(a[1:])) - - # An object array, summed as the data type - a = np.arange(9, dtype=object) - - b = np.einsum("i->", a, dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - # A case which was failing (ticket #1885) - p = np.arange(2) + 1 - q = np.arange(4).reshape(2, 2) + 3 - r = np.arange(4).reshape(2, 2) + 7 - assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) - - # singleton dimensions broadcast (gh-10343) - p = np.ones((10,2)) - q = np.ones((1,2)) - assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), - np.einsum('ij,ij->j', p, q, optimize=False)) - assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), - [10.] * 2) - - # a blas-compatible contraction broadcasting case which was failing - # for optimize=True (ticket #10930) - x = np.array([2., 3.]) - y = np.array([4.]) - assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) - assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) 
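# A minimal standalone sketch of the gh-10930 case above, assuming only
# plain NumPy: the length-1 operand is broadcast before the shared index
# is contracted, so the contraction is just a sum of products.
#
#     import numpy as np
#     x = np.array([2., 3.])
#     y = np.array([4.])                # length-1, broadcast to x's shape
#     assert np.einsum("i,i", x, y) == 2*4 + 3*4   # == 20.0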
- - # all-ones array was bypassing bug (ticket #10930) - p = np.ones((1, 5)) / 2 - q = np.ones((5, 5)) / 2 - for optimize in (True, False): - assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, - optimize=optimize), - np.einsum("...ij,...jk->...ik", p, q, - optimize=optimize)) - assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, - optimize=optimize), - np.full((1, 5), 1.25)) - - # Cases which were failing (gh-10899) - x = np.eye(2, dtype=dtype) - y = np.ones(2, dtype=dtype) - assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), - [2.]) # contig_contig_outstride0_two - assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), - [2.]) # stride0_contig_outstride0_two - assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), - [2.]) # contig_stride0_outstride0_two - - def test_einsum_sums_int8(self): - self.check_einsum_sums('i1') - - def test_einsum_sums_uint8(self): - self.check_einsum_sums('u1') - - def test_einsum_sums_int16(self): - self.check_einsum_sums('i2') - - def test_einsum_sums_uint16(self): - self.check_einsum_sums('u2') - - def test_einsum_sums_int32(self): - self.check_einsum_sums('i4') - self.check_einsum_sums('i4', True) - - def test_einsum_sums_uint32(self): - self.check_einsum_sums('u4') - self.check_einsum_sums('u4', True) - - def test_einsum_sums_int64(self): - self.check_einsum_sums('i8') - - def test_einsum_sums_uint64(self): - self.check_einsum_sums('u8') - - def test_einsum_sums_float16(self): - self.check_einsum_sums('f2') - - def test_einsum_sums_float32(self): - self.check_einsum_sums('f4') - - def test_einsum_sums_float64(self): - self.check_einsum_sums('f8') - self.check_einsum_sums('f8', True) - - def test_einsum_sums_longdouble(self): - self.check_einsum_sums(np.longdouble) - - def test_einsum_sums_cfloat64(self): - self.check_einsum_sums('c8') - self.check_einsum_sums('c8', True) - - def test_einsum_sums_cfloat128(self): - self.check_einsum_sums('c16') - - def test_einsum_sums_clongdouble(self): - self.check_einsum_sums(np.clongdouble) - - def test_einsum_misc(self): - # This call used to crash because of a bug in - # PyArray_AssignZero - a = np.ones((1, 2)) - b = np.ones((2, 2, 1)) - assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) - assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) - - # Regression test for issue #10369 (test unicode inputs with Python 2) - assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]]) - assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) - assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20) - assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], - optimize=u'greedy'), 20) - - # The iterator had an issue with buffering this reduction - a = np.ones((5, 12, 4, 2, 3), np.int64) - b = np.ones((5, 12, 11), np.int64) - assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), - np.einsum('ijklm,ijn->', a, b)) - assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), - np.einsum('ijklm,ijn->', a, b, optimize=True)) - - # Issue #2027, was a problem in the contiguous 3-argument - # inner loop implementation - a = np.arange(1, 3) - b = np.arange(1, 5).reshape(2, 2) - c = np.arange(1, 9).reshape(4, 2) - assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), - [[[1, 3], [3, 9], [5, 15], [7, 21]], - [[8, 16], [16, 32], [24, 48], [32, 64]]]) - assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), - [[[1, 3], [3, 9], [5, 15], [7, 21]], - [[8, 16], [16, 32], [24, 48], [32, 64]]]) - - def test_subscript_range(self): - # 
Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used - # when creating a subscript from arrays - a = np.ones((2, 3)) - b = np.ones((3, 4)) - np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) - np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) - np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) - assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) - assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) - - def test_einsum_broadcast(self): - # Issue #2455 change in handling ellipsis - # remove the 'middle broadcast' error - # only use the 'RIGHT' iteration in prepare_op_axes - # adds auto broadcast on left where it belongs - # broadcast on right has to be explicit - # We need to test the optimized parsing as well - - A = np.arange(2 * 3 * 4).reshape(2, 3, 4) - B = np.arange(3) - ref = np.einsum('ijk,j->ijk', A, B, optimize=False) - for opt in [True, False]: - assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error - - A = np.arange(12).reshape((4, 3)) - B = np.arange(6).reshape((3, 2)) - ref = np.einsum('ik,kj->ij', A, B, optimize=False) - for opt in [True, False]: - assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) - assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error - - dims = [2, 3, 4, 5] - a = np.arange(np.prod(dims)).reshape(dims) - v = np.arange(dims[2]) - ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) - for opt in [True, False]: - assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) - assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error - assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) - - J, K, M = 160, 160, 120 - A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) - B = np.arange(J * K * M * 3).reshape(J, K, M, 3) - ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) - for opt in [True, False]: - assert_equal(np.einsum('...lmn,lmno->...o', A, B, - optimize=opt), ref) # used to raise error - - def test_einsum_fixedstridebug(self): - # Issue #4485 obscure einsum bug - # This case revealed a bug in nditer where it reported a stride - # as 'fixed' (0) when it was in fact not fixed during processing - # (0 or 4). The reason for the bug was that the check for a fixed - # stride was using the information from the 2D inner loop reuse - # to restrict the iteration dimensions it had to validate to be - # the same, but that 2D inner loop reuse logic is only triggered - # during the buffer copying step, and hence it was invalid to - # rely on those values. The fix is to check all the dimensions - # of the stride in question, which in the test case reveals that - # the stride is not fixed. - # - # NOTE: This test is triggered by the fact that the default buffersize, - # used by einsum, is 8192, and 3*2731 = 8193, is larger than that - # and results in a mismatch between the buffering and the - # striding for operand A. 
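# Concretely: operand B below has shape (2, 3, 2731), so one sweep over
# its last two axes covers 3 * 2731 == 8193 elements -- one more than the
# 8192-element default buffer -- and across the resulting buffered chunks
# the stride of operand A alternates between 0 and its 4-byte element
# stride rather than staying fixed, which is the mismatch described above.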
- A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) - B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) - es = np.einsum('cl, cpx->lpx', A, B) - tp = np.tensordot(A, B, axes=(0, 0)) - assert_equal(es, tp) - # The following is the original test case from the bug report, - # made repeatable by changing random arrays to aranges. - A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) - B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) - es = np.einsum('cl, cpxy->lpxy', A, B) - tp = np.tensordot(A, B, axes=(0, 0)) - assert_equal(es, tp) - - def test_einsum_fixed_collapsingbug(self): - # Issue #5147. - # The bug only occurred when the output argument of einsum was used. - x = np.random.normal(0, 1, (5, 5, 5, 5)) - y1 = np.zeros((5, 5)) - np.einsum('aabb->ab', x, out=y1) - idx = np.arange(5) - y2 = x[idx[:, None], idx[:, None], idx, idx] - assert_equal(y1, y2) - - def test_einsum_failed_on_p9_and_s390x(self): - # Issues gh-14692 and gh-12689 - # Bug with signed vs unsigned char errored on power9 and s390x Linux - tensor = np.random.random_sample((10, 10, 10, 10)) - x = np.einsum('ijij->', tensor) - y = tensor.trace(axis1=0, axis2=2).trace() - assert_allclose(x, y) - - def test_einsum_all_contig_non_contig_output(self): - # Issue gh-5907, tests that the all contiguous special case - # actually checks the contiguity of the output - x = np.ones((5, 5)) - out = np.ones(10)[::2] - correct_base = np.ones(10) - correct_base[::2] = 5 - # Always worked (inner iteration is done with 0-stride): - np.einsum('mi,mi,mi->m', x, x, x, out=out) - assert_array_equal(out.base, correct_base) - # Example 1: - out = np.ones(10)[::2] - np.einsum('im,im,im->m', x, x, x, out=out) - assert_array_equal(out.base, correct_base) - # Example 2, buffering causes x to be contiguous but - # special cases do not catch the operation before: - out = np.ones((2, 2, 2))[..., 0] - correct_base = np.ones((2, 2, 2)) - correct_base[..., 0] = 2 - x = np.ones((2, 2), np.float32) - np.einsum('ij,jk->ik', x, x, out=out) - assert_array_equal(out.base, correct_base) - - def test_small_boolean_arrays(self): - # See gh-5946. - # Use array of True embedded in False. - a = np.zeros((16, 1, 1), dtype=np.bool_)[:2] - a[...] 
= True - out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] - tgt = np.ones((2, 1, 1), dtype=np.bool_) - res = np.einsum('...ij,...jk->...ik', a, a, out=out) - assert_equal(res, tgt) - - def test_out_is_res(self): - a = np.arange(9).reshape(3, 3) - res = np.einsum('...ij,...jk->...ik', a, a, out=a) - assert res is a - - def optimize_compare(self, subscripts, operands=None): - # Tests all paths of the optimization function against - # conventional einsum - if operands is None: - args = [subscripts] - terms = subscripts.split('->')[0].split(',') - for term in terms: - dims = [global_size_dict[x] for x in term] - args.append(np.random.rand(*dims)) - else: - args = [subscripts] + operands - - noopt = np.einsum(*args, optimize=False) - opt = np.einsum(*args, optimize='greedy') - assert_almost_equal(opt, noopt) - opt = np.einsum(*args, optimize='optimal') - assert_almost_equal(opt, noopt) - - def test_hadamard_like_products(self): - # Hadamard outer products - self.optimize_compare('a,ab,abc->abc') - self.optimize_compare('a,b,ab->ab') - - def test_index_transformations(self): - # Simple index transformation cases - self.optimize_compare('ea,fb,gc,hd,abcd->efgh') - self.optimize_compare('ea,fb,abcd,gc,hd->efgh') - self.optimize_compare('abcd,ea,fb,gc,hd->efgh') - - def test_complex(self): - # Long test cases - self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') - self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') - self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') - self.optimize_compare('abhe,hidj,jgba,hiab,gab') - self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') - self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') - self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') - self.optimize_compare('bdhe,acad,hiab,agac,hibd') - - def test_collapse(self): - # Inner products - self.optimize_compare('ab,ab,c->') - self.optimize_compare('ab,ab,c->c') - self.optimize_compare('ab,ab,cd,cd->') - self.optimize_compare('ab,ab,cd,cd->ac') - self.optimize_compare('ab,ab,cd,cd->cd') - self.optimize_compare('ab,ab,cd,cd,ef,ef->') - - def test_expand(self): - # Outer products - self.optimize_compare('ab,cd,ef->abcdef') - self.optimize_compare('ab,cd,ef->acdf') - self.optimize_compare('ab,cd,de->abcde') - self.optimize_compare('ab,cd,de->be') - self.optimize_compare('ab,bcd,cd->abcd') - self.optimize_compare('ab,bcd,cd->abd') - - def test_edge_cases(self): - # Difficult edge cases for optimization - self.optimize_compare('eb,cb,fb->cef') - self.optimize_compare('dd,fb,be,cdb->cef') - self.optimize_compare('bca,cdb,dbf,afc->') - self.optimize_compare('dcc,fce,ea,dbf->ab') - self.optimize_compare('fdf,cdd,ccd,afe->ae') - self.optimize_compare('abcd,ad') - self.optimize_compare('ed,fcd,ff,bcf->be') - self.optimize_compare('baa,dcf,af,cde->be') - self.optimize_compare('bd,db,eac->ace') - self.optimize_compare('fff,fae,bef,def->abd') - self.optimize_compare('efc,dbc,acf,fd->abe') - self.optimize_compare('ba,ac,da->bcd') - - def test_inner_product(self): - # Inner products - self.optimize_compare('ab,ab') - self.optimize_compare('ab,ba') - self.optimize_compare('abc,abc') - self.optimize_compare('abc,bac') - self.optimize_compare('abc,cba') - - def test_random_cases(self): - # Randomly built test cases - self.optimize_compare('aab,fa,df,ecc->bde') - self.optimize_compare('ecb,fef,bad,ed->ac') - self.optimize_compare('bcf,bbb,fbf,fc->') - self.optimize_compare('bb,ff,be->e') - self.optimize_compare('bcb,bb,fc,fff->') - self.optimize_compare('fbb,dfd,fc,fc->') - 
self.optimize_compare('afd,ba,cc,dc->bf') - self.optimize_compare('adb,bc,fa,cfc->d') - self.optimize_compare('bbd,bda,fc,db->acf') - self.optimize_compare('dba,ead,cad->bce') - self.optimize_compare('aef,fbc,dca->bde') - - def test_combined_views_mapping(self): - # gh-10792 - a = np.arange(9).reshape(1, 1, 3, 1, 3) - b = np.einsum('bbcdc->d', a) - assert_equal(b, [12]) - - def test_broadcasting_dot_cases(self): - # Ensures broadcasting cases are not mistaken for GEMM - - a = np.random.rand(1, 5, 4) - b = np.random.rand(4, 6) - c = np.random.rand(5, 6) - d = np.random.rand(10) - - self.optimize_compare('ijk,kl,jl', operands=[a, b, c]) - self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d]) - - e = np.random.rand(1, 1, 5, 4) - f = np.random.rand(7, 7) - self.optimize_compare('abjk,kl,jl', operands=[e, b, c]) - self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f]) - - # Edge case found in gh-11308 - g = np.arange(64).reshape(2, 4, 8) - self.optimize_compare('obk,ijk->ioj', operands=[g, g]) - - -class TestEinsumPath(object): - def build_operands(self, string, size_dict=global_size_dict): - - # Builds views based off initial operands - operands = [string] - terms = string.split('->')[0].split(',') - for term in terms: - dims = [size_dict[x] for x in term] - operands.append(np.random.rand(*dims)) - - return operands - - def assert_path_equal(self, comp, benchmark): - # Checks if list of tuples are equivalent - ret = (len(comp) == len(benchmark)) - assert_(ret) - for pos in range(len(comp) - 1): - ret &= isinstance(comp[pos + 1], tuple) - ret &= (comp[pos + 1] == benchmark[pos + 1]) - assert_(ret) - - def test_memory_contraints(self): - # Ensure memory constraints are satisfied - - outer_test = self.build_operands('a,b,c->abc') - - path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) - - path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) - - long_test = self.build_operands('acdf,jbje,gihb,hfac') - path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) - - path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0)) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) - - def test_long_paths(self): - # Long complex cases - - # Long test 1 - long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') - path, path_str = np.einsum_path(*long_test1, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', - (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) - - path, path_str = np.einsum_path(*long_test1, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', - (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) - - # Long test 2 - long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb') - path, path_str = np.einsum_path(*long_test2, optimize='greedy') - print(path) - self.assert_path_equal(path, ['einsum_path', - (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]) - - path, path_str = np.einsum_path(*long_test2, optimize='optimal') - print(path) - self.assert_path_equal(path, ['einsum_path', - (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]) - - def test_edge_paths(self): - # Difficult edge cases - - # Edge test1 - edge_test1 = self.build_operands('eb,cb,fb->cef') - path, path_str = np.einsum_path(*edge_test1, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) - - path, 
path_str = np.einsum_path(*edge_test1, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) - - # Edge test2 - edge_test2 = self.build_operands('dd,fb,be,cdb->cef') - path, path_str = np.einsum_path(*edge_test2, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) - - path, path_str = np.einsum_path(*edge_test2, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) - - # Edge test3 - edge_test3 = self.build_operands('bca,cdb,dbf,afc->') - path, path_str = np.einsum_path(*edge_test3, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) - - path, path_str = np.einsum_path(*edge_test3, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) - - # Edge test4 - edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab') - path, path_str = np.einsum_path(*edge_test4, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) - - path, path_str = np.einsum_path(*edge_test4, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) - - # Edge test5 - edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->', - size_dict={"a": 20, "b": 20, "c": 20, "d": 20}) - path, path_str = np.einsum_path(*edge_test4, optimize='greedy') - self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) - - path, path_str = np.einsum_path(*edge_test4, optimize='optimal') - self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) - - def test_path_type_input(self): - # Test explicit path handling - path_test = self.build_operands('dcc,fce,ea,dbf->ab') - - path, path_str = np.einsum_path(*path_test, optimize=False) - self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) - - path, path_str = np.einsum_path(*path_test, optimize=True) - self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) - - exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)] - path, path_str = np.einsum_path(*path_test, optimize=exp_path) - self.assert_path_equal(path, exp_path) - - # Double check einsum works on the input path - noopt = np.einsum(*path_test, optimize=False) - opt = np.einsum(*path_test, optimize=exp_path) - assert_almost_equal(noopt, opt) - - def test_spaces(self): - #gh-10794 - arr = np.array([[1]]) - for sp in itertools.product(['', ' '], repeat=4): - # no error for any spacing - np.einsum('{}...a{}->{}...a{}'.format(*sp), arr) - -def test_overlap(): - a = np.arange(9, dtype=int).reshape(3, 3) - b = np.arange(9, dtype=int).reshape(3, 3) - d = np.dot(a, b) - # sanity check - c = np.einsum('ij,jk->ik', a, b) - assert_equal(c, d) - #gh-10080, out overlaps one of the operands - c = np.einsum('ij,jk->ik', a, b, out=b) - assert_equal(c, d) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_errstate.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_errstate.py deleted file mode 100644 index 0008c4c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_errstate.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import pytest - -import numpy as np -from numpy.testing import assert_, assert_raises - - -class TestErrstate(object): - @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") - def test_invalid(self): - with np.errstate(all='raise', under='ignore'): - a = -np.arange(3) - # This should work - with 
np.errstate(invalid='ignore'): - np.sqrt(a) - # While this should fail! - with assert_raises(FloatingPointError): - np.sqrt(a) - - def test_divide(self): - with np.errstate(all='raise', under='ignore'): - a = -np.arange(3) - # This should work - with np.errstate(divide='ignore'): - a // 0 - # While this should fail! - with assert_raises(FloatingPointError): - a // 0 - - def test_errcall(self): - def foo(*args): - print(args) - - olderrcall = np.geterrcall() - with np.errstate(call=foo): - assert_(np.geterrcall() is foo, 'call is not foo') - with np.errstate(call=None): - assert_(np.geterrcall() is None, 'call is not None') - assert_(np.geterrcall() is olderrcall, 'call is not olderrcall') - - def test_errstate_decorator(self): - @np.errstate(all='ignore') - def foo(): - a = -np.arange(3) - a // 0 - - foo() diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_extint128.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_extint128.py deleted file mode 100644 index 7c454a6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_extint128.py +++ /dev/null @@ -1,221 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import itertools -import contextlib -import operator -import pytest - -import numpy as np -import numpy.core._multiarray_tests as mt - -from numpy.testing import assert_raises, assert_equal - - -INT64_MAX = np.iinfo(np.int64).max -INT64_MIN = np.iinfo(np.int64).min -INT64_MID = 2**32 - -# int128 is not two's complement, the sign bit is separate -INT128_MAX = 2**128 - 1 -INT128_MIN = -INT128_MAX -INT128_MID = 2**64 - -INT64_VALUES = ( - [INT64_MIN + j for j in range(20)] + - [INT64_MAX - j for j in range(20)] + - [INT64_MID + j for j in range(-20, 20)] + - [2*INT64_MID + j for j in range(-20, 20)] + - [INT64_MID//2 + j for j in range(-20, 20)] + - list(range(-70, 70)) -) - -INT128_VALUES = ( - [INT128_MIN + j for j in range(20)] + - [INT128_MAX - j for j in range(20)] + - [INT128_MID + j for j in range(-20, 20)] + - [2*INT128_MID + j for j in range(-20, 20)] + - [INT128_MID//2 + j for j in range(-20, 20)] + - list(range(-70, 70)) + - [False] # negative zero -) - -INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0] - - -@contextlib.contextmanager -def exc_iter(*args): - """ - Iterate over Cartesian product of *args, and if an exception is raised, - add information of the current iterate. 
- """ - - value = [None] - - def iterate(): - for v in itertools.product(*args): - value[0] = v - yield v - - try: - yield iterate() - except Exception: - import traceback - msg = "At: %r\n%s" % (repr(value[0]), - traceback.format_exc()) - raise AssertionError(msg) - - -def test_safe_binop(): - # Test checked arithmetic routines - - ops = [ - (operator.add, 1), - (operator.sub, 2), - (operator.mul, 3) - ] - - with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it: - for xop, a, b in it: - pyop, op = xop - c = pyop(a, b) - - if not (INT64_MIN <= c <= INT64_MAX): - assert_raises(OverflowError, mt.extint_safe_binop, a, b, op) - else: - d = mt.extint_safe_binop(a, b, op) - if c != d: - # assert_equal is slow - assert_equal(d, c) - - -def test_to_128(): - with exc_iter(INT64_VALUES) as it: - for a, in it: - b = mt.extint_to_128(a) - if a != b: - assert_equal(b, a) - - -def test_to_64(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - if not (INT64_MIN <= a <= INT64_MAX): - assert_raises(OverflowError, mt.extint_to_64, a) - else: - b = mt.extint_to_64(a) - if a != b: - assert_equal(b, a) - - -def test_mul_64_64(): - with exc_iter(INT64_VALUES, INT64_VALUES) as it: - for a, b in it: - c = a * b - d = mt.extint_mul_64_64(a, b) - if c != d: - assert_equal(d, c) - - -def test_add_128(): - with exc_iter(INT128_VALUES, INT128_VALUES) as it: - for a, b in it: - c = a + b - if not (INT128_MIN <= c <= INT128_MAX): - assert_raises(OverflowError, mt.extint_add_128, a, b) - else: - d = mt.extint_add_128(a, b) - if c != d: - assert_equal(d, c) - - -def test_sub_128(): - with exc_iter(INT128_VALUES, INT128_VALUES) as it: - for a, b in it: - c = a - b - if not (INT128_MIN <= c <= INT128_MAX): - assert_raises(OverflowError, mt.extint_sub_128, a, b) - else: - d = mt.extint_sub_128(a, b) - if c != d: - assert_equal(d, c) - - -def test_neg_128(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - b = -a - c = mt.extint_neg_128(a) - if b != c: - assert_equal(c, b) - - -def test_shl_128(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - if a < 0: - b = -(((-a) << 1) & (2**128-1)) - else: - b = (a << 1) & (2**128-1) - c = mt.extint_shl_128(a) - if b != c: - assert_equal(c, b) - - -def test_shr_128(): - with exc_iter(INT128_VALUES) as it: - for a, in it: - if a < 0: - b = -((-a) >> 1) - else: - b = a >> 1 - c = mt.extint_shr_128(a) - if b != c: - assert_equal(c, b) - - -def test_gt_128(): - with exc_iter(INT128_VALUES, INT128_VALUES) as it: - for a, b in it: - c = a > b - d = mt.extint_gt_128(a, b) - if c != d: - assert_equal(d, c) - - -@pytest.mark.slow -def test_divmod_128_64(): - with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: - for a, b in it: - if a >= 0: - c, cr = divmod(a, b) - else: - c, cr = divmod(-a, b) - c = -c - cr = -cr - - d, dr = mt.extint_divmod_128_64(a, b) - - if c != d or d != dr or b*d + dr != a: - assert_equal(d, c) - assert_equal(dr, cr) - assert_equal(b*d + dr, a) - - -def test_floordiv_128_64(): - with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: - for a, b in it: - c = a // b - d = mt.extint_floordiv_128_64(a, b) - - if c != d: - assert_equal(d, c) - - -def test_ceildiv_128_64(): - with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: - for a, b in it: - c = (a + b - 1) // b - d = mt.extint_ceildiv_128_64(a, b) - - if c != d: - assert_equal(d, c) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_function_base.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_function_base.py deleted file mode 100644 index c8a7cb6..0000000 --- 
a/venv/lib/python3.7/site-packages/numpy/core/tests/test_function_base.py +++ /dev/null @@ -1,373 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy import ( - logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, - ndarray, sqrt, nextafter, stack - ) -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, - suppress_warnings - ) - - -class PhysicalQuantity(float): - def __new__(cls, value): - return float.__new__(cls, value) - - def __add__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(x) + float(self)) - __radd__ = __add__ - - def __sub__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(self) - float(x)) - - def __rsub__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(x) - float(self)) - - def __mul__(self, x): - return PhysicalQuantity(float(x) * float(self)) - __rmul__ = __mul__ - - def __div__(self, x): - return PhysicalQuantity(float(self) / float(x)) - - def __rdiv__(self, x): - return PhysicalQuantity(float(x) / float(self)) - - -class PhysicalQuantity2(ndarray): - __array_priority__ = 10 - - -class TestLogspace(object): - - def test_basic(self): - y = logspace(0, 6) - assert_(len(y) == 50) - y = logspace(0, 6, num=100) - assert_(y[-1] == 10 ** 6) - y = logspace(0, 6, endpoint=False) - assert_(y[-1] < 10 ** 6) - y = logspace(0, 6, num=7) - assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) - - def test_start_stop_array(self): - start = array([0., 1.]) - stop = array([6., 7.]) - t1 = logspace(start, stop, 6) - t2 = stack([logspace(_start, _stop, 6) - for _start, _stop in zip(start, stop)], axis=1) - assert_equal(t1, t2) - t3 = logspace(start, stop[0], 6) - t4 = stack([logspace(_start, stop[0], 6) - for _start in start], axis=1) - assert_equal(t3, t4) - t5 = logspace(start, stop, 6, axis=-1) - assert_equal(t5, t2.T) - - def test_dtype(self): - y = logspace(0, 6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = logspace(0, 6, dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = logspace(0, 6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - def test_physical_quantities(self): - a = PhysicalQuantity(1.0) - b = PhysicalQuantity(5.0) - assert_equal(logspace(a, b), logspace(1.0, 5.0)) - - def test_subclass(self): - a = array(1).view(PhysicalQuantity2) - b = array(7).view(PhysicalQuantity2) - ls = logspace(a, b) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, logspace(1.0, 7.0)) - ls = logspace(a, b, 1) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, logspace(1.0, 7.0, 1)) - - -class TestGeomspace(object): - - def test_basic(self): - y = geomspace(1, 1e6) - assert_(len(y) == 50) - y = geomspace(1, 1e6, num=100) - assert_(y[-1] == 10 ** 6) - y = geomspace(1, 1e6, endpoint=False) - assert_(y[-1] < 10 ** 6) - y = geomspace(1, 1e6, num=7) - assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) - - y = geomspace(8, 2, num=3) - assert_allclose(y, [8, 4, 2]) - assert_array_equal(y.imag, 0) - - y = geomspace(-1, -100, num=3) - assert_array_equal(y, [-1, -10, -100]) - assert_array_equal(y.imag, 0) - - y = geomspace(-100, -1, num=3) - assert_array_equal(y, [-100, -10, -1]) - assert_array_equal(y.imag, 0) - - def test_complex(self): - # Purely imaginary - y = geomspace(1j, 16j, num=5) - assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) - assert_array_equal(y.real, 0) - - y = geomspace(-4j, -324j, num=5) - assert_allclose(y, [-4j, 
-12j, -36j, -108j, -324j]) - assert_array_equal(y.real, 0) - - y = geomspace(1+1j, 1000+1000j, num=4) - assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) - - y = geomspace(-1+1j, -1000+1000j, num=4) - assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) - - # Logarithmic spirals - y = geomspace(-1, 1, num=3, dtype=complex) - assert_allclose(y, [-1, 1j, +1]) - - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(0+3j, 3+0j, 3) - assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) - y = geomspace(-3+0j, 0-3j, 3) - assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(-2-3j, 5+7j, 7) - assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, - 2.08885354-4.34146838j, 4.58345529-3.16355218j, - 6.41401745-0.55233457j, 6.75707386+3.11795092j, - 5+7j]) - - # Type promotion should prevent the -5 from becoming a NaN - y = geomspace(3j, -5, 2) - assert_allclose(y, [3j, -5]) - y = geomspace(-5, 3j, 2) - assert_allclose(y, [-5, 3j]) - - def test_dtype(self): - y = geomspace(1, 1e6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = geomspace(1, 1e6, dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = geomspace(1, 1e6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - # Native types - y = geomspace(1, 1e6, dtype=float) - assert_equal(y.dtype, dtype('float_')) - y = geomspace(1, 1e6, dtype=complex) - assert_equal(y.dtype, dtype('complex')) - - def test_start_stop_array_scalar(self): - lim1 = array([120, 100], dtype="int8") - lim2 = array([-120, -100], dtype="int8") - lim3 = array([1200, 1000], dtype="uint16") - t1 = geomspace(lim1[0], lim1[1], 5) - t2 = geomspace(lim2[0], lim2[1], 5) - t3 = geomspace(lim3[0], lim3[1], 5) - t4 = geomspace(120.0, 100.0, 5) - t5 = geomspace(-120.0, -100.0, 5) - t6 = geomspace(1200.0, 1000.0, 5) - - # t3 uses float32, t6 uses float64 - assert_allclose(t1, t4, rtol=1e-2) - assert_allclose(t2, t5, rtol=1e-2) - assert_allclose(t3, t6, rtol=1e-5) - - def test_start_stop_array(self): - # Try to use all special cases. 
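# The start/stop columns below cover: an increasing real sweep (1 -> 1e4),
# a decreasing one (32 -> 2), purely imaginary endpoints (1j -> 16j and
# -4j -> -324j), a complex ray (1+1j -> 10000+10000j), and a sign change
# through the complex plane (-1 -> 1).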
- start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) - stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) - t1 = geomspace(start, stop, 5) - t2 = stack([geomspace(_start, _stop, 5) - for _start, _stop in zip(start, stop)], axis=1) - assert_equal(t1, t2) - t3 = geomspace(start, stop[0], 5) - t4 = stack([geomspace(_start, stop[0], 5) - for _start in start], axis=1) - assert_equal(t3, t4) - t5 = geomspace(start, stop, 5, axis=-1) - assert_equal(t5, t2.T) - - def test_physical_quantities(self): - a = PhysicalQuantity(1.0) - b = PhysicalQuantity(5.0) - assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) - - def test_subclass(self): - a = array(1).view(PhysicalQuantity2) - b = array(7).view(PhysicalQuantity2) - gs = geomspace(a, b) - assert type(gs) is PhysicalQuantity2 - assert_equal(gs, geomspace(1.0, 7.0)) - gs = geomspace(a, b, 1) - assert type(gs) is PhysicalQuantity2 - assert_equal(gs, geomspace(1.0, 7.0, 1)) - - def test_bounds(self): - assert_raises(ValueError, geomspace, 0, 10) - assert_raises(ValueError, geomspace, 10, 0) - assert_raises(ValueError, geomspace, 0, 0) - - -class TestLinspace(object): - - def test_basic(self): - y = linspace(0, 10) - assert_(len(y) == 50) - y = linspace(2, 10, num=100) - assert_(y[-1] == 10) - y = linspace(2, 10, endpoint=False) - assert_(y[-1] < 10) - assert_raises(ValueError, linspace, 0, 10, num=-1) - - def test_corner(self): - y = list(linspace(0, 1, 1)) - assert_(y == [0.0], y) - assert_raises(TypeError, linspace, 0, 1, num=2.5) - - def test_type(self): - t1 = linspace(0, 1, 0).dtype - t2 = linspace(0, 1, 1).dtype - t3 = linspace(0, 1, 2).dtype - assert_equal(t1, t2) - assert_equal(t2, t3) - - def test_dtype(self): - y = linspace(0, 6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = linspace(0, 6, dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = linspace(0, 6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - def test_start_stop_array_scalar(self): - lim1 = array([-120, 100], dtype="int8") - lim2 = array([120, -100], dtype="int8") - lim3 = array([1200, 1000], dtype="uint16") - t1 = linspace(lim1[0], lim1[1], 5) - t2 = linspace(lim2[0], lim2[1], 5) - t3 = linspace(lim3[0], lim3[1], 5) - t4 = linspace(-120.0, 100.0, 5) - t5 = linspace(120.0, -100.0, 5) - t6 = linspace(1200.0, 1000.0, 5) - assert_equal(t1, t4) - assert_equal(t2, t5) - assert_equal(t3, t6) - - def test_start_stop_array(self): - start = array([-120, 120], dtype="int8") - stop = array([100, -100], dtype="int8") - t1 = linspace(start, stop, 5) - t2 = stack([linspace(_start, _stop, 5) - for _start, _stop in zip(start, stop)], axis=1) - assert_equal(t1, t2) - t3 = linspace(start, stop[0], 5) - t4 = stack([linspace(_start, stop[0], 5) - for _start in start], axis=1) - assert_equal(t3, t4) - t5 = linspace(start, stop, 5, axis=-1) - assert_equal(t5, t2.T) - - def test_complex(self): - lim1 = linspace(1 + 2j, 3 + 4j, 5) - t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) - lim2 = linspace(1j, 10, 5) - t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) - assert_equal(lim1, t1) - assert_equal(lim2, t2) - - def test_physical_quantities(self): - a = PhysicalQuantity(0.0) - b = PhysicalQuantity(1.0) - assert_equal(linspace(a, b), linspace(0.0, 1.0)) - - def test_subclass(self): - a = array(0).view(PhysicalQuantity2) - b = array(1).view(PhysicalQuantity2) - ls = linspace(a, b) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, linspace(0.0, 1.0)) - ls = linspace(a, b, 1) - assert type(ls) is PhysicalQuantity2 - assert_equal(ls, 
linspace(0.0, 1.0, 1)) - - def test_array_interface(self): - # Regression test for https://github.com/numpy/numpy/pull/6659 - # Ensure that start/stop can be objects that implement - # __array_interface__ and are convertible to numeric scalars - - class Arrayish(object): - """ - A generic object that supports the __array_interface__ and hence - can in principle be converted to a numeric scalar, but is not - otherwise recognized as numeric, but also happens to support - multiplication by floats. - - Data should be an object that implements the buffer interface, - and contains at least 4 bytes. - """ - - def __init__(self, data): - self._data = data - - @property - def __array_interface__(self): - return {'shape': (), 'typestr': ' 1) - assert_(info.minexp < -1) - assert_(info.maxexp > 1) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_half.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_half.py deleted file mode 100644 index 1e1e6d7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_half.py +++ /dev/null @@ -1,518 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import pytest - -import numpy as np -from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal - - -def assert_raises_fpe(strmatch, callable, *args, **kwargs): - try: - callable(*args, **kwargs) - except FloatingPointError as exc: - assert_(str(exc).find(strmatch) >= 0, - "Did not raise floating point %s error" % strmatch) - else: - assert_(False, - "Did not raise floating point %s error" % strmatch) - -class TestHalf(object): - def setup(self): - # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) - - # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( - (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), - np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] - - def test_half_conversions(self): - """Checks that all 16-bit values survive conversion - to/from 32-bit and 64-bit float""" - # Because the underlying routines preserve the NaN bits, every - # value is preserved when converting to/from other floats. - - # Convert from float32 back to float16 - b = np.array(self.all_f32, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert from float64 back to float16 - b = np.array(self.all_f64, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert float16 to longdouble and back - # This doesn't necessarily preserve the extra NaN bits, - # so exclude NaNs. 
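# (The extra NaN payload bits are not guaranteed to survive the trip
# through longdouble, which is why the comparison below is restricted
# to the non-NaN values in self.nonan_f16.)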
- a_ld = np.array(self.nonan_f16, dtype=np.longdouble) - b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Check the range for which all integers can be represented - i_int = np.arange(-2048, 2049) - i_f16 = np.array(i_int, dtype=float16) - j = np.array(i_f16, dtype=int) - assert_equal(i_int, j) - - @pytest.mark.parametrize("offset", [None, "up", "down"]) - @pytest.mark.parametrize("shift", [None, "up", "down"]) - @pytest.mark.parametrize("float_t", [np.float32, np.float64]) - def test_half_conversion_rounding(self, float_t, shift, offset): - # Assumes that round to even is used during casting. - max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) - - # Test all (positive) finite numbers, denormals are most interesting - # however: - f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16) - f16s_float = f16s_patterns.view(np.float16).astype(float_t) - - # Shift the values by half a bit up or a down (or do not shift), - if shift == "up": - f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:] - elif shift == "down": - f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1] - else: - f16s_float = f16s_float[1:-1] - - # Increase the float by a minimal value: - if offset == "up": - f16s_float = np.nextafter(f16s_float, float_t(1e50)) - elif offset == "down": - f16s_float = np.nextafter(f16s_float, float_t(-1e50)) - - # Convert back to float16 and its bit pattern: - res_patterns = f16s_float.astype(np.float16).view(np.uint16) - - # The above calculations tries the original values, or the exact - # mid points between the float16 values. It then further offsets them - # by as little as possible. If no offset occurs, "round to even" - # logic will be necessary, an arbitrarily small offset should cause - # normal up/down rounding always. - - # Calculate the expected pattern: - cmp_patterns = f16s_patterns[1:-1].copy() - - if shift == "down" and offset != "up": - shift_pattern = -1 - elif shift == "up" and offset != "down": - shift_pattern = 1 - else: - # There cannot be a shift, either shift is None, so all rounding - # will go back to original, or shift is reduced by offset too much. - shift_pattern = 0 - - # If rounding occurs, is it normal rounding or round to even? - if offset is None: - # Round to even occurs, modify only non-even, cast to allow + (-1) - cmp_patterns[0::2].view(np.int16)[...] += shift_pattern - else: - cmp_patterns.view(np.int16)[...] += shift_pattern - - assert_equal(res_patterns, cmp_patterns) - - @pytest.mark.parametrize(["float_t", "uint_t", "bits"], - [(np.float32, np.uint32, 23), - (np.float64, np.uint64, 52)]) - def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): - # Test specifically that all bits are considered when deciding - # whether round to even should occur (i.e. no bits are lost at the - # end. Compare also gh-12721. 
The most bits can get lost for the - # smallest denormal: - smallest_value = np.uint16(1).view(np.float16).astype(float_t) - assert smallest_value == 2**-24 - - # Will be rounded to zero based on round to even rule: - rounded_to_zero = smallest_value / float_t(2) - assert rounded_to_zero.astype(np.float16) == 0 - - # The significand will be all 0 for the float_t, test that we do not - # lose the lower ones of these: - for i in range(bits): - # slightly increasing the value should make it round up: - larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) - larger_value = larger_pattern.view(float_t) - assert larger_value.astype(np.float16) == smallest_value - - def test_nans_infs(self): - with np.errstate(all='ignore'): - # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) - assert_equal(np.spacing(float16(65504)), np.inf) - - # Check comparisons of all values with NaN - nan = float16(np.nan) - - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) - - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) - - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) - - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) - - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) - - assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) - - def test_half_values(self): - """Confirms a small number of known half values""" - a = np.array([1.0, -1.0, - 2.0, -2.0, - 0.0999755859375, 0.333251953125, # 1/10, 1/3 - 65504, -65504, # Maximum magnitude - 2.0**(-14), -2.0**(-14), # Minimum normal - 2.0**(-24), -2.0**(-24), # Minimum subnormal - 0, -1/1e1000, # Signed zeros - np.inf, -np.inf]) - b = np.array([0x3c00, 0xbc00, - 0x4000, 0xc000, - 0x2e66, 0x3555, - 0x7bff, 0xfbff, - 0x0400, 0x8400, - 0x0001, 0x8001, - 0x0000, 0x8000, - 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 - assert_equal(a, b) - - def test_half_rounding(self): - """Checks that rounding when converting to half is correct""" - a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal - 2.0**-25, # Underflows to zero (nearest even mode) - 2.0**-26, # Underflows to zero - 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) - 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) - 1.0+2.0**-12, # rounds to 1.0 - 65519, # rounds to 65504 - 65520], # rounds to inf - dtype=float64) - rounded = [2.0**-24, - 0.0, - 0.0, - 1.0+2.0**(-10), - 1.0, - 1.0, - 65504, - np.inf] - - # Check float64->float16 rounding - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - # Check float32->float16 rounding - a = np.array(a, dtype=float32) - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - def test_half_correctness(self): - """Take every finite float16, and check the casting functions with - a manual conversion.""" - - # Create an array of all finite float16s - a_bits = self.finite_f16.view(dtype=uint16) - - # Convert to 64-bit float manually - a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) - a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 - a_man = (a_bits & 0x03ff) * 2.0**(-10) - # Implicit bit of normalized floats - a_man[a_exp != -15] += 1 - # Denormalized exponent is -14 - a_exp[a_exp == -15] = -14 - - 
a_manual = a_sgn * a_man * 2.0**a_exp - - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] - if len(a32_fail) != 0: - bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, - "First non-equal is half value %x -> %g != %g" % - (self.finite_f16[bad_index], - self.finite_f32[bad_index], - a_manual[bad_index])) - - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] - if len(a64_fail) != 0: - bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, - "First non-equal is half value %x -> %g != %g" % - (self.finite_f16[bad_index], - self.finite_f64[bad_index], - a_manual[bad_index])) - - def test_half_ordering(self): - """Make sure comparisons are working right""" - - # All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() - - # 32-bit float copy - b = np.array(a, dtype=float32) - - # Should sort the same - a.sort() - b.sort() - assert_equal(a, b) - - # Comparisons should work - assert_((a[:-1] <= a[1:]).all()) - assert_(not (a[:-1] > a[1:]).any()) - assert_((a[1:] >= a[:-1]).all()) - assert_(not (a[1:] < a[:-1]).any()) - # All != except for +/-0 - assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) - assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) - - def test_half_funcs(self): - """Test the various ArrFuncs""" - - # fill - assert_equal(np.arange(10, dtype=float16), - np.arange(10, dtype=float32)) - - # fillwithscalar - a = np.zeros((5,), dtype=float16) - a.fill(1) - assert_equal(a, np.ones((5,), dtype=float16)) - - # nonzero and copyswap - a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) - assert_equal(a.nonzero()[0], - [2, 5, 6]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], - [2, 5, 6]) - - # dot - a = np.arange(0, 10, 0.5, dtype=float16) - b = np.ones((20,), dtype=float16) - assert_equal(np.dot(a, b), - 95) - - # argmax - a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 4) - a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 5) - - # getitem - a = np.arange(10, dtype=float16) - for i in range(10): - assert_equal(a.item(i), i) - - def test_spacing_nextafter(self): - """Test np.spacing and np.nextafter""" - # All non-negative finite #'s - a = np.arange(0x7c00, dtype=uint16) - hinf = np.array((np.inf,), dtype=float16) - a_f16 = a.view(dtype=float16) - - assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) - - assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) - assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) - - # switch to negatives - a |= 0x8000 - - assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) - assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) - - assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) - assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) - - def test_half_ufuncs(self): - """Test the various ufuncs""" - - a = np.array([0, 1, 2, 4, 2], dtype=float16) - b = np.array([-2, 5, 1, 4, 3], dtype=float16) - c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) - - assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) - assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) - assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) - assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) - - assert_equal(np.equal(a, b), [False, False, False, True, False]) - assert_equal(np.not_equal(a, b), [True, True, True, 
False, True]) - assert_equal(np.less(a, b), [False, True, False, False, True]) - assert_equal(np.less_equal(a, b), [False, True, False, True, True]) - assert_equal(np.greater(a, b), [True, False, True, False, False]) - assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) - assert_equal(np.logical_and(a, b), [False, True, True, True, True]) - assert_equal(np.logical_or(a, b), [True, True, True, True, True]) - assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) - assert_equal(np.logical_not(a), [True, False, False, False, False]) - - assert_equal(np.isnan(c), [False, False, False, True, False]) - assert_equal(np.isinf(c), [False, False, True, False, False]) - assert_equal(np.isfinite(c), [True, True, False, False, True]) - assert_equal(np.signbit(b), [True, False, False, False, False]) - - assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) - - assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) - - x = np.maximum(b, c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [0, 5, 1, 0, 6]) - - assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) - - x = np.minimum(b, c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [-2, -1, -np.inf, 0, 3]) - - assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) - assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) - assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) - assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) - - assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) - assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) - assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) - assert_equal(np.square(b), [4, 25, 1, 16, 9]) - assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) - assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) - assert_equal(np.conjugate(b), b) - assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) - assert_equal(np.negative(b), [2, -5, -1, -4, -3]) - assert_equal(np.positive(b), b) - assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) - assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) - assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) - assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) - - def test_half_coercion(self): - """Test that half gets coerced properly with the other types""" - a16 = np.array((1,), dtype=float16) - a32 = np.array((1,), dtype=float32) - b16 = float16(1) - b32 = float32(1) - - assert_equal(np.power(a16, 2).dtype, float16) - assert_equal(np.power(a16, 2.0).dtype, float16) - assert_equal(np.power(a16, b16).dtype, float16) - assert_equal(np.power(a16, b32).dtype, float16) - assert_equal(np.power(a16, a16).dtype, float16) - assert_equal(np.power(a16, a32).dtype, float32) - - assert_equal(np.power(b16, 2).dtype, float64) - assert_equal(np.power(b16, 2.0).dtype, float64) - assert_equal(np.power(b16, b16).dtype, float16) - assert_equal(np.power(b16, b32).dtype, float32) - assert_equal(np.power(b16, a16).dtype, float16) - assert_equal(np.power(b16, a32).dtype, float32) - - assert_equal(np.power(a32, a16).dtype, float32) - assert_equal(np.power(a32, b16).dtype, float32) - assert_equal(np.power(b32, a16).dtype, float16) - assert_equal(np.power(b32, b16).dtype, float32) - - @pytest.mark.skipif(platform.machine() == "armv5tel", - reason="See gh-413.") - def test_half_fpe(self): - with np.errstate(all='raise'): - sx16 = np.array((1e-4,), dtype=float16) - bx16 = np.array((1e4,), dtype=float16) - sy16 = float16(1e-4) - by16 = float16(1e4) - - # Underflow errors - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) - 
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14-2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-23), float16(4)) - - # Overflow errors - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) - assert_raises_fpe('overflow', lambda a, b:a+b, - float16(65504), float16(17)) - assert_raises_fpe('overflow', lambda a, b:a-b, - float16(-65504), float16(17)) - assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) - assert_raises_fpe('overflow', np.spacing, float16(65504)) - - # Invalid value errors - assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) - assert_raises_fpe('invalid', np.spacing, float16(np.inf)) - assert_raises_fpe('invalid', np.spacing, float16(np.nan)) - assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan)) - - # These should not raise - float16(65472)+float16(32) - float16(2**-13)/float16(2) - float16(2**-14)/float16(2**10) - np.spacing(float16(-65504)) - np.nextafter(float16(65504), float16(-np.inf)) - np.nextafter(float16(-65504), float16(np.inf)) - float16(2**-14)/float16(2**10) - float16(-2**-14)/float16(2**10) - float16(2**-14+2**-23)/float16(2) - float16(-2**-14-2**-23)/float16(2) - - def test_half_array_interface(self): - """Test that half is compatible with __array_interface__""" - class Dummy: - pass - - a = np.ones((1,), dtype=float16) - b = Dummy() - b.__array_interface__ = a.__array_interface__ - c = np.array(b) - assert_(c.dtype == float16) - assert_equal(a, c) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexerrors.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexerrors.py deleted file mode 100644 index 63b43c4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexerrors.py +++ /dev/null @@ -1,123 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_raises - -class TestIndexErrors(object): - '''Tests to exercise indexerrors not covered by other tests.''' - - def test_arraytypes_fasttake(self): - 'take from a 0-length dimension' - x = np.empty((2, 3, 0, 4)) - 
assert_raises(IndexError, x.take, [0], axis=2) - assert_raises(IndexError, x.take, [1], axis=2) - assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') - assert_raises(IndexError, x.take, [0], axis=2, mode='clip') - - def test_take_from_object(self): - # Check exception taking from object array - d = np.zeros(5, dtype=object) - assert_raises(IndexError, d.take, [6]) - - # Check exception taking from 0-d array - d = np.zeros((5, 0), dtype=object) - assert_raises(IndexError, d.take, [1], axis=1) - assert_raises(IndexError, d.take, [0], axis=1) - assert_raises(IndexError, d.take, [0]) - assert_raises(IndexError, d.take, [0], mode='wrap') - assert_raises(IndexError, d.take, [0], mode='clip') - - def test_multiindex_exceptions(self): - a = np.empty(5, dtype=object) - assert_raises(IndexError, a.item, 20) - a = np.empty((5, 0), dtype=object) - assert_raises(IndexError, a.item, (0, 0)) - - a = np.empty(5, dtype=object) - assert_raises(IndexError, a.itemset, 20, 0) - a = np.empty((5, 0), dtype=object) - assert_raises(IndexError, a.itemset, (0, 0), 0) - - def test_put_exceptions(self): - a = np.zeros((5, 5)) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5), dtype=object) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5, 0)) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5, 0), dtype=object) - assert_raises(IndexError, a.put, 100, 0) - - def test_iterators_exceptions(self): - "cases in iterators.c" - def assign(obj, ind, val): - obj[ind] = val - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a[0, 5, None, 2]) - assert_raises(IndexError, lambda: a[0, 5, 0, 2]) - assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) - assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) - - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a[0, 0, None, 2]) - assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[10]) - assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[10]) - assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[np.array(10)]) - assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[np.array(10)]) - assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[np.array([10])]) - assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[np.array([10])]) - assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) - - def test_mapping(self): - "cases from mapping.c" - - def assign(obj, ind, val): - obj[ind] = val - - a = np.zeros((0, 10)) - assert_raises(IndexError, lambda: a[12]) - - a = np.zeros((3, 5)) - assert_raises(IndexError, lambda: a[(10, 20)]) - assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) - a = np.zeros((3, 0)) - assert_raises(IndexError, lambda: a[(1, 0)]) - assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) - - a = np.zeros((10,)) - assert_raises(IndexError, lambda: assign(a, 10, 1)) - a = np.zeros((0,)) - assert_raises(IndexError, lambda: assign(a, 10, 1)) - - a = np.zeros((3, 5)) - assert_raises(IndexError, lambda: a[(1, [1, 20])]) - assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) - a = 
np.zeros((3, 0)) - assert_raises(IndexError, lambda: a[(1, [0, 1])]) - assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) - - def test_methods(self): - "cases from methods.c" - - a = np.zeros((3, 3)) - assert_raises(IndexError, lambda: a.item(100)) - assert_raises(IndexError, lambda: a.itemset(100, 1)) - a = np.zeros((0, 3)) - assert_raises(IndexError, lambda: a.item(100)) - assert_raises(IndexError, lambda: a.itemset(100, 1)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexing.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexing.py deleted file mode 100644 index 70a5a24..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_indexing.py +++ /dev/null @@ -1,1347 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -import functools -import operator -import pytest - -import numpy as np -from numpy.core._multiarray_tests import array_indexing -from itertools import product -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, assert_warns, - HAS_REFCOUNT, suppress_warnings, - ) - - -class TestIndexing(object): - def test_index_no_floats(self): - a = np.array([[[5]]]) - - assert_raises(IndexError, lambda: a[0.0]) - assert_raises(IndexError, lambda: a[0, 0.0]) - assert_raises(IndexError, lambda: a[0.0, 0]) - assert_raises(IndexError, lambda: a[0.0,:]) - assert_raises(IndexError, lambda: a[:, 0.0]) - assert_raises(IndexError, lambda: a[:, 0.0,:]) - assert_raises(IndexError, lambda: a[0.0,:,:]) - assert_raises(IndexError, lambda: a[0, 0, 0.0]) - assert_raises(IndexError, lambda: a[0.0, 0, 0]) - assert_raises(IndexError, lambda: a[0, 0.0, 0]) - assert_raises(IndexError, lambda: a[-1.4]) - assert_raises(IndexError, lambda: a[0, -1.4]) - assert_raises(IndexError, lambda: a[-1.4, 0]) - assert_raises(IndexError, lambda: a[-1.4,:]) - assert_raises(IndexError, lambda: a[:, -1.4]) - assert_raises(IndexError, lambda: a[:, -1.4,:]) - assert_raises(IndexError, lambda: a[-1.4,:,:]) - assert_raises(IndexError, lambda: a[0, 0, -1.4]) - assert_raises(IndexError, lambda: a[-1.4, 0, 0]) - assert_raises(IndexError, lambda: a[0, -1.4, 0]) - assert_raises(IndexError, lambda: a[0.0:, 0.0]) - assert_raises(IndexError, lambda: a[0.0:, 0.0,:]) - - def test_slicing_no_floats(self): - a = np.array([[5]]) - - # start as float. - assert_raises(TypeError, lambda: a[0.0:]) - assert_raises(TypeError, lambda: a[0:, 0.0:2]) - assert_raises(TypeError, lambda: a[0.0::2, :0]) - assert_raises(TypeError, lambda: a[0.0:1:2,:]) - assert_raises(TypeError, lambda: a[:, 0.0:]) - # stop as float. - assert_raises(TypeError, lambda: a[:0.0]) - assert_raises(TypeError, lambda: a[:0, 1:2.0]) - assert_raises(TypeError, lambda: a[:0.0:2, :0]) - assert_raises(TypeError, lambda: a[:0.0,:]) - assert_raises(TypeError, lambda: a[:, 0:4.0:2]) - # step as float. - assert_raises(TypeError, lambda: a[::1.0]) - assert_raises(TypeError, lambda: a[0:, :2:2.0]) - assert_raises(TypeError, lambda: a[1::4.0, :0]) - assert_raises(TypeError, lambda: a[::5.0,:]) - assert_raises(TypeError, lambda: a[:, 0:4:2.0]) - # mixed. - assert_raises(TypeError, lambda: a[1.0:2:2.0]) - assert_raises(TypeError, lambda: a[1.0::2.0]) - assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) - assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) - assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:]) - assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) - # should still get the DeprecationWarning if step = 0. 
- assert_raises(TypeError, lambda: a[::0.0]) - - def test_index_no_array_to_index(self): - # No non-scalar arrays. - a = np.array([[[1]]]) - - assert_raises(TypeError, lambda: a[a:a:a]) - - def test_none_index(self): - # `None` index adds newaxis - a = np.array([1, 2, 3]) - assert_equal(a[None], a[np.newaxis]) - assert_equal(a[None].ndim, a.ndim + 1) - - def test_empty_tuple_index(self): - # Empty tuple index creates a view - a = np.array([1, 2, 3]) - assert_equal(a[()], a) - assert_(a[()].base is a) - a = np.array(0) - assert_(isinstance(a[()], np.int_)) - - def test_void_scalar_empty_tuple(self): - s = np.zeros((), dtype='V4') - assert_equal(s[()].dtype, s.dtype) - assert_equal(s[()], s) - assert_equal(type(s[...]), np.ndarray) - - def test_same_kind_index_casting(self): - # Indexes should be cast with same-kind and not safe, even if that - # is somewhat unsafe. So test various different code paths. - index = np.arange(5) - u_index = index.astype(np.uintp) - arr = np.arange(10) - - assert_array_equal(arr[index], arr[u_index]) - arr[u_index] = np.arange(5) - assert_array_equal(arr, np.arange(10)) - - arr = np.arange(10).reshape(5, 2) - assert_array_equal(arr[index], arr[u_index]) - - arr[u_index] = np.arange(5)[:,None] - assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) - - arr = np.arange(25).reshape(5, 5) - assert_array_equal(arr[u_index, u_index], arr[index, index]) - - def test_empty_fancy_index(self): - # Empty list index creates an empty array - # with the same dtype (but with weird shape) - a = np.array([1, 2, 3]) - assert_equal(a[[]], []) - assert_equal(a[[]].dtype, a.dtype) - - b = np.array([], dtype=np.intp) - assert_equal(a[[]], []) - assert_equal(a[[]].dtype, a.dtype) - - b = np.array([]) - assert_raises(IndexError, a.__getitem__, b) - - def test_ellipsis_index(self): - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - assert_(a[...] is not a) - assert_equal(a[...], a) - # `a[...]` was `a` in numpy <1.9. 
- assert_(a[...].base is a) - - # Slicing with ellipsis can skip an - # arbitrary number of dimensions - assert_equal(a[0, ...], a[0]) - assert_equal(a[0, ...], a[0,:]) - assert_equal(a[..., 0], a[:, 0]) - - # Slicing with ellipsis always results - # in an array, not a scalar - assert_equal(a[0, ..., 1], np.array(2)) - - # Assignment with `(Ellipsis,)` on 0-d arrays - b = np.array(1) - b[(Ellipsis,)] = 2 - assert_equal(b, 2) - - def test_single_int_index(self): - # Single integer index selects one row - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - - assert_equal(a[0], [1, 2, 3]) - assert_equal(a[-1], [7, 8, 9]) - - # Index out of bounds produces IndexError - assert_raises(IndexError, a.__getitem__, 1 << 30) - # Index overflow produces IndexError - assert_raises(IndexError, a.__getitem__, 1 << 64) - - def test_single_bool_index(self): - # Single boolean index - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - - assert_equal(a[np.array(True)], a[None]) - assert_equal(a[np.array(False)], a[None][0:0]) - - def test_boolean_shape_mismatch(self): - arr = np.ones((5, 4, 3)) - - index = np.array([True]) - assert_raises(IndexError, arr.__getitem__, index) - - index = np.array([False] * 6) - assert_raises(IndexError, arr.__getitem__, index) - - index = np.zeros((4, 4), dtype=bool) - assert_raises(IndexError, arr.__getitem__, index) - - assert_raises(IndexError, arr.__getitem__, (slice(None), index)) - - def test_boolean_indexing_onedim(self): - # Indexing a 2-dimensional array with - # boolean array of length one - a = np.array([[ 0., 0., 0.]]) - b = np.array([ True], dtype=bool) - assert_equal(a[b], a) - # boolean assignment - a[b] = 1. - assert_equal(a, [[1., 1., 1.]]) - - def test_boolean_assignment_value_mismatch(self): - # A boolean assignment should fail when the shape of the values - # cannot be broadcast to the subscription. (see also gh-3458) - a = np.arange(4) - - def f(a, v): - a[a > -1] = v - - assert_raises(ValueError, f, a, []) - assert_raises(ValueError, f, a, [1, 2, 3]) - assert_raises(ValueError, f, a[:1], [1, 2, 3]) - - def test_boolean_assignment_needs_api(self): - # See also gh-7666 - # This caused a segfault on Python 2 due to the GIL not being - # held when the iterator does not need it, but the transfer function - # does - arr = np.zeros(1000) - indx = np.zeros(1000, dtype=bool) - indx[:100] = True - arr[indx] = np.ones(100, dtype=object) - - expected = np.zeros(1000) - expected[:100] = 1 - assert_array_equal(arr, expected) - - def test_boolean_indexing_twodim(self): - # Indexing a 2-dimensional array with - # 2-dimensional boolean array - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - b = np.array([[ True, False, True], - [False, True, False], - [ True, False, True]]) - assert_equal(a[b], [1, 3, 5, 7, 9]) - assert_equal(a[b[1]], [[4, 5, 6]]) - assert_equal(a[b[0]], a[b[2]]) - - # boolean assignment - a[b] = 0 - assert_equal(a, [[0, 2, 0], - [4, 0, 6], - [0, 8, 0]]) - - def test_boolean_indexing_list(self): - # Regression test for #13715. It's a use-after-free bug which the - # test won't directly catch, but it will show up in valgrind. - a = np.array([1, 2, 3]) - b = [True, False, True] - # Two variants of the test because the first takes a fast path - assert_equal(a[b], [1, 3]) - assert_equal(a[None, b], [[1, 3]]) - - def test_reverse_strides_and_subspace_bufferinit(self): - # This tests that the strides are not reversed for simple and - # subspace fancy indexing. 
- a = np.ones(5) - b = np.zeros(5, dtype=np.intp)[::-1] - c = np.arange(5)[::-1] - - a[b] = c - # If the strides are not reversed, the 0 in the arange comes last. - assert_equal(a[0], 0) - - # This also tests that the subspace buffer is initialized: - a = np.ones((5, 2)) - c = np.arange(10).reshape(5, 2)[::-1] - a[b, :] = c - assert_equal(a[0], [0, 1]) - - def test_reversed_strides_result_allocation(self): - # Test a bug when calculating the output strides for a result array - # when the subspace size was 1 (and test other cases as well) - a = np.arange(10)[:, None] - i = np.arange(10)[::-1] - assert_array_equal(a[i], a[i.copy('C')]) - - a = np.arange(20).reshape(-1, 2) - - def test_uncontiguous_subspace_assignment(self): - # During development there was a bug activating skip logic - # based on ndim instead of size. - a = np.full((3, 4, 2), -1) - b = np.full((3, 4, 2), -1) - - a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T - b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() - - assert_equal(a, b) - - def test_too_many_fancy_indices_special_case(self): - # Just documents behaviour, this is a small limitation. - a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS - assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32) - - def test_scalar_array_bool(self): - # NumPy bools can be used as boolean index (python ones as of yet not) - a = np.array(1) - assert_equal(a[np.bool_(True)], a[np.array(True)]) - assert_equal(a[np.bool_(False)], a[np.array(False)]) - - # After deprecating bools as integers: - #a = np.array([0,1,2]) - #assert_equal(a[True, :], a[None, :]) - #assert_equal(a[:, True], a[:, None]) - # - #assert_(not np.may_share_memory(a, a[True, :])) - - def test_everything_returns_views(self): - # Before `...` would return a itself. - a = np.arange(5) - - assert_(a is not a[()]) - assert_(a is not a[...]) - assert_(a is not a[:]) - - def test_broaderrors_indexing(self): - a = np.zeros((5, 5)) - assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) - assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) - - def test_trivial_fancy_out_of_bounds(self): - a = np.zeros(5) - ind = np.ones(20, dtype=np.intp) - ind[-1] = 10 - assert_raises(IndexError, a.__getitem__, ind) - assert_raises(IndexError, a.__setitem__, ind, 0) - ind = np.ones(20, dtype=np.intp) - ind[0] = 11 - assert_raises(IndexError, a.__getitem__, ind) - assert_raises(IndexError, a.__setitem__, ind, 0) - - def test_trivial_fancy_not_possible(self): - # Test that the fast path for trivial assignment is not incorrectly - # used when the index is not contiguous or 1D, see also gh-11467. - a = np.arange(6) - idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] - assert_array_equal(a[idx], idx) - - # this case must not go into the fast path, note that idx is - # a non-contiguous, non-1D array here. - a[idx] = -1 - res = np.arange(6) - res[0] = -1 - res[3] = -1 - assert_array_equal(a, res) - - def test_nonbaseclass_values(self): - class SubClass(np.ndarray): - def __array_finalize__(self, old): - # Have array finalize do funny things - self.fill(99) - - a = np.zeros((5, 5)) - s = a.copy().view(type=SubClass) - s.fill(1) - - a[[0, 1, 2, 3, 4], :] = s - assert_((a == 1).all()) - - # Subspace is last, so transposing might want to finalize - a[:, [0, 1, 2, 3, 4]] = s - assert_((a == 1).all()) - - a.fill(0) - a[...]
= s - assert_((a == 1).all()) - - def test_subclass_writeable(self): - d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], - dtype=[('target', 'S20'), ('V_mag', '>f4')]) - ind = np.array([False, True, True], dtype=bool) - assert_(d[ind].flags.writeable) - ind = np.array([0, 1]) - assert_(d[ind].flags.writeable) - assert_(d[...].flags.writeable) - assert_(d[0].flags.writeable) - - def test_memory_order(self): - # This is not necessary to preserve. Memory layouts for - # more complex indices are not as simple. - a = np.arange(10) - b = np.arange(10).reshape(5,2).T - assert_(a[b].flags.f_contiguous) - - # Takes a different implementation branch: - a = a.reshape(-1, 1) - assert_(a[b, 0].flags.f_contiguous) - - def test_scalar_return_type(self): - # Full scalar indices should return scalars and object - # arrays should not call PyArray_Return on their items - class Zero(object): - # The most basic valid indexing - def __index__(self): - return 0 - - z = Zero() - - class ArrayLike(object): - # Simple array, should behave like the array - def __array__(self): - return np.array(0) - - a = np.zeros(()) - assert_(isinstance(a[()], np.float_)) - a = np.zeros(1) - assert_(isinstance(a[z], np.float_)) - a = np.zeros((1, 1)) - assert_(isinstance(a[z, np.array(0)], np.float_)) - assert_(isinstance(a[z, ArrayLike()], np.float_)) - - # And object arrays do not call it too often: - b = np.array(0) - a = np.array(0, dtype=object) - a[()] = b - assert_(isinstance(a[()], np.ndarray)) - a = np.array([b, None]) - assert_(isinstance(a[z], np.ndarray)) - a = np.array([[b, None]]) - assert_(isinstance(a[z, np.array(0)], np.ndarray)) - assert_(isinstance(a[z, ArrayLike()], np.ndarray)) - - def test_small_regressions(self): - # Reference count of intp for index checks - a = np.array([0]) - if HAS_REFCOUNT: - refcount = sys.getrefcount(np.dtype(np.intp)) - # item setting always checks indices in separate function: - a[np.array([0], dtype=np.intp)] = 1 - a[np.array([0], dtype=np.uint8)] = 1 - assert_raises(IndexError, a.__setitem__, - np.array([1], dtype=np.intp), 1) - assert_raises(IndexError, a.__setitem__, - np.array([1], dtype=np.uint8), 1) - - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) - - def test_unaligned(self): - v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] - d = v.view(np.dtype("S8")) - # unaligned source - x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] - x = x.view(np.dtype("S8")) - x[...] = np.array("b" * 8, dtype="S") - b = np.arange(d.size) - #trivial - assert_equal(d[b], d) - d[b] = x - # nontrivial - # unaligned index array - b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] - b = b.view(np.intp)[:d.size] - b[...] 
= np.arange(d.size) - assert_equal(d[b.astype(np.int16)], d) - d[b.astype(np.int16)] = x - # boolean - d[b % 2 == 0] - d[b % 2 == 0] = x[::2] - - def test_tuple_subclass(self): - arr = np.ones((5, 5)) - - # A tuple subclass should also be an nd-index - class TupleSubclass(tuple): - pass - index = ([1], [1]) - index = TupleSubclass(index) - assert_(arr[index].shape == (1,)) - # Unlike the non nd-index: - assert_(arr[index,].shape != (1,)) - - def test_broken_sequence_not_nd_index(self): - # See gh-5063: - # If we have an object which claims to be a sequence, but fails - # on item getting, this should not be converted to an nd-index (tuple) - # If this object happens to be a valid index otherwise, it should work - # This object here is very dubious and probably bad though: - class SequenceLike(object): - def __index__(self): - return 0 - - def __len__(self): - return 1 - - def __getitem__(self, item): - raise IndexError('Not possible') - - arr = np.arange(10) - assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) - - # also test that field indexing does not segfault - # for a similar reason, by indexing a structured array - arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) - assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) - - def test_indexing_array_weird_strides(self): - # See also gh-6221 - # the shapes used here come from the issue and create the correct - # size for the iterator buffering size. - x = np.ones(10) - x2 = np.ones((10, 2)) - ind = np.arange(10)[:, None, None, None] - ind = np.broadcast_to(ind, (10, 55, 4, 4)) - - # single advanced index case - assert_array_equal(x[ind], x[ind.copy()]) - # higher dimensional advanced index - zind = np.zeros(4, dtype=np.intp) - assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) - - def test_indexing_array_negative_strides(self): - # From gh-8264, - # core dumps if negative strides are used in iteration - arro = np.zeros((4, 4)) - arr = arro[::-1, ::-1] - - slices = (slice(None), [0, 1, 2, 3]) - arr[slices] = 10 - assert_array_equal(arr, 10.) - -class TestFieldIndexing(object): - def test_scalar_return_type(self): - # Field access on an array should return an array, even if it - # is 0-d. - a = np.zeros((), [('a','f8')]) - assert_(isinstance(a['a'], np.ndarray)) - assert_(isinstance(a[['a']], np.ndarray)) - - -class TestBroadcastedAssignments(object): - def assign(self, a, ind, val): - a[ind] = val - return a - - def test_prepending_ones(self): - a = np.zeros((3, 2)) - - a[...] = np.ones((1, 3, 2)) - # Fancy with subspace with and without transpose - a[[0, 1, 2], :] = np.ones((1, 3, 2)) - a[:, [0, 1]] = np.ones((1, 3, 2)) - # Fancy without subspace (with broadcasting) - a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) - - def test_prepend_not_one(self): - assign = self.assign - s_ = np.s_ - a = np.zeros(5) - - # Too large and not only ones. 
- assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) - - def test_simple_broadcasting_errors(self): - assign = self.assign - s_ = np.s_ - a = np.zeros((5, 1)) - - assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) - assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) - assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) - assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) - assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) - - def test_index_is_larger(self): - # Simple case of fancy index broadcasting of the index. - a = np.zeros((5, 5)) - a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] - - assert_((a[:3, :3] == [2, 3, 4]).all()) - - def test_broadcast_subspace(self): - a = np.zeros((100, 100)) - v = np.arange(100)[:,None] - b = np.arange(100)[::-1] - a[b] = v - assert_((a[::-1] == v).all()) - - -class TestSubclasses(object): - def test_basic(self): - # Test that indexing in various ways produces SubClass instances, - # and that the base is set up correctly: the original subclass - # instance for views, and a new ndarray for advanced/boolean indexing - # where a copy was made (the latter a regression test for gh-11983). - class SubClass(np.ndarray): - pass - - a = np.arange(5) - s = a.view(SubClass) - s_slice = s[:3] - assert_(type(s_slice) is SubClass) - assert_(s_slice.base is s) - assert_array_equal(s_slice, a[:3]) - - s_fancy = s[[0, 1, 2]] - assert_(type(s_fancy) is SubClass) - assert_(s_fancy.base is not s) - assert_(type(s_fancy.base) is np.ndarray) - assert_array_equal(s_fancy, a[[0, 1, 2]]) - assert_array_equal(s_fancy.base, a[[0, 1, 2]]) - - s_bool = s[s > 0] - assert_(type(s_bool) is SubClass) - assert_(s_bool.base is not s) - assert_(type(s_bool.base) is np.ndarray) - assert_array_equal(s_bool, a[a > 0]) - assert_array_equal(s_bool.base, a[a > 0]) - - def test_fancy_on_read_only(self): - # Test that fancy indexing on read-only SubClass does not make a - # read-only copy (gh-14132) - class SubClass(np.ndarray): - pass - - a = np.arange(5) - s = a.view(SubClass) - s.flags.writeable = False - s_fancy = s[[0, 1, 2]] - assert_(s_fancy.flags.writeable) - - - def test_finalize_gets_full_info(self): - # Array finalize should be called on the filled array. - class SubClass(np.ndarray): - def __array_finalize__(self, old): - self.finalize_status = np.array(self) - self.old = old - - s = np.arange(10).view(SubClass) - new_s = s[:3] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - new_s = s[[0,1,2,3]] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - new_s = s[s > 0] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_slice_decref_getsetslice(self): - # See gh-10066, a temporary slice object should be discarded. - # This test is only really interesting on Python 2 since - # it goes through `__set/getslice__` here and can probably be - # removed. Use 0:7 to make sure it is never None:7.
- class KeepIndexObject(np.ndarray): - def __getitem__(self, indx): - self.indx = indx - if indx == slice(0, 7): - raise ValueError - - def __setitem__(self, indx, val): - self.indx = indx - if indx == slice(0, 4): - raise ValueError - - k = np.array([1]).view(KeepIndexObject) - k[0:5] - assert_equal(k.indx, slice(0, 5)) - assert_equal(sys.getrefcount(k.indx), 2) - try: - k[0:7] - raise AssertionError - except ValueError: - # The exception holds a reference to the slice so clear on Py2 - if hasattr(sys, 'exc_clear'): - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - sys.exc_clear() - assert_equal(k.indx, slice(0, 7)) - assert_equal(sys.getrefcount(k.indx), 2) - - k[0:3] = 6 - assert_equal(k.indx, slice(0, 3)) - assert_equal(sys.getrefcount(k.indx), 2) - try: - k[0:4] = 2 - raise AssertionError - except ValueError: - # The exception holds a reference to the slice so clear on Py2 - if hasattr(sys, 'exc_clear'): - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - sys.exc_clear() - assert_equal(k.indx, slice(0, 4)) - assert_equal(sys.getrefcount(k.indx), 2) - - -class TestFancyIndexingCast(object): - def test_boolean_index_cast_assign(self): - # Setup the boolean index and float arrays. - shape = (8, 63) - bool_index = np.zeros(shape).astype(bool) - bool_index[0, 1] = True - zero_array = np.zeros(shape) - - # Assigning float is fine. - zero_array[bool_index] = np.array([1]) - assert_equal(zero_array[0, 1], 1) - - # Fancy indexing works, although we get a cast warning. - assert_warns(np.ComplexWarning, - zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) - assert_equal(zero_array[0, 1], 2) # No complex part - - # Cast complex to float, throwing away the imaginary portion. - assert_warns(np.ComplexWarning, - zero_array.__setitem__, bool_index, np.array([1j])) - assert_equal(zero_array[0, 1], 0) - -class TestFancyIndexingEquivalence(object): - def test_object_assign(self): - # Check that the field and object special case using copyto is active. - # The right hand side cannot be converted to an array here. - a = np.arange(5, dtype=object) - b = a.copy() - a[:3] = [1, (1,2), 3] - b[[0, 1, 2]] = [1, (1,2), 3] - assert_array_equal(a, b) - - # test same for subspace fancy indexing - b = np.arange(5, dtype=object)[None, :] - b[[0], :3] = [[1, (1,2), 3]] - assert_array_equal(a, b[0]) - - # Check that swapping of axes works. - # There was a bug that made the later assignment throw a ValueError - # due to an incorrectly transposed temporary right hand side (gh-5714) - b = b.T - b[:3, [0]] = [[1], [(1,2)], [3]] - assert_array_equal(a, b[:, 0]) - - # Another test for the memory order of the subspace - arr = np.ones((3, 4, 5), dtype=object) - # Equivalent slicing assignment for comparison - cmp_arr = arr.copy() - cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] - arr[[0], ...] = [[[1], [2], [3], [4]]] - assert_array_equal(arr, cmp_arr) - arr = arr.copy('F') - arr[[0], ...] = [[[1], [2], [3], [4]]] - assert_array_equal(arr, cmp_arr) - - def test_cast_equivalence(self): - # Yes, normal slicing uses unsafe casting. - a = np.arange(5) - b = a.copy() - - a[:3] = np.array(['2', '-3', '-1']) - b[[0, 2, 1]] = np.array(['2', '-1', '-3']) - assert_array_equal(a, b) - - # test the same for subspace fancy indexing - b = np.arange(5)[None, :] - b[[0], :3] = np.array([['2', '-3', '-1']]) - assert_array_equal(a, b[0]) - - -class TestMultiIndexingAutomated(object): - """ - These tests use code to mimic the C-Code indexing for selection.
- - NOTE: - - * This still lacks tests for complex item setting. - * If you change behavior of indexing, you might want to modify - these tests to try more combinations. - * Behavior was written to match numpy version 1.8. (though a - first version matched 1.7.) - * Only tuple indices are supported by the mimicking code. - (and tested as of writing this) - * Error types should match most of the time as long as there - is only one error. For multiple errors, what gets raised - will usually not be the same one. They are *not* tested. - - Update 2016-11-30: It is probably not worth maintaining this test - indefinitely and it can be dropped if maintenance becomes a burden. - - """ - - def setup(self): - self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) - self.b = np.empty((3, 0, 5, 6)) - self.complex_indices = ['skip', Ellipsis, - 0, - # Boolean indices, up to 3-d for some special cases of eating up - # dimensions, also need to test all False - np.array([True, False, False]), - np.array([[True, False], [False, True]]), - np.array([[[False, False], [False, False]]]), - # Some slices: - slice(-5, 5, 2), - slice(1, 1, 100), - slice(4, -1, -2), - slice(None, None, -3), - # Some Fancy indexes: - np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast - np.array([0, 1, -2]), - np.array([[2], [0], [1]]), - np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), - np.array([2, -1], dtype=np.int8), - np.zeros([1]*31, dtype=int), # trigger too large array. - np.array([0., 1.])] # invalid datatype - # Some simpler indices that still cover a bit more - self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), - 'skip'] - # Very simple ones to fill the rest: - self.fill_indices = [slice(None, None), 0] - - def _get_multi_index(self, arr, indices): - """Mimic multi dimensional indexing. - - Parameters - ---------- - arr : ndarray - Array to be indexed. - indices : tuple of index objects - - Returns - ------- - out : ndarray - An array equivalent to the indexing operation (but always a copy). - `arr[indices]` should be identical. - no_copy : bool - Whether the indexing operation can be done without a copy. If - this is `True`, `np.may_share_memory(arr, arr[indices])` should - be `True` (with some exceptions for scalars and possibly 0-d - arrays). - - Notes - ----- - While the function mostly matches the errors of normal indexing, - this is not guaranteed in every case. - """ - in_indices = list(indices) - indices = [] - # if False, this is a fancy or boolean index - no_copy = True - # number of fancy/scalar indexes that are not consecutive - num_fancy = 0 - # number of dimensions indexed by a "fancy" index - fancy_dim = 0 - # NOTE: This is a funny twist (and probably OK to change). - # The boolean array has illegal indexes, but this is - # allowed if the broadcast fancy-indices are 0-sized. - # This variable is to catch that case. - error_unless_broadcast_to_empty = False - - # We need to handle Ellipsis and make arrays from indices, also - # check if this is fancy indexing (set no_copy). - ndim = 0 - ellipsis_pos = None # define here mostly to replace all but first.
- for i, indx in enumerate(in_indices): - if indx is None: - continue - if isinstance(indx, np.ndarray) and indx.dtype == bool: - no_copy = False - if indx.ndim == 0: - raise IndexError - # boolean indices can have higher dimensions - ndim += indx.ndim - fancy_dim += indx.ndim - continue - if indx is Ellipsis: - if ellipsis_pos is None: - ellipsis_pos = i - continue # do not increment ndim counter - raise IndexError - if isinstance(indx, slice): - ndim += 1 - continue - if not isinstance(indx, np.ndarray): - # This could be open for changes in numpy. - # numpy should maybe raise an error if casting to intp - # is not safe. It rejects np.array([1., 2.]) but not - # [1., 2.] as index (same for e.g. np.take). - # (Note the importance of empty lists if changing this here) - try: - indx = np.array(indx, dtype=np.intp) - except ValueError: - raise IndexError - in_indices[i] = indx - elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': - raise IndexError('arrays used as indices must be of ' - 'integer (or boolean) type') - if indx.ndim != 0: - no_copy = False - ndim += 1 - fancy_dim += 1 - - if arr.ndim - ndim < 0: - # we can't take more dimensions than we have, not even for 0-d - # arrays, since a[()] makes sense, but not a[(),]. We will - # raise an error later on, unless a broadcasting error occurs - # first. - raise IndexError - - if ndim == 0 and None not in in_indices: - # Well we have no indexes or one Ellipsis. This is legal. - return arr.copy(), no_copy - - if ellipsis_pos is not None: - in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * - (arr.ndim - ndim)) - - for ax, indx in enumerate(in_indices): - if isinstance(indx, slice): - # convert to an index array - indx = np.arange(*indx.indices(arr.shape[ax])) - indices.append(['s', indx]) - continue - elif indx is None: - # this is like taking a slice with one element from a new axis: - indices.append(['n', np.array([0], dtype=np.intp)]) - arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) - continue - if isinstance(indx, np.ndarray) and indx.dtype == bool: - if indx.shape != arr.shape[ax:ax+indx.ndim]: - raise IndexError - - try: - flat_indx = np.ravel_multi_index(np.nonzero(indx), - arr.shape[ax:ax+indx.ndim], mode='raise') - except Exception: - error_unless_broadcast_to_empty = True - # fill with 0s instead, and raise error later - flat_indx = np.array([0]*indx.sum(), dtype=np.intp) - # concatenate axis into a single one: - if indx.ndim != 0: - arr = arr.reshape((arr.shape[:ax] - + (np.prod(arr.shape[ax:ax+indx.ndim]),) - + arr.shape[ax+indx.ndim:])) - indx = flat_indx - else: - # This could be changed, a 0-d boolean index can - # make sense (even outside the 0-d indexed array case) - # Note that originally this could be interpreted as an - # integer in the full integer special case. - raise IndexError - else: - # If the index is a singleton, the bounds check is done - # before the broadcasting. This used to be different in <1.9 - if indx.ndim == 0: - if indx >= arr.shape[ax] or indx < -arr.shape[ax]: - raise IndexError - if indx.ndim == 0: - # The index is a scalar. This used to be two fold, but if - # fancy indexing was active, the check was done later, - # possibly after broadcasting it away (1.7. or earlier). - # Now it is always done. - if indx >= arr.shape[ax] or indx < - arr.shape[ax]: - raise IndexError - if (len(indices) > 0 and - indices[-1][0] == 'f' and - ax != ellipsis_pos): - # NOTE: There could still have been a 0-sized Ellipsis - # between them. Checked that with ellipsis_pos.
- indices[-1].append(indx) - else: - # We have a fancy index that is not after an existing one. - # NOTE: A 0-d array triggers this as well, while one may - # expect it to not trigger it, since a scalar would not be - # considered fancy indexing. - num_fancy += 1 - indices.append(['f', indx]) - - if num_fancy > 1 and not no_copy: - # We have to flush the fancy indexes left - new_indices = indices[:] - axes = list(range(arr.ndim)) - fancy_axes = [] - new_indices.insert(0, ['f']) - ni = 0 - ai = 0 - for indx in indices: - ni += 1 - if indx[0] == 'f': - new_indices[0].extend(indx[1:]) - del new_indices[ni] - ni -= 1 - for ax in range(ai, ai + len(indx[1:])): - fancy_axes.append(ax) - axes.remove(ax) - ai += len(indx) - 1 # axis we are at - indices = new_indices - # and now we need to transpose arr: - arr = arr.transpose(*(fancy_axes + axes)) - - # We only have one 'f' index now and arr is transposed accordingly. - # Now handle newaxis by reshaping... - ax = 0 - for indx in indices: - if indx[0] == 'f': - if len(indx) == 1: - continue - # First of all, reshape arr to combine fancy axes into one: - orig_shape = arr.shape - orig_slice = orig_shape[ax:ax + len(indx[1:])] - arr = arr.reshape((arr.shape[:ax] - + (np.prod(orig_slice).astype(int),) - + arr.shape[ax + len(indx[1:]):])) - - # Check if broadcasting works - res = np.broadcast(*indx[1:]) - # unfortunately the indices might be out of bounds. So check - # that first, and use mode='wrap' then. However only if - # there are any indices... - if res.size != 0: - if error_unless_broadcast_to_empty: - raise IndexError - for _indx, _size in zip(indx[1:], orig_slice): - if _indx.size == 0: - continue - if np.any(_indx >= _size) or np.any(_indx < -_size): - raise IndexError - if len(indx[1:]) == len(orig_slice): - if np.product(orig_slice) == 0: - # Work around for a crash or IndexError with 'wrap' - # in some 0-sized cases. - try: - mi = np.ravel_multi_index(indx[1:], orig_slice, - mode='raise') - except Exception: - # This happens with 0-sized orig_slice (sometimes?) - # here it is a ValueError, but indexing gives a: - raise IndexError('invalid index into 0-sized') - else: - mi = np.ravel_multi_index(indx[1:], orig_slice, - mode='wrap') - else: - # Maybe never happens... - raise ValueError - arr = arr.take(mi.ravel(), axis=ax) - try: - arr = arr.reshape((arr.shape[:ax] - + mi.shape - + arr.shape[ax+1:])) - except ValueError: - # too many dimensions, probably - raise IndexError - ax += mi.ndim - continue - - # If we are here, we have a 1D array for take: - arr = arr.take(indx[1], axis=ax) - ax += 1 - - return arr, no_copy - - def _check_multi_index(self, arr, index): - """Check a multi index item getting and simple setting. - - Parameters - ---------- - arr : ndarray - Array to be indexed, must be a reshaped arange. - index : tuple of indexing objects - Index being tested. - """ - # Test item getting - try: - mimic_get, no_copy = self._get_multi_index(arr, index) - except Exception as e: - if HAS_REFCOUNT: - prev_refcount = sys.getrefcount(arr) - assert_raises(type(e), arr.__getitem__, index) - assert_raises(type(e), arr.__setitem__, index, 0) - if HAS_REFCOUNT: - assert_equal(prev_refcount, sys.getrefcount(arr)) - return - - self._compare_index_result(arr, index, mimic_get, no_copy) - - def _check_single_index(self, arr, index): - """Check a single index item getting and simple setting. - - Parameters - ---------- - arr : ndarray - Array to be indexed, must be an arange. - index : indexing object - Index being tested. 
Must be a single index and not a tuple - of indexing objects (see also `_check_multi_index`). - """ - try: - mimic_get, no_copy = self._get_multi_index(arr, (index,)) - except Exception as e: - if HAS_REFCOUNT: - prev_refcount = sys.getrefcount(arr) - assert_raises(type(e), arr.__getitem__, index) - assert_raises(type(e), arr.__setitem__, index, 0) - if HAS_REFCOUNT: - assert_equal(prev_refcount, sys.getrefcount(arr)) - return - - self._compare_index_result(arr, index, mimic_get, no_copy) - - def _compare_index_result(self, arr, index, mimic_get, no_copy): - """Compare mimicked result to indexing result. - """ - arr = arr.copy() - indexed_arr = arr[index] - assert_array_equal(indexed_arr, mimic_get) - # Check if we got a view, unless it's a 0-sized or 0-d array - # (then it's not a view, and that does not matter) - if indexed_arr.size != 0 and indexed_arr.ndim != 0: - assert_(np.may_share_memory(indexed_arr, arr) == no_copy) - # Check reference count of the original array - if HAS_REFCOUNT: - if no_copy: - # refcount increases by one: - assert_equal(sys.getrefcount(arr), 3) - else: - assert_equal(sys.getrefcount(arr), 2) - - # Test non-broadcast setitem: - b = arr.copy() - b[index] = mimic_get + 1000 - if b.size == 0: - return # nothing to compare here... - if no_copy and indexed_arr.ndim != 0: - # change indexed_arr in-place to manipulate original: - indexed_arr += 1000 - assert_array_equal(arr, b) - return - # Use the fact that the array is originally an arange: - arr.flat[indexed_arr.ravel()] += 1000 - assert_array_equal(arr, b) - - def test_boolean(self): - a = np.array(5) - assert_equal(a[np.array(True)], 5) - a[np.array(True)] = 1 - assert_equal(a, 1) - # NOTE: This is different from normal broadcasting, as - # arr[boolean_array] works like a multi-index, which means - # it is aligned to the left. This is probably correct for - # consistency with arr[boolean_array,]; also, no broadcasting - # is done at all - self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool),)) - self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) - self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) - - def test_multidim(self): - # Automatically test combinations with complex indexes on 2nd (or 1st) - # spot and the simple ones in one other spot. - with warnings.catch_warnings(): - # This is so that np.array(True) is not accepted in a full integer - # index, when running the file separately.
- warnings.filterwarnings('error', '', DeprecationWarning) - warnings.filterwarnings('error', '', np.VisibleDeprecationWarning) - - def isskip(idx): - return isinstance(idx, str) and idx == "skip" - - for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices - for index in product(*tocheck): - index = tuple(i for i in index if not isskip(i)) - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) - - # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) - # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) - - def test_1d(self): - a = np.arange(10) - for index in self.complex_indices: - self._check_single_index(a, index) - -class TestFloatNonIntegerArgument(object): - """ - These tests check that ``TypeError`` is raised when you try to use - non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]`` - and ``a[0.5]``, or in other functions like ``array.reshape(1., -1)``. - - """ - def test_valid_indexing(self): - # These should raise no errors. - a = np.array([[[5]]]) - - a[np.array([0])] - a[[0, 0]] - a[:, [0, 0]] - a[:, 0,:] - a[:,:,:] - - def test_valid_slicing(self): - # These should raise no errors. - a = np.array([[[5]]]) - - a[::] - a[0:] - a[:2] - a[0:2] - a[::2] - a[1::2] - a[:2:2] - a[1:2:2] - - def test_non_integer_argument_errors(self): - a = np.array([[5]]) - - assert_raises(TypeError, np.reshape, a, (1., 1., -1)) - assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) - assert_raises(TypeError, np.take, a, [0], 1.) - assert_raises(TypeError, np.take, a, [0], np.float64(1.)) - - def test_non_integer_sequence_multiplication(self): - # NumPy scalar sequence multiply should not work with non-integers - def mult(a, b): - return a * b - - assert_raises(TypeError, mult, [1], np.float_(3)) - # following should be OK - mult([1], np.int_(3)) - - def test_reduce_axis_float_index(self): - d = np.zeros((3,3,3)) - assert_raises(TypeError, np.min, d, 0.5) - assert_raises(TypeError, np.min, d, (0.5, 1)) - assert_raises(TypeError, np.min, d, (1, 2.2)) - assert_raises(TypeError, np.min, d, (.2, 1.2)) - - -class TestBooleanIndexing(object): - # Using a boolean as integer argument/indexing is an error. - def test_bool_as_int_argument_errors(self): - a = np.array([[[1]]]) - - assert_raises(TypeError, np.reshape, a, (True, -1)) - assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1)) - # Note that operator.index(np.array(True)) does not work, a boolean - # array is thus also deprecated, but not with the same message: - assert_raises(TypeError, operator.index, np.array(True)) - assert_warns(DeprecationWarning, operator.index, np.True_) - assert_raises(TypeError, np.take, args=(a, [0], False)) - - def test_boolean_indexing_weirdness(self): - # Weird boolean indexing things - a = np.ones((2, 3, 4)) - a[False, True, ...].shape == (0, 2, 3, 4) - a[True, [0, 1], True, True, [1], [[2]]] == (1, 2) - assert_raises(IndexError, lambda: a[False, [0, 1], ...]) - - -class TestArrayToIndexDeprecation(object): - """Creating an index from an array that is not 0-D is an error.
-
-    """
-    def test_array_to_index_error(self):
-        # Only 0-d arrays can be converted to an index (tested above);
-        # here, anything else must raise TypeError.
-        a = np.array([[[1]]])
-
-        assert_raises(TypeError, operator.index, np.array([1]))
-        assert_raises(TypeError, np.reshape, a, (a, -1))
-        assert_raises(TypeError, np.take, a, [0], a)
-
-
-class TestNonIntegerArrayLike(object):
-    """Tests that array-likes are valid indices only if they can be safely
-    cast to integer.
-
-    For instance, lists give IndexError when they cannot be safely cast to
-    an integer.
-
-    """
-    def test_basic(self):
-        a = np.arange(10)
-
-        assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
-        assert_raises(IndexError, a.__getitem__, (['1', '2'],))
-
-        # The following is valid
-        a.__getitem__([])
-
-
-class TestMultipleEllipsisError(object):
-    """An index can only have a single ellipsis.
-
-    """
-    def test_basic(self):
-        a = np.arange(10)
-        assert_raises(IndexError, lambda: a[..., ...])
-        assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
-        assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
-
-
-class TestCApiAccess(object):
-    def test_getitem(self):
-        subscript = functools.partial(array_indexing, 0)
-
-        # 0-d arrays don't work:
-        assert_raises(IndexError, subscript, np.ones(()), 0)
-        # Out of bound values:
-        assert_raises(IndexError, subscript, np.ones(10), 11)
-        assert_raises(IndexError, subscript, np.ones(10), -11)
-        assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
-        assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
-
-        a = np.arange(10)
-        assert_array_equal(a[4], subscript(a, 4))
-        a = a.reshape(5, 2)
-        assert_array_equal(a[-4], subscript(a, -4))
-
-    def test_setitem(self):
-        assign = functools.partial(array_indexing, 1)
-
-        # Deletion is impossible:
-        assert_raises(ValueError, assign, np.ones(10), 0)
-        # 0-d arrays don't work:
-        assert_raises(IndexError, assign, np.ones(()), 0, 0)
-        # Out of bound values:
-        assert_raises(IndexError, assign, np.ones(10), 11, 0)
-        assert_raises(IndexError, assign, np.ones(10), -11, 0)
-        assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
-        assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
-
-        a = np.arange(10)
-        assign(a, 4, 10)
-        assert_(a[4] == 10)
-
-        a = a.reshape(5, 2)
-        assign(a, 4, 10)
-        assert_array_equal(a[-1], [10, 10])
diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_issue14735.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_issue14735.py
deleted file mode 100644
index 6105c8e..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_issue14735.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import pytest
-import warnings
-import numpy as np
-
-
-class Wrapper:
-    def __init__(self, array):
-        self.array = array
-
-    def __len__(self):
-        return len(self.array)
-
-    def __getitem__(self, item):
-        return type(self)(self.array[item])
-
-    def __getattr__(self, name):
-        if name.startswith("__array_"):
-            warnings.warn("object got converted", UserWarning, stacklevel=1)
-
-        return getattr(self.array, name)
-
-    def __repr__(self):
-        return "<Wrapper({self.array})>".format(self=self)
-
-@pytest.mark.filterwarnings("error")
-def test_getattr_warning():
-    array = Wrapper(np.arange(10))
-    with pytest.raises(UserWarning, match="object got converted"):
-        np.asarray(array)
diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_item_selection.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_item_selection.py
deleted file mode 100644
index 9bd2468..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_item_selection.py
+++ /dev/null
@@ -1,87
+0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_array_equal, HAS_REFCOUNT - ) - - -class TestTake(object): - def test_simple(self): - a = [[1, 2], [3, 4]] - a_str = [[b'1', b'2'], [b'3', b'4']] - modes = ['raise', 'wrap', 'clip'] - indices = [-1, 4] - index_arrays = [np.empty(0, dtype=np.intp), - np.empty(tuple(), dtype=np.intp), - np.empty((1, 1), dtype=np.intp)] - real_indices = {'raise': {-1: 1, 4: IndexError}, - 'wrap': {-1: 1, 4: 0}, - 'clip': {-1: 0, 4: 1}} - # Currently all types but object, use the same function generation. - # So it should not be necessary to test all. However test also a non - # refcounted struct on top of object. - types = int, object, np.dtype([('', 'i', 2)]) - for t in types: - # ta works, even if the array may be odd if buffer interface is used - ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) - tresult = list(ta.T.copy()) - for index_array in index_arrays: - if index_array.size != 0: - tresult[0].shape = (2,) + index_array.shape - tresult[1].shape = (2,) + index_array.shape - for mode in modes: - for index in indices: - real_index = real_indices[mode][index] - if real_index is IndexError and index_array.size != 0: - index_array.put(0, index) - assert_raises(IndexError, ta.take, index_array, - mode=mode, axis=1) - elif index_array.size != 0: - index_array.put(0, index) - res = ta.take(index_array, mode=mode, axis=1) - assert_array_equal(res, tresult[real_index]) - else: - res = ta.take(index_array, mode=mode, axis=1) - assert_(res.shape == (2,) + index_array.shape) - - def test_refcounting(self): - objects = [object() for i in range(10)] - for mode in ('raise', 'clip', 'wrap'): - a = np.array(objects) - b = np.array([2, 2, 4, 5, 3, 5]) - a.take(b, out=a[:6], mode=mode) - del a - if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) - # not contiguous, example: - a = np.array(objects * 2)[::2] - a.take(b, out=a[:6], mode=mode) - del a - if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) - - def test_unicode_mode(self): - d = np.arange(10) - k = b'\xc3\xa4'.decode("UTF8") - assert_raises(ValueError, d.take, 5, mode=k) - - def test_empty_partition(self): - # In reference to github issue #6530 - a_original = np.array([0, 2, 4, 6, 8, 10]) - a = a_original.copy() - - # An empty partition should be a successful no-op - a.partition(np.array([], dtype=np.int16)) - - assert_array_equal(a, a_original) - - def test_empty_argpartition(self): - # In reference to github issue #6530 - a = np.array([0, 2, 4, 6, 8, 10]) - a = a.argpartition(np.array([], dtype=np.int16)) - - b = np.array([0, 1, 2, 3, 4, 5]) - assert_array_equal(a, b) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_longdouble.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_longdouble.py deleted file mode 100644 index 2b6e1c5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_longdouble.py +++ /dev/null @@ -1,357 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import pytest - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, - temppath, - ) -from numpy.core.tests._locales import CommaDecimalPointLocale - -LD_INFO = np.finfo(np.longdouble) -longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) - - -_o = 1 + LD_INFO.eps 
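# Illustrative sketch (not from the deleted file) of the round-trip probe the
# next assignment records: whether repr() of a longdouble parses back exactly
# is platform-dependent (it needs strtold_l, as the skip reasons below note).
import numpy as np

eps = np.finfo(np.longdouble).eps
o = np.longdouble(1) + eps
print(np.longdouble(repr(o)) == o)    # True only where parsing is exact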
-string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o))) -del _o - - -def test_scalar_extraction(): - """Confirm that extracting a value doesn't convert to python float""" - o = 1 + LD_INFO.eps - a = np.array([o, o, o]) - assert_equal(a[1], o) - - -# Conversions string -> long double - -# 0.1 not exactly representable in base 2 floating point. -repr_precision = len(repr(np.longdouble(0.1))) -# +2 from macro block starting around line 842 in scalartypes.c.src. -@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, - reason="repr precision not enough to show eps") -def test_repr_roundtrip(): - # We will only see eps in repr if within printing precision. - o = 1 + LD_INFO.eps - assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o)) - - -def test_unicode(): - np.longdouble(u"1.2") - - -def test_string(): - np.longdouble("1.2") - - -def test_bytes(): - np.longdouble(b"1.2") - - -@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") -def test_repr_roundtrip_bytes(): - o = 1 + LD_INFO.eps - assert_equal(np.longdouble(repr(o).encode("ascii")), o) - - -def test_bogus_string(): - assert_raises(ValueError, np.longdouble, "spam") - assert_raises(ValueError, np.longdouble, "1.0 flub") - - -@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") -def test_fromstring(): - o = 1 + LD_INFO.eps - s = (" " + repr(o))*5 - a = np.array([o]*5) - assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, - err_msg="reading '%s'" % s) - - -def test_fromstring_complex(): - for ctype in ["complex", "cdouble", "cfloat"]: - # Check spacing between separator - assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), - np.array([1., 2., 3., 4.])) - # Real component not specified - assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), - np.array([1.j, -2.j, 3.j, 40.j])) - # Both components specified - assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), - np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) - # Spaces at wrong places - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+j", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","), - np.array([1j])) - - -def test_fromstring_bogus(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), - np.array([1., 2., 3.])) - - -def test_fromstring_empty(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("xxxxx", sep="x"), - np.array([])) - - -def test_fromstring_missing(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), - np.array([1])) - - -class TestFileBased(object): - - ldbl = 1 + LD_INFO.eps - tgt = np.array([ldbl]*5) - out = ''.join([repr(t) + '\n' for t in tgt]) - - def test_fromfile_bogus(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write("1. 2. 3. 
flop 4.\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=float, sep=" ") - assert_equal(res, np.array([1., 2., 3.])) - - def test_fromfile_complex(self): - for ctype in ["complex", "cdouble", "cfloat"]: - # Check spacing between separator and only real component specified - with temppath() as path: - with open(path, 'wt') as f: - f.write("1, 2 , 3 ,4\n") - - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1., 2., 3., 4.])) - - # Real component not specified - with temppath() as path: - with open(path, 'wt') as f: - f.write("1j, -2j, 3j, 4e1j\n") - - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) - - # Both components specified - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") - - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+2 j,3\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+ 2j,3\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1 +2j,3\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+j\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1+\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) - - # Spaces at wrong places - with temppath() as path: - with open(path, 'wt') as f: - f.write("1j+1\n") - - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.j])) - - - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_fromfile(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write(self.out) - res = np.fromfile(path, dtype=np.longdouble, sep="\n") - assert_equal(res, self.tgt) - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_genfromtxt(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write(self.out) - res = np.genfromtxt(path, dtype=np.longdouble) - assert_equal(res, self.tgt) - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_loadtxt(self): - with temppath() as path: - with open(path, 'wt') as f: - f.write(self.out) - res = np.loadtxt(path, dtype=np.longdouble) - assert_equal(res, self.tgt) - - @pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") - def test_tofile_roundtrip(self): - with temppath() as path: - self.tgt.tofile(path, sep=" ") - res = np.fromfile(path, dtype=np.longdouble, sep=" ") - assert_equal(res, self.tgt) - - -# Conversions long double -> string - - -def test_repr_exact(): - o = 1 + LD_INFO.eps - 
assert_(repr(o) != '1') - - -@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") -@pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") -def test_format(): - o = 1 + LD_INFO.eps - assert_("{0:.40g}".format(o) != '1') - - -@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") -@pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") -def test_percent(): - o = 1 + LD_INFO.eps - assert_("%.40g" % o != '1') - - -@pytest.mark.skipif(longdouble_longer_than_double, - reason="array repr problem") -@pytest.mark.skipif(string_to_longdouble_inaccurate, - reason="Need strtold_l") -def test_array_repr(): - o = 1 + LD_INFO.eps - a = np.array([o]) - b = np.array([1], dtype=np.longdouble) - if not np.all(a != b): - raise ValueError("precision loss creating arrays") - assert_(repr(a) != repr(b)) - -# -# Locale tests: scalar types formatting should be independent of the locale -# - -class TestCommaDecimalPointLocale(CommaDecimalPointLocale): - - def test_repr_roundtrip_foreign(self): - o = 1.5 - assert_equal(o, np.longdouble(repr(o))) - - def test_fromstring_foreign_repr(self): - f = 1.234 - a = np.fromstring(repr(f), dtype=float, sep=" ") - assert_equal(a[0], f) - - def test_fromstring_best_effort_float(self): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=float, sep=" "), - np.array([1.])) - - def test_fromstring_best_effort(self): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), - np.array([1.])) - - def test_fromstring_foreign(self): - s = "1.234" - a = np.fromstring(s, dtype=np.longdouble, sep=" ") - assert_equal(a[0], np.longdouble(s)) - - def test_fromstring_foreign_sep(self): - a = np.array([1, 2, 3, 4]) - b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") - assert_array_equal(a, b) - - def test_fromstring_foreign_value(self): - with assert_warns(DeprecationWarning): - b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") - assert_array_equal(b[0], 1) - - -@pytest.mark.parametrize("int_val", [ - # cases discussed in gh-10723 - # and gh-9968 - 2 ** 1024, 0]) -def test_longdouble_from_int(int_val): - # for issue gh-9968 - str_val = str(int_val) - # we'll expect a RuntimeWarning on platforms - # with np.longdouble equivalent to np.double - # for large integer input - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - # can be inf==inf on some platforms - assert np.longdouble(int_val) == np.longdouble(str_val) - # we can't directly compare the int and - # max longdouble value on all platforms - if np.allclose(np.finfo(np.longdouble).max, - np.finfo(np.double).max) and w: - assert w[0].category is RuntimeWarning - -@pytest.mark.parametrize("bool_val", [ - True, False]) -def test_longdouble_from_bool(bool_val): - assert np.longdouble(bool_val) == np.longdouble(int(bool_val)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_machar.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_machar.py deleted file mode 100644 index ab8800c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_machar.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Test machar. Given recent changes to hardcode type data, we might want to get -rid of both MachAr and this test at some point. 
-
-"""
-from __future__ import division, absolute_import, print_function

-from numpy.core.machar import MachAr
-import numpy.core.numerictypes as ntypes
-from numpy import errstate, array
-
-
-class TestMachAr(object):
-    def _run_machar_highprec(self):
-        # Instantiate MachAr with high enough precision to cause underflow.
-        try:
-            hiprec = ntypes.float96
-            MachAr(lambda v: array([v], hiprec))
-        except AttributeError:
-            # FIXME: this should raise a 'skip' exception instead of passing
-            # silently when ntypes.float96 is unavailable on this platform.
-            pass
-
-    def test_underflow(self):
-        # Regression test for #759:
-        # instantiating MachAr for dtype = np.float96 raises spurious warning.
-        with errstate(all='raise'):
-            try:
-                self._run_machar_highprec()
-            except FloatingPointError as e:
-                msg = "Caught %s exception, should not have been raised." % e
-                raise AssertionError(msg)
diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_mem_overlap.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_mem_overlap.py
deleted file mode 100644
index 3c8e0e7..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_mem_overlap.py
+++ /dev/null
@@ -1,950 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-import itertools
-import pytest
-
-import numpy as np
-from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
-from numpy.core import _umath_tests
-from numpy.lib.stride_tricks import as_strided
-from numpy.compat import long
-from numpy.testing import (
-    assert_, assert_raises, assert_equal, assert_array_equal
-    )
-
-if sys.version_info[0] >= 3:
-    xrange = range
-
-
-ndims = 2
-size = 10
-shape = tuple([size] * ndims)
-
-MAY_SHARE_BOUNDS = 0
-MAY_SHARE_EXACT = -1
-
-
-def _indices_for_nelems(nelems):
-    """Returns slices selecting nelems elements, starting near the middle,
-    with steps 1 and 2 in both directions."""
-
-    if nelems == 0:
-        return [size // 2]  # int index
-
-    res = []
-    for step in (1, 2):
-        for sign in (-1, 1):
-            start = size // 2 - nelems * step * sign // 2
-            stop = start + nelems * step * sign
-            res.append(slice(start, stop, step * sign))
-
-    return res
-
-
-def _indices_for_axis():
-    """Returns (src, dst) pairs of indices."""
-
-    res = []
-    for nelems in (0, 2, 3):
-        ind = _indices_for_nelems(nelems)
-
-        # all assignments of size "nelems", as an explicit double loop
-        res.extend([(a, b) for a in ind for b in ind])
-
-    return res
-
-
-def _indices(ndims):
-    """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
-
-    ind = _indices_for_axis()
-
-    res = [[]]
-    for i in range(ndims):
-        newres = []
-        for elem in ind:
-            for others in res:
-                newres.append([elem] + others)
-        res = newres
-
-    return res
-
-
-def _check_assignment(srcidx, dstidx):
-    """Check assignment arr[dstidx] = arr[srcidx] works."""
-
-    arr = np.arange(np.product(shape)).reshape(shape)
-
-    cpy = arr.copy()
-
-    cpy[dstidx] = arr[srcidx]
-    arr[dstidx] = arr[srcidx]
-
-    assert_(np.all(arr == cpy),
-            'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
-
-
-def test_overlapping_assignments():
-    # Test automatically generated assignments which overlap in memory.
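# Illustrative sketch (not from the deleted file) of the invariant this test
# checks on many index pairs: assignment between overlapping views must behave
# as if the source were copied first. Standard NumPy semantics.
import numpy as np

arr = np.arange(10)
expected = arr.copy()
expected[1:] = arr[:-1].copy()        # reference result via an explicit copy
arr[1:] = arr[:-1]                    # source and destination overlap
assert np.array_equal(arr, expected)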
- - inds = _indices(ndims) - - for ind in inds: - srcidx = tuple([a[0] for a in ind]) - dstidx = tuple([a[1] for a in ind]) - - _check_assignment(srcidx, dstidx) - - -@pytest.mark.slow -def test_diophantine_fuzz(): - # Fuzz test the diophantine solver - rng = np.random.RandomState(1234) - - max_int = np.iinfo(np.intp).max - - for ndim in range(10): - feasible_count = 0 - infeasible_count = 0 - - min_count = 500//(ndim + 1) - - while min(feasible_count, infeasible_count) < min_count: - # Ensure big and small integer problems - A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 - U_max = rng.randint(0, 11, dtype=np.intp)**6 - - A_max = min(max_int, A_max) - U_max = min(max_int-1, U_max) - - A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp)) - for j in range(ndim)) - U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp)) - for j in range(ndim)) - - b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U))) - b = rng.randint(-1, b_ub+2, dtype=np.intp) - - if ndim == 0 and feasible_count < min_count: - b = 0 - - X = solve_diophantine(A, U, b) - - if X is None: - # Check the simplified decision problem agrees - X_simplified = solve_diophantine(A, U, b, simplify=1) - assert_(X_simplified is None, (A, U, b, X_simplified)) - - # Check no solution exists (provided the problem is - # small enough so that brute force checking doesn't - # take too long) - try: - ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U)) - except OverflowError: - # xrange on 32-bit Python 2 may overflow - continue - - size = 1 - for r in ranges: - size *= len(r) - if size < 100000: - assert_(not any(sum(w) == b for w in itertools.product(*ranges))) - infeasible_count += 1 - else: - # Check the simplified decision problem agrees - X_simplified = solve_diophantine(A, U, b, simplify=1) - assert_(X_simplified is not None, (A, U, b, X_simplified)) - - # Check validity - assert_(sum(a*x for a, x in zip(A, X)) == b) - assert_(all(0 <= x <= ub for x, ub in zip(X, U))) - feasible_count += 1 - - -def test_diophantine_overflow(): - # Smoke test integer overflow detection - max_intp = np.iinfo(np.intp).max - max_int64 = np.iinfo(np.int64).max - - if max_int64 <= max_intp: - # Check that the algorithm works internally in 128-bit; - # solving this problem requires large intermediate numbers - A = (max_int64//2, max_int64//2 - 10) - U = (max_int64//2, max_int64//2 - 10) - b = 2*(max_int64//2) - 10 - - assert_equal(solve_diophantine(A, U, b), (1, 1)) - - -def check_may_share_memory_exact(a, b): - got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) - - assert_equal(np.may_share_memory(a, b), - np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) - - a.fill(0) - b.fill(0) - a.fill(1) - exact = b.any() - - err_msg = "" - if got != exact: - err_msg = " " + "\n ".join([ - "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],), - "shape_a = %r" % (a.shape,), - "shape_b = %r" % (b.shape,), - "strides_a = %r" % (a.strides,), - "strides_b = %r" % (b.strides,), - "size_a = %r" % (a.size,), - "size_b = %r" % (b.size,) - ]) - - assert_equal(got, exact, err_msg=err_msg) - - -def test_may_share_memory_manual(): - # Manual test cases for may_share_memory - - # Base arrays - xs0 = [ - np.zeros([13, 21, 23, 22], dtype=np.int8), - np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:] - ] - - # Generate all negative stride combinations - xs = [] - for x in xs0: - for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)): - xp = x[ss] - xs.append(xp) - - for x in xs: - # The default is a 
simple extent check - assert_(np.may_share_memory(x[:,0,:], x[:,1,:])) - assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None)) - - # Exact checks - check_may_share_memory_exact(x[:,0,:], x[:,1,:]) - check_may_share_memory_exact(x[:,::7], x[:,3::3]) - - try: - xp = x.ravel() - if xp.flags.owndata: - continue - xp = xp.view(np.int16) - except ValueError: - continue - - # 0-size arrays cannot overlap - check_may_share_memory_exact(x.ravel()[6:6], - xp.reshape(13, 21, 23, 11)[:,::7]) - - # Test itemsize is dealt with - check_may_share_memory_exact(x[:,::7], - xp.reshape(13, 21, 23, 11)) - check_may_share_memory_exact(x[:,::7], - xp.reshape(13, 21, 23, 11)[:,3::3]) - check_may_share_memory_exact(x.ravel()[6:7], - xp.reshape(13, 21, 23, 11)[:,::7]) - - # Check unit size - x = np.zeros([1], dtype=np.int8) - check_may_share_memory_exact(x, x) - check_may_share_memory_exact(x, x.copy()) - - -def iter_random_view_pairs(x, same_steps=True, equal_size=False): - rng = np.random.RandomState(1234) - - if equal_size and same_steps: - raise ValueError() - - def random_slice(n, step): - start = rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) - if rng.randint(0, 2, dtype=np.intp) == 0: - stop, start = start, stop - step *= -1 - return slice(start, stop, step) - - def random_slice_fixed_size(n, step, size): - start = rng.randint(0, n+1 - size*step) - stop = start + (size-1)*step + 1 - if rng.randint(0, 2) == 0: - stop, start = start-1, stop-1 - if stop < 0: - stop = None - step *= -1 - return slice(start, stop, step) - - # First a few regular views - yield x, x - for j in range(1, 7, 3): - yield x[j:], x[:-j] - yield x[...,j:], x[...,:-j] - - # An array with zero stride internal overlap - strides = list(x.strides) - strides[0] = 0 - xp = as_strided(x, shape=x.shape, strides=strides) - yield x, xp - yield xp, xp - - # An array with non-zero stride internal overlap - strides = list(x.strides) - if strides[0] > 1: - strides[0] = 1 - xp = as_strided(x, shape=x.shape, strides=strides) - yield x, xp - yield xp, xp - - # Then discontiguous views - while True: - steps = tuple(rng.randint(1, 11, dtype=np.intp) - if rng.randint(0, 5, dtype=np.intp) == 0 else 1 - for j in range(x.ndim)) - s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) - - t1 = np.arange(x.ndim) - rng.shuffle(t1) - - if equal_size: - t2 = t1 - else: - t2 = np.arange(x.ndim) - rng.shuffle(t2) - - a = x[s1] - - if equal_size: - if a.size == 0: - continue - - steps2 = tuple(rng.randint(1, max(2, p//(1+pa))) - if rng.randint(0, 5) == 0 else 1 - for p, s, pa in zip(x.shape, s1, a.shape)) - s2 = tuple(random_slice_fixed_size(p, s, pa) - for p, s, pa in zip(x.shape, steps2, a.shape)) - elif same_steps: - steps2 = steps - else: - steps2 = tuple(rng.randint(1, 11, dtype=np.intp) - if rng.randint(0, 5, dtype=np.intp) == 0 else 1 - for j in range(x.ndim)) - - if not equal_size: - s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) - - a = a.transpose(t1) - b = x[s2].transpose(t2) - - yield a, b - - -def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): - # Check that overlap problems with common strides are solved with - # little work. 
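# Illustrative sketch (not from the deleted file) of the two answer levels
# used throughout: the default cheap bounds check versus the exact solver.
import numpy as np

x = np.zeros(10, dtype=np.int8)
a, b = x[::2], x[1::2]                # interleaved views share no element
print(np.may_share_memory(a, b))      # True  -- extent (bounds) check only
print(np.shares_memory(a, b))         # False -- exact (possibly costly) check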
- x = np.zeros([17,34,71,97], dtype=np.int16) - - feasible = 0 - infeasible = 0 - - pair_iter = iter_random_view_pairs(x, same_steps) - - while min(feasible, infeasible) < min_count: - a, b = next(pair_iter) - - bounds_overlap = np.may_share_memory(a, b) - may_share_answer = np.may_share_memory(a, b) - easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) - exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) - - if easy_answer != exact_answer: - # assert_equal is slow... - assert_equal(easy_answer, exact_answer) - - if may_share_answer != bounds_overlap: - assert_equal(may_share_answer, bounds_overlap) - - if bounds_overlap: - if exact_answer: - feasible += 1 - else: - infeasible += 1 - - -@pytest.mark.slow -def test_may_share_memory_easy_fuzz(): - # Check that overlap problems with common strides are always - # solved with little work. - - check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, - same_steps=True, - min_count=2000) - - -@pytest.mark.slow -def test_may_share_memory_harder_fuzz(): - # Overlap problems with not necessarily common strides take more - # work. - # - # The work bound below can't be reduced much. Harder problems can - # also exist but not be detected here, as the set of problems - # comes from RNG. - - check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, - same_steps=False, - min_count=2000) - - -def test_shares_memory_api(): - x = np.zeros([4, 5, 6], dtype=np.int8) - - assert_equal(np.shares_memory(x, x), True) - assert_equal(np.shares_memory(x, x.copy()), False) - - a = x[:,::2,::3] - b = x[:,::3,::2] - assert_equal(np.shares_memory(a, b), True) - assert_equal(np.shares_memory(a, b, max_work=None), True) - assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1) - assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1)) - - -def test_may_share_memory_bad_max_work(): - x = np.zeros([1]) - assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) - assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) - - -def test_internal_overlap_diophantine(): - def check(A, U, exists=None): - X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) - - if exists is None: - exists = (X is not None) - - if X is not None: - assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) - assert_(all(0 <= x <= u for x, u in zip(X, U))) - assert_(any(x != u//2 for x, u in zip(X, U))) - - if exists: - assert_(X is not None, repr(X)) - else: - assert_(X is None, repr(X)) - - # Smoke tests - check((3, 2), (2*2, 3*2), exists=True) - check((3*2, 2), (15*2, (3-1)*2), exists=False) - - -def test_internal_overlap_slices(): - # Slicing an array never generates internal overlap - - x = np.zeros([17,34,71,97], dtype=np.int16) - - rng = np.random.RandomState(1234) - - def random_slice(n, step): - start = rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) - if rng.randint(0, 2, dtype=np.intp) == 0: - stop, start = start, stop - step *= -1 - return slice(start, stop, step) - - cases = 0 - min_count = 5000 - - while cases < min_count: - steps = tuple(rng.randint(1, 11, dtype=np.intp) - if rng.randint(0, 5, dtype=np.intp) == 0 else 1 - for j in range(x.ndim)) - t1 = np.arange(x.ndim) - rng.shuffle(t1) - s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) - a = x[s1].transpose(t1) - - assert_(not internal_overlap(a)) - cases += 1 - - -def check_internal_overlap(a, manual_expected=None): - got = internal_overlap(a) - - # 
Brute-force check - m = set() - ranges = tuple(xrange(n) for n in a.shape) - for v in itertools.product(*ranges): - offset = sum(s*w for s, w in zip(a.strides, v)) - if offset in m: - expected = True - break - else: - m.add(offset) - else: - expected = False - - # Compare - if got != expected: - assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) - if manual_expected is not None and expected != manual_expected: - assert_equal(expected, manual_expected) - return got - - -def test_internal_overlap_manual(): - # Stride tricks can construct arrays with internal overlap - - # We don't care about memory bounds, the array is not - # read/write accessed - x = np.arange(1).astype(np.int8) - - # Check low-dimensional special cases - - check_internal_overlap(x, False) # 1-dim - check_internal_overlap(x.reshape([]), False) # 0-dim - - a = as_strided(x, strides=(3, 4), shape=(4, 4)) - check_internal_overlap(a, False) - - a = as_strided(x, strides=(3, 4), shape=(5, 4)) - check_internal_overlap(a, True) - - a = as_strided(x, strides=(0,), shape=(0,)) - check_internal_overlap(a, False) - - a = as_strided(x, strides=(0,), shape=(1,)) - check_internal_overlap(a, False) - - a = as_strided(x, strides=(0,), shape=(2,)) - check_internal_overlap(a, True) - - a = as_strided(x, strides=(0, -9993), shape=(87, 22)) - check_internal_overlap(a, True) - - a = as_strided(x, strides=(0, -9993), shape=(1, 22)) - check_internal_overlap(a, False) - - a = as_strided(x, strides=(0, -9993), shape=(0, 22)) - check_internal_overlap(a, False) - - -def test_internal_overlap_fuzz(): - # Fuzz check; the brute-force check is fairly slow - - x = np.arange(1).astype(np.int8) - - overlap = 0 - no_overlap = 0 - min_count = 100 - - rng = np.random.RandomState(1234) - - while min(overlap, no_overlap) < min_count: - ndim = rng.randint(1, 4, dtype=np.intp) - - strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) - for j in range(ndim)) - shape = tuple(rng.randint(1, 30, dtype=np.intp) - for j in range(ndim)) - - a = as_strided(x, strides=strides, shape=shape) - result = check_internal_overlap(a) - - if result: - overlap += 1 - else: - no_overlap += 1 - - -def test_non_ndarray_inputs(): - # Regression check for gh-5604 - - class MyArray(object): - def __init__(self, data): - self.data = data - - @property - def __array_interface__(self): - return self.data.__array_interface__ - - class MyArray2(object): - def __init__(self, data): - self.data = data - - def __array__(self): - return self.data - - for cls in [MyArray, MyArray2]: - x = np.arange(5) - - assert_(np.may_share_memory(cls(x[::2]), x[1::2])) - assert_(not np.shares_memory(cls(x[::2]), x[1::2])) - - assert_(np.shares_memory(cls(x[1::3]), x[::2])) - assert_(np.may_share_memory(cls(x[1::3]), x[::2])) - - -def view_element_first_byte(x): - """Construct an array viewing the first byte of each element of `x`""" - from numpy.lib.stride_tricks import DummyArray - interface = dict(x.__array_interface__) - interface['typestr'] = '|b1' - interface['descr'] = [('', '|b1')] - return np.asarray(DummyArray(interface, x)) - - -def assert_copy_equivalent(operation, args, out, **kwargs): - """ - Check that operation(*args, out=out) produces results - equivalent to out[...] = operation(*args, out=out.copy()) - """ - - kwargs['out'] = out - kwargs2 = dict(kwargs) - kwargs2['out'] = out.copy() - - out_orig = out.copy() - out[...] = operation(*args, **kwargs2) - expected = out.copy() - out[...] 
= out_orig - - got = operation(*args, **kwargs).copy() - - if (got != expected).any(): - assert_equal(got, expected) - - -class TestUFunc(object): - """ - Test ufunc call memory overlap handling - """ - - def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, - count=5000): - shapes = [7, 13, 8, 21, 29, 32] - - rng = np.random.RandomState(1234) - - for ndim in range(1, 6): - x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) - - it = iter_random_view_pairs(x, same_steps=False, equal_size=True) - - min_count = count // (ndim + 1)**2 - - overlapping = 0 - while overlapping < min_count: - a, b = next(it) - - a_orig = a.copy() - b_orig = b.copy() - - if get_out_axis_size is None: - assert_copy_equivalent(operation, [a], out=b) - - if np.shares_memory(a, b): - overlapping += 1 - else: - for axis in itertools.chain(range(ndim), [None]): - a[...] = a_orig - b[...] = b_orig - - # Determine size for reduction axis (None if scalar) - outsize, scalarize = get_out_axis_size(a, b, axis) - if outsize == 'skip': - continue - - # Slice b to get an output array of the correct size - sl = [slice(None)] * ndim - if axis is None: - if outsize is None: - sl = [slice(0, 1)] + [0]*(ndim - 1) - else: - sl = [slice(0, outsize)] + [0]*(ndim - 1) - else: - if outsize is None: - k = b.shape[axis]//2 - if ndim == 1: - sl[axis] = slice(k, k + 1) - else: - sl[axis] = k - else: - assert b.shape[axis] >= outsize - sl[axis] = slice(0, outsize) - b_out = b[tuple(sl)] - - if scalarize: - b_out = b_out.reshape([]) - - if np.shares_memory(a, b_out): - overlapping += 1 - - # Check result - assert_copy_equivalent(operation, [a], out=b_out, axis=axis) - - @pytest.mark.slow - def test_unary_ufunc_call_fuzz(self): - self.check_unary_fuzz(np.invert, None, np.int16) - - def test_binary_ufunc_accumulate_fuzz(self): - def get_out_axis_size(a, b, axis): - if axis is None: - if a.ndim == 1: - return a.size, False - else: - return 'skip', False # accumulate doesn't support this - else: - return a.shape[axis], False - - self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, - dtype=np.int16, count=500) - - def test_binary_ufunc_reduce_fuzz(self): - def get_out_axis_size(a, b, axis): - return None, (axis is None or a.ndim == 1) - - self.check_unary_fuzz(np.add.reduce, get_out_axis_size, - dtype=np.int16, count=500) - - def test_binary_ufunc_reduceat_fuzz(self): - def get_out_axis_size(a, b, axis): - if axis is None: - if a.ndim == 1: - return a.size, False - else: - return 'skip', False # reduceat doesn't support this - else: - return a.shape[axis], False - - def do_reduceat(a, out, axis): - if axis is None: - size = len(a) - step = size//len(out) - else: - size = a.shape[axis] - step = a.shape[axis] // out.shape[axis] - idx = np.arange(0, size, step) - return np.add.reduceat(a, idx, out=out, axis=axis) - - self.check_unary_fuzz(do_reduceat, get_out_axis_size, - dtype=np.int16, count=500) - - def test_binary_ufunc_reduceat_manual(self): - def check(ufunc, a, ind, out): - c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) - c2 = ufunc.reduceat(a, ind, out=out) - assert_array_equal(c1, c2) - - # Exactly same input/output arrays - a = np.arange(10000, dtype=np.int16) - check(np.add, a, a[::-1].copy(), a) - - # Overlap with index - a = np.arange(10000, dtype=np.int16) - check(np.add, a, a[::-1], a) - - def test_unary_gufunc_fuzz(self): - shapes = [7, 13, 8, 21, 29, 32] - gufunc = _umath_tests.euclidean_pdist - - rng = np.random.RandomState(1234) - - for ndim in range(2, 6): - x = rng.rand(*shapes[:ndim]) - 
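# Illustrative sketch (not from the deleted file) of the property
# assert_copy_equivalent above encodes: a ufunc call whose out= aliases its
# input must match the result computed into a fresh copy.
import numpy as np

a = np.arange(1, 6)
ref = np.add.accumulate(a.copy())     # computed without aliasing
np.add.accumulate(a, out=a)           # output overlaps the input
assert np.array_equal(a, ref)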
- it = iter_random_view_pairs(x, same_steps=False, equal_size=True) - - min_count = 500 // (ndim + 1)**2 - - overlapping = 0 - while overlapping < min_count: - a, b = next(it) - - if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: - continue - - # Ensure the shapes are so that euclidean_pdist is happy - if b.shape[-1] > b.shape[-2]: - b = b[...,0,:] - else: - b = b[...,:,0] - - n = a.shape[-2] - p = n * (n - 1) // 2 - if p <= b.shape[-1] and p > 0: - b = b[...,:p] - else: - n = max(2, int(np.sqrt(b.shape[-1]))//2) - p = n * (n - 1) // 2 - a = a[...,:n,:] - b = b[...,:p] - - # Call - if np.shares_memory(a, b): - overlapping += 1 - - with np.errstate(over='ignore', invalid='ignore'): - assert_copy_equivalent(gufunc, [a], out=b) - - def test_ufunc_at_manual(self): - def check(ufunc, a, ind, b=None): - a0 = a.copy() - if b is None: - ufunc.at(a0, ind.copy()) - c1 = a0.copy() - ufunc.at(a, ind) - c2 = a.copy() - else: - ufunc.at(a0, ind.copy(), b.copy()) - c1 = a0.copy() - ufunc.at(a, ind, b) - c2 = a.copy() - assert_array_equal(c1, c2) - - # Overlap with index - a = np.arange(10000, dtype=np.int16) - check(np.invert, a[::-1], a) - - # Overlap with second data array - a = np.arange(100, dtype=np.int16) - ind = np.arange(0, 100, 2, dtype=np.int16) - check(np.add, a, ind, a[25:75]) - - def test_unary_ufunc_1d_manual(self): - # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE - - def check(a, b): - a_orig = a.copy() - b_orig = b.copy() - - b0 = b.copy() - c1 = ufunc(a, out=b0) - c2 = ufunc(a, out=b) - assert_array_equal(c1, c2) - - # Trigger "fancy ufunc loop" code path - mask = view_element_first_byte(b).view(np.bool_) - - a[...] = a_orig - b[...] = b_orig - c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() - - a[...] = a_orig - b[...] = b_orig - c2 = ufunc(a, out=b, where=mask.copy()).copy() - - # Also, mask overlapping with output - a[...] = a_orig - b[...] 
= b_orig - c3 = ufunc(a, out=b, where=mask).copy() - - assert_array_equal(c1, c2) - assert_array_equal(c1, c3) - - dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, - np.float64, np.complex64, np.complex128] - dtypes = [np.dtype(x) for x in dtypes] - - for dtype in dtypes: - if np.issubdtype(dtype, np.integer): - ufunc = np.invert - else: - ufunc = np.reciprocal - - n = 1000 - k = 10 - indices = [ - np.index_exp[:n], - np.index_exp[k:k+n], - np.index_exp[n-1::-1], - np.index_exp[k+n-1:k-1:-1], - np.index_exp[:2*n:2], - np.index_exp[k:k+2*n:2], - np.index_exp[2*n-1::-2], - np.index_exp[k+2*n-1:k-1:-2], - ] - - for xi, yi in itertools.product(indices, indices): - v = np.arange(1, 1 + n*2 + k, dtype=dtype) - x = v[xi] - y = v[yi] - - with np.errstate(all='ignore'): - check(x, y) - - # Scalar cases - check(x[:1], y) - check(x[-1:], y) - check(x[:1].reshape([]), y) - check(x[-1:].reshape([]), y) - - def test_unary_ufunc_where_same(self): - # Check behavior at wheremask overlap - ufunc = np.invert - - def check(a, out, mask): - c1 = ufunc(a, out=out.copy(), where=mask.copy()) - c2 = ufunc(a, out=out, where=mask) - assert_array_equal(c1, c2) - - # Check behavior with same input and output arrays - x = np.arange(100).astype(np.bool_) - check(x, x, x) - check(x, x.copy(), x) - check(x, x, x.copy()) - - @pytest.mark.slow - def test_binary_ufunc_1d_manual(self): - ufunc = np.add - - def check(a, b, c): - c0 = c.copy() - c1 = ufunc(a, b, out=c0) - c2 = ufunc(a, b, out=c) - assert_array_equal(c1, c2) - - for dtype in [np.int8, np.int16, np.int32, np.int64, - np.float32, np.float64, np.complex64, np.complex128]: - # Check different data dependency orders - - n = 1000 - k = 10 - - indices = [] - for p in [1, 2]: - indices.extend([ - np.index_exp[:p*n:p], - np.index_exp[k:k+p*n:p], - np.index_exp[p*n-1::-p], - np.index_exp[k+p*n-1:k-1:-p], - ]) - - for x, y, z in itertools.product(indices, indices, indices): - v = np.arange(6*n).astype(dtype) - x = v[x] - y = v[y] - z = v[z] - - check(x, y, z) - - # Scalar cases - check(x[:1], y, z) - check(x[-1:], y, z) - check(x[:1].reshape([]), y, z) - check(x[-1:].reshape([]), y, z) - check(x, y[:1], z) - check(x, y[-1:], z) - check(x, y[:1].reshape([]), z) - check(x, y[-1:].reshape([]), z) - - def test_inplace_op_simple_manual(self): - rng = np.random.RandomState(1234) - x = rng.rand(200, 200) # bigger than bufsize - - x += x.T - assert_array_equal(x - x.T, 0) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_memmap.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_memmap.py deleted file mode 100644 index d2ae564..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_memmap.py +++ /dev/null @@ -1,216 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import os -import shutil -import mmap -import pytest -from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp - -from numpy import ( - memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) -from numpy.compat import Path - -from numpy import arange, allclose, asarray -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, suppress_warnings - ) - -class TestMemmap(object): - def setup(self): - self.tmpfp = NamedTemporaryFile(prefix='mmap') - self.tempdir = mkdtemp() - self.shape = (3, 4) - self.dtype = 'float32' - self.data = arange(12, dtype=self.dtype) - self.data.resize(self.shape) - - def teardown(self): - self.tmpfp.close() - shutil.rmtree(self.tempdir) - - def 
test_roundtrip(self): - # Write data to file - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp # Test __del__ machinery, which handles cleanup - - # Read data back from file - newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', - shape=self.shape) - assert_(allclose(self.data, newfp)) - assert_array_equal(self.data, newfp) - assert_equal(newfp.flags.writeable, False) - - def test_open_with_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp - - def test_unnamed_file(self): - with TemporaryFile() as f: - fp = memmap(f, dtype=self.dtype, shape=self.shape) - del fp - - def test_attributes(self): - offset = 1 - mode = "w+" - fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, - shape=self.shape, offset=offset) - assert_equal(offset, fp.offset) - assert_equal(mode, fp.mode) - del fp - - def test_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - abspath = os.path.abspath(tmpname) - fp[:] = self.data[:] - assert_equal(abspath, fp.filename) - b = fp[:1] - assert_equal(abspath, b.filename) - del b - del fp - - @pytest.mark.skipif(Path is None, reason="No pathlib.Path") - def test_path(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', - shape=self.shape) - # os.path.realpath does not resolve symlinks on Windows - # see: https://bugs.python.org/issue9949 - # use Path.resolve, just as memmap class does internally - abspath = str(Path(tmpname).resolve()) - fp[:] = self.data[:] - assert_equal(abspath, str(fp.filename.resolve())) - b = fp[:1] - assert_equal(abspath, str(b.filename.resolve())) - del b - del fp - - def test_filename_fileobj(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", - shape=self.shape) - assert_equal(fp.filename, self.tmpfp.name) - - @pytest.mark.skipif(sys.platform == 'gnu0', - reason="Known to fail on hurd") - def test_flush(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - assert_equal(fp[0], self.data[0]) - fp.flush() - - def test_del(self): - # Make sure a view does not delete the underlying mmap - fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp_base[0] = 5 - fp_view = fp_base[0:1] - assert_equal(fp_view[0], 5) - del fp_view - # Should still be able to access and assign values after - # deleting the view - assert_equal(fp_base[0], 5) - fp_base[0] = 6 - assert_equal(fp_base[0], 6) - - def test_arithmetic_drops_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - tmp = (fp + 10) - if isinstance(tmp, memmap): - assert_(tmp._mmap is not fp._mmap) - - def test_indexing_drops_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - tmp = fp[(1, 2), (2, 3)] - if isinstance(tmp, memmap): - assert_(tmp._mmap is not fp._mmap) - - def test_slicing_keeps_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - assert_(fp[:2, :2]._mmap is fp._mmap) - - def test_view(self): - fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) - new1 = fp.view() - new2 = new1.view() - assert_(new1.base is fp) - assert_(new2.base is fp) - new_array = asarray(fp) - assert_(new_array.base is fp) - - def test_ufunc_return_ndarray(self): - fp = memmap(self.tmpfp, 
dtype=self.dtype, shape=self.shape) - fp[:] = self.data - - with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") - for unary_op in [sum, average, product]: - result = unary_op(fp) - assert_(isscalar(result)) - assert_(result.__class__ is self.data[0, 0].__class__) - - assert_(unary_op(fp, axis=0).__class__ is ndarray) - assert_(unary_op(fp, axis=1).__class__ is ndarray) - - for binary_op in [add, subtract, multiply]: - assert_(binary_op(fp, self.data).__class__ is ndarray) - assert_(binary_op(self.data, fp).__class__ is ndarray) - assert_(binary_op(fp, fp).__class__ is ndarray) - - fp += 1 - assert(fp.__class__ is memmap) - add(fp, 1, out=fp) - assert(fp.__class__ is memmap) - - def test_getitem(self): - fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) - fp[:] = self.data - - assert_(fp[1:, :-1].__class__ is memmap) - # Fancy indexing returns a copy that is not memmapped - assert_(fp[[0, 1]].__class__ is ndarray) - - def test_memmap_subclass(self): - class MemmapSubClass(memmap): - pass - - fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) - fp[:] = self.data - - # We keep previous behavior for subclasses of memmap, i.e. the - # ufunc and __getitem__ output is never turned into a ndarray - assert_(sum(fp, axis=0).__class__ is MemmapSubClass) - assert_(sum(fp).__class__ is MemmapSubClass) - assert_(fp[1:, :-1].__class__ is MemmapSubClass) - assert(fp[[0, 1]].__class__ is MemmapSubClass) - - def test_mmap_offset_greater_than_allocation_granularity(self): - size = 5 * mmap.ALLOCATIONGRANULARITY - offset = mmap.ALLOCATIONGRANULARITY + 1 - fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) - assert_(fp.offset == offset) - - def test_no_shape(self): - self.tmpfp.write(b'a'*16) - mm = memmap(self.tmpfp, dtype='float64') - assert_equal(mm.shape, (2,)) - - def test_empty_array(self): - # gh-12653 - with pytest.raises(ValueError, match='empty file'): - memmap(self.tmpfp, shape=(0,4), mode='w+') - - self.tmpfp.write(b'\0') - - # ok now the file is not empty - memmap(self.tmpfp, shape=(0,4), mode='w+') diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_multiarray.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_multiarray.py deleted file mode 100644 index 958b265..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_multiarray.py +++ /dev/null @@ -1,8431 +0,0 @@ -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import tempfile -import sys -import shutil -import warnings -import operator -import io -import itertools -import functools -import ctypes -import os -import gc -import weakref -import pytest -from contextlib import contextmanager - -from numpy.compat import pickle - -try: - import pathlib -except ImportError: - try: - import pathlib2 as pathlib - except ImportError: - pathlib = None - -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins -from decimal import Decimal - -import numpy as np -from numpy.compat import strchar, unicode -import numpy.core._multiarray_tests as _multiarray_tests -from numpy.testing import ( - assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, - assert_array_equal, assert_raises_regex, assert_array_almost_equal, - assert_allclose, IS_PYPY, HAS_REFCOUNT, 
assert_array_less, runstring, - temppath, suppress_warnings, break_cycles, - ) -from numpy.testing._private.utils import _no_tracing -from numpy.core.tests._locales import CommaDecimalPointLocale - -# Need to test an object that does not fully implement math interface -from datetime import timedelta, datetime - - -if sys.version_info[:2] > (3, 2): - # In Python 3.3 the representation of empty shape, strides and sub-offsets - # is an empty tuple instead of None. - # https://docs.python.org/dev/whatsnew/3.3.html#api-changes - EMPTY = () -else: - EMPTY = None - - -def _aligned_zeros(shape, dtype=float, order="C", align=None): - """ - Allocate a new ndarray with aligned memory. - - The ndarray is guaranteed *not* aligned to twice the requested alignment. - Eg, if align=4, guarantees it is not aligned to 8. If align=None uses - dtype.alignment.""" - dtype = np.dtype(dtype) - if dtype == np.dtype(object): - # Can't do this, fall back to standard allocation (which - # should always be sufficiently aligned) - if align is not None: - raise ValueError("object array alignment not supported") - return np.zeros(shape, dtype=dtype, order=order) - if align is None: - align = dtype.alignment - if not hasattr(shape, '__len__'): - shape = (shape,) - size = functools.reduce(operator.mul, shape) * dtype.itemsize - buf = np.empty(size + 2*align + 1, np.uint8) - - ptr = buf.__array_interface__['data'][0] - offset = ptr % align - if offset != 0: - offset = align - offset - if (ptr % (2*align)) == 0: - offset += align - - # Note: slices producing 0-size arrays do not necessarily change - # data pointer --- so we use and allocate size+1 - buf = buf[offset:offset+size+1][:-1] - data = np.ndarray(shape, dtype, buf, order=order) - data.fill(0) - return data - - -class TestFlags(object): - def setup(self): - self.a = np.arange(10) - - def test_writeable(self): - mydict = locals() - self.a.flags.writeable = False - assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) - assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 - - def test_writeable_any_base(self): - # Ensure that any base being writeable is sufficient to change flag; - # this is especially interesting for arrays from an array interface. - arr = np.arange(10) - - class subclass(np.ndarray): - pass - - # Create subclass so base will not be collapsed, this is OK to change - view1 = arr.view(subclass) - view2 = view1[...] - arr.flags.writeable = False - view2.flags.writeable = False - view2.flags.writeable = True # Can be set to True again. - - arr = np.arange(10) - - class frominterface: - def __init__(self, arr): - self.arr = arr - self.__array_interface__ = arr.__array_interface__ - - view1 = np.asarray(frominterface) - view2 = view1[...] 
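# Illustrative sketch (not from the deleted file) of the rule being exercised
# here: a view's writeable flag can only be switched back on while its base
# is writeable.
import numpy as np

base = np.arange(5)
view = base[...]
view.flags.writeable = False
base.flags.writeable = False
try:
    view.flags.writeable = True       # base is read-only: raises ValueError
except ValueError:
    pass
base.flags.writeable = True
view.flags.writeable = True           # allowed again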
- view2.flags.writeable = False - view2.flags.writeable = True - - view1.flags.writeable = False - view2.flags.writeable = False - with assert_raises(ValueError): - # Must assume not writeable, since only base is not: - view2.flags.writeable = True - - def test_writeable_from_readonly(self): - # gh-9440 - make sure fromstring, from buffer on readonly buffers - # set writeable False - data = b'\x00' * 100 - vals = np.frombuffer(data, 'B') - assert_raises(ValueError, vals.setflags, write=True) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) - values = np.core.records.fromstring(data, types) - vals = values['vals'] - assert_raises(ValueError, vals.setflags, write=True) - - def test_writeable_from_buffer(self): - data = bytearray(b'\x00' * 100) - vals = np.frombuffer(data, 'B') - assert_(vals.flags.writeable) - vals.setflags(write=False) - assert_(vals.flags.writeable is False) - vals.setflags(write=True) - assert_(vals.flags.writeable) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) - values = np.core.records.fromstring(data, types) - vals = values['vals'] - assert_(vals.flags.writeable) - vals.setflags(write=False) - assert_(vals.flags.writeable is False) - vals.setflags(write=True) - assert_(vals.flags.writeable) - - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies") - @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") - def test_writeable_pickle(self): - import pickle - # Small arrays will be copied without setting base. - # See condition for using PyArray_SetBaseObject in - # array_setstate. - a = np.arange(1000) - for v in range(pickle.HIGHEST_PROTOCOL): - vals = pickle.loads(pickle.dumps(a, v)) - assert_(vals.flags.writeable) - assert_(isinstance(vals.base, bytes)) - - def test_writeable_from_c_data(self): - # Test that the writeable flag can be changed for an array wrapping - # low level C-data, but not owning its data. - # Also see that this is deprecated to change from python. - from numpy.core._multiarray_tests import get_c_wrapping_array - - arr_writeable = get_c_wrapping_array(True) - assert not arr_writeable.flags.owndata - assert arr_writeable.flags.writeable - view = arr_writeable[...] - - # Toggling the writeable flag works on the view: - view.flags.writeable = False - assert not view.flags.writeable - view.flags.writeable = True - assert view.flags.writeable - # Flag can be unset on the arr_writeable: - arr_writeable.flags.writeable = False - - arr_readonly = get_c_wrapping_array(False) - assert not arr_readonly.flags.owndata - assert not arr_readonly.flags.writeable - - for arr in [arr_writeable, arr_readonly]: - view = arr[...] 
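# Illustrative sketch (not from the deleted file) of the buffer rule tested
# above: arrays over immutable buffers stay read-only; mutable buffers do not.
import numpy as np

ro = np.frombuffer(b'\x00' * 8, dtype=np.uint8)    # bytes are immutable
assert not ro.flags.writeable                      # and cannot be enabled
rw = np.frombuffer(bytearray(8), dtype=np.uint8)   # bytearray is mutable
rw.setflags(write=False)
rw.setflags(write=True)                            # may be toggled freely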
- view.flags.writeable = False # make sure it is readonly - arr.flags.writeable = False - assert not arr.flags.writeable - - with assert_raises(ValueError): - view.flags.writeable = True - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - with assert_raises(DeprecationWarning): - arr.flags.writeable = True - - with assert_warns(DeprecationWarning): - arr.flags.writeable = True - - def test_warnonwrite(self): - a = np.arange(10) - a.flags._warn_on_write = True - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always') - a[1] = 10 - a[2] = 10 - # only warn once - assert_(len(w) == 1) - - def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags['C'], True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - with assert_warns(DeprecationWarning): - assert_equal(self.a.flags.updateifcopy, False) - with assert_warns(DeprecationWarning): - assert_equal(self.a.flags['U'], False) - assert_equal(self.a.flags['UPDATEIFCOPY'], False) - assert_equal(self.a.flags.writebackifcopy, False) - assert_equal(self.a.flags['X'], False) - assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) - - def test_string_align(self): - a = np.zeros(4, dtype=np.dtype('|S4')) - assert_(a.flags.aligned) - # not power of two are accessed byte-wise and thus considered aligned - a = np.zeros(5, dtype=np.dtype('|S4')) - assert_(a.flags.aligned) - - def test_void_align(self): - a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) - assert_(a.flags.aligned) - - -class TestHash(object): - # see #3793 - def test_int(self): - for st, ut, s in [(np.int8, np.uint8, 8), - (np.int16, np.uint16, 16), - (np.int32, np.uint32, 32), - (np.int64, np.uint64, 64)]: - for i in range(1, s): - assert_equal(hash(st(-2**i)), hash(-2**i), - err_msg="%r: -2**%d" % (st, i)) - assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (st, i - 1)) - assert_equal(hash(st(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (st, i)) - - i = max(i - 1, 1) - assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (ut, i - 1)) - assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (ut, i)) - - -class TestAttributes(object): - def setup(self): - self.one = np.arange(10) - self.two = np.arange(20).reshape(4, 5) - self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) - - def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5*num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30*num, 6*num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20*num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, np.arange(20)) - - def test_dtypeattr(self): - assert_equal(self.one.dtype, np.dtype(np.int_)) - 
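# Illustrative sketch (not from the deleted file) of the stride arithmetic
# asserted in test_attributes above: in C order each stride is the product of
# the trailing dimensions times the itemsize.
import numpy as np

three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
assert three.strides == (5 * 6 * 8, 6 * 8, 8)      # == (240, 48, 8)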
assert_equal(self.three.dtype, np.dtype(np.float_)) - assert_equal(self.one.dtype.char, 'l') - assert_equal(self.three.dtype.char, 'd') - assert_(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') - - def test_int_subclassing(self): - # Regression test for https://github.com/numpy/numpy/pull/3526 - - numpy_int = np.int_(0) - - if sys.version_info[0] >= 3: - # On Py3k int_ should not inherit from int, because it's not - # fixed-width anymore - assert_equal(isinstance(numpy_int, int), False) - else: - # Otherwise, it should inherit from int... - assert_equal(isinstance(numpy_int, int), True) - - # ... and fast-path checks on C-API level should also work - from numpy.core._multiarray_tests import test_int_subclass - assert_equal(test_int_subclass(numpy_int), True) - - def test_stridesattr(self): - x = self.one - - def make_array(size, offset, strides): - return np.ndarray(size, buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) - - assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) - assert_raises(ValueError, make_array, 4, 4, -2) - assert_raises(ValueError, make_array, 4, 2, -1) - assert_raises(ValueError, make_array, 8, 3, 1) - assert_equal(make_array(8, 3, 0), np.array([3]*8)) - # Check behavior reported in gh-2503: - assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) - make_array(0, 0, 10) - - def test_set_stridesattr(self): - x = self.one - - def make_array(size, offset, strides): - try: - r = np.ndarray([size], dtype=int, buffer=x, - offset=offset*x.itemsize) - except Exception as e: - raise RuntimeError(e) - r.strides = strides = strides*x.itemsize - return r - - assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) - assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) - assert_raises(ValueError, make_array, 4, 4, -2) - assert_raises(ValueError, make_array, 4, 2, -1) - assert_raises(RuntimeError, make_array, 8, 3, 1) - # Check that the true extent of the array is used. - # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) - - def set_strides(arr, strides): - arr.strides = strides - - assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) - - # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], - shape=(10,), strides=(-1,)) - assert_raises(ValueError, set_strides, x[::-1], -1) - a = x[::-1] - a.strides = 1 - a[::2].strides = 2 - - def test_fill(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = np.empty((3, 2, 1), t) - y = np.empty((3, 2, 1), t) - x.fill(1) - y[...] = 1 - assert_equal(x, y) - - def test_fill_max_uint64(self): - x = np.empty((3, 2, 1), dtype=np.uint64) - y = np.empty((3, 2, 1), dtype=np.uint64) - value = 2**64 - 1 - y[...] 
= value - x.fill(value) - assert_array_equal(x, y) - - def test_fill_struct_array(self): - # Filling from a scalar - x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') - x.fill(x[0]) - assert_equal(x['f1'][1], x['f1'][0]) - # Filling from a tuple that can be converted - # to a scalar - x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) - x.fill((3.5, -2)) - assert_array_equal(x['a'], [3.5, 3.5]) - assert_array_equal(x['b'], [-2, -2]) - - -class TestArrayConstruction(object): - def test_array(self): - d = np.ones(6) - r = np.array([d, d]) - assert_equal(r, np.ones((2, 6))) - - d = np.ones(6) - tgt = np.ones((2, 6)) - r = np.array([d, d]) - assert_equal(r, tgt) - tgt[1] = 2 - r = np.array([d, d + 1]) - assert_equal(r, tgt) - - d = np.ones(6) - r = np.array([[d, d]]) - assert_equal(r, np.ones((1, 2, 6))) - - d = np.ones(6) - r = np.array([[d, d], [d, d]]) - assert_equal(r, np.ones((2, 2, 6))) - - d = np.ones((6, 6)) - r = np.array([d, d]) - assert_equal(r, np.ones((2, 6, 6))) - - d = np.ones((6, )) - r = np.array([[d, d + 1], d + 2]) - assert_equal(len(r), 2) - assert_equal(r[0], [d, d + 1]) - assert_equal(r[1], d + 2) - - tgt = np.ones((2, 3), dtype=bool) - tgt[0, 2] = False - tgt[1, 0:2] = False - r = np.array([[True, True, False], [False, False, True]]) - assert_equal(r, tgt) - r = np.array([[True, False], [True, False], [False, True]]) - assert_equal(r, tgt.T) - - def test_array_empty(self): - assert_raises(TypeError, np.array) - - def test_array_copy_false(self): - d = np.array([1, 2, 3]) - e = np.array(d, copy=False) - d[1] = 3 - assert_array_equal(e, [1, 3, 3]) - e = np.array(d, copy=False, order='F') - d[1] = 4 - assert_array_equal(e, [1, 4, 3]) - e[2] = 7 - assert_array_equal(d, [1, 4, 7]) - - def test_array_copy_true(self): - d = np.array([[1,2,3], [1, 2, 3]]) - e = np.array(d, copy=True) - d[0, 1] = 3 - e[0, 2] = -7 - assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) - assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) - e = np.array(d, copy=True, order='F') - d[0, 1] = 5 - e[0, 2] = 7 - assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) - assert_array_equal(d, [[1, 5, 3], [1,2,3]]) - - def test_array_cont(self): - d = np.ones(10)[::2] - assert_(np.ascontiguousarray(d).flags.c_contiguous) - assert_(np.ascontiguousarray(d).flags.f_contiguous) - assert_(np.asfortranarray(d).flags.c_contiguous) - assert_(np.asfortranarray(d).flags.f_contiguous) - d = np.ones((10, 10))[::2,::2] - assert_(np.ascontiguousarray(d).flags.c_contiguous) - assert_(np.asfortranarray(d).flags.f_contiguous) - - -class TestAssignment(object): - def test_assignment_broadcasting(self): - a = np.arange(6).reshape(2, 3) - - # Broadcasting the input to the output - a[...] = np.arange(3) - assert_equal(a, [[0, 1, 2], [0, 1, 2]]) - a[...] = np.arange(2).reshape(2, 1) - assert_equal(a, [[0, 0, 0], [1, 1, 1]]) - - # For compatibility with <= 1.5, a limited version of broadcasting - # the output to the input. - # - # This behavior is inconsistent with NumPy broadcasting - # in general, because it only uses one of the two broadcasting - # rules (adding a new "1" dimension to the left of the shape), - # applied to the output instead of an input. In NumPy 2.0, this kind - # of broadcasting assignment will likely be disallowed. - a[...] = np.arange(6)[::-1].reshape(1, 2, 3) - assert_equal(a, [[5, 4, 3], [2, 1, 0]]) - # The other type of broadcasting would require a reduction operation. - - def assign(a, b): - a[...] 
= b
-
-        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
-
-    def test_assignment_errors(self):
-        # Address issue #2276
-        class C:
-            pass
-        a = np.zeros(1)
-
-        def assign(v):
-            a[0] = v
-
-        assert_raises((AttributeError, TypeError), assign, C())
-        assert_raises(ValueError, assign, [1])
-
-    def test_unicode_assignment(self):
-        # gh-5049
-        from numpy.core.numeric import set_string_function
-
-        @contextmanager
-        def inject_str(s):
-            """ replace ndarray.__str__ temporarily """
-            set_string_function(lambda x: s, repr=False)
-            try:
-                yield
-            finally:
-                set_string_function(None, repr=False)
-
-        a1d = np.array([u'test'])
-        a0d = np.array(u'done')
-        with inject_str(u'bad'):
-            a1d[0] = a0d  # previously this would invoke __str__
-        assert_equal(a1d[0], u'done')
-
-        # this would crash for the same reason
-        np.array([np.array(u'\xe5\xe4\xf6')])
-
-    def test_stringlike_empty_list(self):
-        # gh-8902
-        u = np.array([u'done'])
-        b = np.array([b'done'])
-
-        class bad_sequence(object):
-            def __getitem__(self): pass
-            def __len__(self): raise RuntimeError
-
-        assert_raises(ValueError, operator.setitem, u, 0, [])
-        assert_raises(ValueError, operator.setitem, b, 0, [])
-
-        assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
-        assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
-
-    def test_longdouble_assignment(self):
-        # only relevant if longdouble is larger than float
-        # we're looking for loss of precision
-
-        for dtype in (np.longdouble, np.longcomplex):
-            # gh-8902
-            tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
-            tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
-
-            # construction
-            tiny1d = np.array([tinya])
-            assert_equal(tiny1d[0], tinya)
-
-            # scalar = scalar
-            tiny1d[0] = tinyb
-            assert_equal(tiny1d[0], tinyb)
-
-            # 0d = scalar
-            tiny1d[0, ...] = tinya
-            assert_equal(tiny1d[0], tinya)
-
-            # 0d = 0d
-            tiny1d[0, ...] = tinyb[...]
-            assert_equal(tiny1d[0], tinyb)
-
-            # scalar = 0d
-            tiny1d[0] = tinyb[...]
-            assert_equal(tiny1d[0], tinyb)
-
-            arr = np.array([np.array(tinya)])
-            assert_equal(arr[0], tinya)
-
-    def test_cast_to_string(self):
-        # cast to str should do "str(scalar)", not "str(scalar.item())"
-        # Example: In python2, str(float) is truncated, so we want to avoid
-        # str(np.float64(...).item()) as this would incorrectly truncate.
-        a = np.zeros(1, dtype='S20')
-        a[:] = np.array(['1.12345678901234567890'], dtype='f8')
-        assert_equal(a[0], b"1.1234567890123457")
-
-
-class TestDtypedescr(object):
-    def test_construction(self):
-        d1 = np.dtype('i4')
-        assert_equal(d1, np.dtype(np.int32))
-        d2 = np.dtype('f8')
-        assert_equal(d2, np.dtype(np.float64))
-
-    def test_byteorders(self):
-        assert_(np.dtype('<i4') != np.dtype('>i4'))
-        assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
-
-    def test_structured_non_void(self):
-        fields = [('a', '<i2'), ('b', '<i2')]
-        dt_int = np.dtype(('i4', fields))
-        assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
-
-    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
-    def test_sequence_long(self):
-        assert_equal(np.array([long(4), long(4)]).dtype, long)
-        assert_equal(np.array([long(4), 2**80]).dtype, object)
-        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
-        assert_equal(np.array([2**80, long(4)]).dtype, object)
-
-    def test_non_sequence_sequence(self):
-        """Should not segfault.
-
-        Class Fail breaks the sequence protocol for new style classes, i.e.,
-        those derived from object. Class Map is a mapping type indicated by
-        raising a ValueError. At some point we may raise a warning instead
-        of an error in the Fail case.
- - """ - class Fail(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise ValueError() - - class Map(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise KeyError() - - a = np.array([Map()]) - assert_(a.shape == (1,)) - assert_(a.dtype == np.dtype(object)) - assert_raises(ValueError, np.array, [Fail()]) - - def test_no_len_object_type(self): - # gh-5100, want object array from iterable object without len() - class Point2: - def __init__(self): - pass - - def __getitem__(self, ind): - if ind in [0, 1]: - return ind - else: - raise IndexError() - d = np.array([Point2(), Point2(), Point2()]) - assert_equal(d.dtype, np.dtype(object)) - - def test_false_len_sequence(self): - # gh-7264, segfault for this example - class C: - def __getitem__(self, i): - raise IndexError - def __len__(self): - return 42 - - assert_raises(ValueError, np.array, C()) # segfault? - - def test_failed_len_sequence(self): - # gh-7393 - class A(object): - def __init__(self, data): - self._data = data - def __getitem__(self, item): - return type(self)(self._data[item]) - def __len__(self): - return len(self._data) - - # len(d) should give 3, but len(d[0]) will fail - d = A([1,2,3]) - assert_equal(len(np.array(d)), 3) - - def test_array_too_big(self): - # Test that array creation succeeds for arrays addressable by intp - # on the byte level and fails for too large arrays. - buf = np.zeros(100) - - max_bytes = np.iinfo(np.intp).max - for dtype in ["intp", "S20", "b"]: - dtype = np.dtype(dtype) - itemsize = dtype.itemsize - - np.ndarray(buffer=buf, strides=(0,), - shape=(max_bytes//itemsize,), dtype=dtype) - assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,), - shape=(max_bytes//itemsize + 1,), dtype=dtype) - - def test_jagged_ndim_object(self): - # Lists of mismatching depths are treated as object arrays - a = np.array([[1], 2, 3]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([1, [2], 3]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([1, 2, [3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - def test_jagged_shape_object(self): - # The jagged dimension of a list is turned into an object array - a = np.array([[1, 1], [2], [3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([[1], [2, 2], [3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - a = np.array([[1], [2], [3, 3]]) - assert_equal(a.shape, (3,)) - assert_equal(a.dtype, object) - - -class TestStructured(object): - def test_subarray_field_access(self): - a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) - a['a'] = np.arange(60).reshape(3, 5, 2, 2) - - # Since the subarray is always in C-order, a transpose - # does not swap the subarray: - assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) - - # In Fortran order, the subarray gets appended - # like in all other cases, not prepended as a special case - b = a.copy(order='F') - assert_equal(a['a'].shape, b['a'].shape) - assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) - - def test_subarray_comparison(self): - # Check that comparisons between record arrays with - # multi-dimensional field types work properly - a = np.rec.fromrecords( - [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], - dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) - b = a.copy() - assert_equal(a == b, [True, True]) - assert_equal(a != b, [False, False]) - b[1].b = 'c' - assert_equal(a == b, [True, 
False]) - assert_equal(a != b, [False, True]) - for i in range(3): - b[0].a = a[0].a - b[0].a[i] = 5 - assert_equal(a == b, [False, False]) - assert_equal(a != b, [True, True]) - for i in range(2): - for j in range(2): - b = a.copy() - b[0].c[i, j] = 10 - assert_equal(a == b, [False, True]) - assert_equal(a != b, [True, False]) - - # Check that broadcasting with a subarray works - a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) - b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) - assert_equal(a == b, [[True, True, False], [False, False, True]]) - assert_equal(b == a, [[True, True, False], [False, False, True]]) - a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) - b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) - assert_equal(a == b, [[True, True, False], [False, False, True]]) - assert_equal(b == a, [[True, True, False], [False, False, True]]) - a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) - b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) - assert_equal(a == b, [[True, False, False], [False, False, True]]) - assert_equal(b == a, [[True, False, False], [False, False, True]]) - - # Check that broadcasting Fortran-style arrays with a subarray work - a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') - b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) - assert_equal(a == b, [[True, False, False], [False, False, True]]) - assert_equal(b == a, [[True, False, False], [False, False, True]]) - - # Check that incompatible sub-array shapes don't result to broadcasting - x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - # This comparison invokes deprecated behaviour, and will probably - # start raising an error eventually. What we really care about in this - # test is just that it doesn't return True. - with suppress_warnings() as sup: - sup.filter(FutureWarning, "elementwise == comparison failed") - assert_equal(x == y, False) - - x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - # This comparison invokes deprecated behaviour, and will probably - # start raising an error eventually. What we really care about in this - # test is just that it doesn't return True. 
-        with suppress_warnings() as sup:
-            sup.filter(FutureWarning, "elementwise == comparison failed")
-            assert_equal(x == y, False)
-
-        # Check that structured arrays that are different only in
-        # byte-order work
-        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
-        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
-        assert_equal(a == b, [False, True])
-
-    def test_casting(self):
-        # Check that casting a structured array to change its byte order
-        # works
-        a = np.array([(1,)], dtype=[('a', '<i4')])
-        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
-        b = a.astype([('a', '>i4')])
-        assert_equal(b, a.byteswap().newbyteorder())
-        assert_equal(a['a'][0], b['a'][0])
-
-        # Check that equality comparison works on structured arrays if
-        # they are 'equiv'-castable
-        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
-        b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
-        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
-        assert_equal(a == b, [True, True])
-
-        # Check that 'equiv' casting can change byte order
-        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
-        c = a.astype(b.dtype, casting='equiv')
-        assert_equal(a == c, [True, True])
-
-        # Check that 'safe' casting can change byte order and up-cast
-        # fields
-        t = [('a', '<i8'), ('b', '>f8')]
-        assert_(np.can_cast(a.dtype, t, casting='safe'))
-        c = a.astype(t, casting='safe')
-        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
-                     [True, True])
-
-        # Check that 'same_kind' casting can change byte order and
-        # change field widths within a "kind"
-        t = [('a', '<i4'), ('b', '>f4')]
-        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
-        c = a.astype(t, casting='same_kind')
-        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
-                     [True, True])
-
-        # Check that casting fails if the casting rule should fail on
-        # any of the fields
-        t = [('a', '>i8'), ('b', 'i2'), ('b', 'i8'), ('b', 'i4')]
-        assert_(not np.can_cast(a.dtype, t, casting=casting))
-        t = [('a', '>i4'), ('b', '
-
-    def _test_cast_from_flexible(self, dtype):
-        # empty string -> false
-        for n in range(3):
-            v = np.array(b'', (dtype, n))
-            assert_equal(bool(v), False)
-            assert_equal(bool(v[()]), False)
-            assert_equal(v.astype(bool), False)
-            assert_(isinstance(v.astype(bool), np.ndarray))
-            assert_(v[()].astype(bool) is np.False_)
-
-        # anything else -> true
-        for n in range(1, 4):
-            for val in [b'a', b'0', b' ']:
-                v = np.array(val, (dtype, n))
-                assert_equal(bool(v), True)
-                assert_equal(bool(v[()]), True)
-                assert_equal(v.astype(bool), True)
-                assert_(isinstance(v.astype(bool), np.ndarray))
-                assert_(v[()].astype(bool) is np.True_)
-
-    def test_cast_from_void(self):
-        self._test_cast_from_flexible(np.void)
-
-    @pytest.mark.xfail(reason="See gh-9847")
-    def test_cast_from_unicode(self):
-        self._test_cast_from_flexible(np.unicode_)
-
-    @pytest.mark.xfail(reason="See gh-9847")
-    def test_cast_from_bytes(self):
-        self._test_cast_from_flexible(np.bytes_)
-
-
-class TestZeroSizeFlexible(object):
-    @staticmethod
-    def _zeros(shape, dtype=str):
-        dtype = np.dtype(dtype)
-        if dtype == np.void:
-            return np.zeros(shape, dtype=(dtype, 0))
-
-        # not constructable directly
-        dtype = np.dtype([('x', dtype, 0)])
-        return np.zeros(shape, dtype=dtype)['x']
-
-    def test_create(self):
-        zs = self._zeros(10, bytes)
-        assert_equal(zs.itemsize, 0)
-        zs = self._zeros(10, np.void)
-        assert_equal(zs.itemsize, 0)
-        zs = self._zeros(10, unicode)
-        assert_equal(zs.itemsize, 0)
-
-    def _test_sort_partition(self, name, kinds, **kwargs):
-        # Previously, these would all hang
-        for dt in [bytes, np.void, unicode]:
-            zs = self._zeros(10, dt)
-            sort_method = getattr(zs, name)
-            sort_func = getattr(np, name)
-            for kind in kinds:
-                sort_method(kind=kind, **kwargs)
-                sort_func(zs, kind=kind,
**kwargs) - - def test_sort(self): - self._test_sort_partition('sort', kinds='qhs') - - def test_argsort(self): - self._test_sort_partition('argsort', kinds='qhs') - - def test_partition(self): - self._test_sort_partition('partition', kinds=['introselect'], kth=2) - - def test_argpartition(self): - self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) - - def test_resize(self): - # previously an error - for dt in [bytes, np.void, unicode]: - zs = self._zeros(10, dt) - zs.resize(25) - zs.resize((10, 10)) - - def test_view(self): - for dt in [bytes, np.void, unicode]: - zs = self._zeros(10, dt) - - # viewing as itself should be allowed - assert_equal(zs.view(dt).dtype, np.dtype(dt)) - - # viewing as any non-empty type gives an empty result - assert_equal(zs.view((dt, 1)).shape, (0,)) - - def test_dumps(self): - zs = self._zeros(10, int) - assert_equal(zs, pickle.loads(zs.dumps())) - - def test_pickle(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - for dt in [bytes, np.void, unicode]: - zs = self._zeros(10, dt) - p = pickle.dumps(zs, protocol=proto) - zs2 = pickle.loads(p) - - assert_equal(zs.dtype, zs2.dtype) - - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, - reason="requires pickle protocol 5") - def test_pickle_with_buffercallback(self): - array = np.arange(10) - buffers = [] - bytes_string = pickle.dumps(array, buffer_callback=buffers.append, - protocol=5) - array_from_buffer = pickle.loads(bytes_string, buffers=buffers) - # when using pickle protocol 5 with buffer callbacks, - # array_from_buffer is reconstructed from a buffer holding a view - # to the initial array's data, so modifying an element in array - # should modify it in array_from_buffer too. - array[0] = -1 - assert array_from_buffer[0] == -1, array_from_buffer[0] - - -class TestMethods(object): - - sort_kinds = ['quicksort', 'heapsort', 'stable'] - - def test_compress(self): - tgt = [[5, 6, 7, 8, 9]] - arr = np.arange(10).reshape(2, 5) - out = arr.compress([0, 1], axis=0) - assert_equal(out, tgt) - - tgt = [[1, 3], [6, 8]] - out = arr.compress([0, 1, 0, 1, 0], axis=1) - assert_equal(out, tgt) - - tgt = [[1], [6]] - arr = np.arange(10).reshape(2, 5) - out = arr.compress([0, 1], axis=1) - assert_equal(out, tgt) - - arr = np.arange(10).reshape(2, 5) - out = arr.compress([0, 1]) - assert_equal(out, 1) - - def test_choose(self): - x = 2*np.ones((3,), dtype=int) - y = 3*np.ones((3,), dtype=int) - x2 = 2*np.ones((2, 3), dtype=int) - y2 = 3*np.ones((2, 3), dtype=int) - ind = np.array([0, 0, 1]) - - A = ind.choose((x, y)) - assert_equal(A, [2, 2, 3]) - - A = ind.choose((x2, y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - A = ind.choose((x, y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - oned = np.ones(1) - # gh-12031, caused SEGFAULT - assert_raises(TypeError, oned.choose,np.void(0), [oned]) - - # gh-6272 check overlap on out - x = np.arange(5) - y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') - assert_equal(y, np.array([0, 1, 2])) - - def test_prod(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - - for ctype in [np.int16, np.uint16, np.int32, np.uint32, - np.float32, np.float64, np.complex64, np.complex128]: - a = np.array(ba, ctype) - a2 = np.array(ba2, ctype) - if ctype in ['1', 'b']: - assert_raises(ArithmeticError, a.prod) - assert_raises(ArithmeticError, a2.prod, axis=1) - else: - assert_equal(a.prod(axis=0), 26400) - assert_array_equal(a2.prod(axis=0), - np.array([50, 36, 84, 180], ctype)) - 
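# A standalone sketch (not part of the original file) of the protocol-5
# behaviour tested in test_pickle_with_buffercallback above: with a
# buffer_callback, pickle serialises the array payload out-of-band, and
# the reloaded array is a view over the collected buffer, not a copy.
import pickle
import numpy as np

if pickle.HIGHEST_PROTOCOL >= 5:
    arr = np.arange(10)
    bufs = []
    payload = pickle.dumps(arr, protocol=5, buffer_callback=bufs.append)
    clone = pickle.loads(payload, buffers=bufs)
    arr[0] = -1
    assert clone[0] == -1   # shares memory with the original array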
assert_array_equal(a2.prod(axis=-1), - np.array([24, 1890, 600], ctype)) - - def test_repeat(self): - m = np.array([1, 2, 3, 4, 5, 6]) - m_rect = m.reshape((2, 3)) - - A = m.repeat([1, 3, 2, 1, 1, 2]) - assert_equal(A, [1, 2, 2, 2, 3, - 3, 4, 5, 6, 6]) - - A = m.repeat(2) - assert_equal(A, [1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6]) - - A = m_rect.repeat([2, 1], axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6]]) - - A = m_rect.repeat([1, 3, 2], axis=1) - assert_equal(A, [[1, 2, 2, 2, 3, 3], - [4, 5, 5, 5, 6, 6]]) - - A = m_rect.repeat(2, axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6], - [4, 5, 6]]) - - A = m_rect.repeat(2, axis=1) - assert_equal(A, [[1, 1, 2, 2, 3, 3], - [4, 4, 5, 5, 6, 6]]) - - def test_reshape(self): - arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) - - tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] - assert_equal(arr.reshape(2, 6), tgt) - - tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] - assert_equal(arr.reshape(3, 4), tgt) - - tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] - assert_equal(arr.reshape((3, 4), order='F'), tgt) - - tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] - assert_equal(arr.T.reshape((3, 4), order='C'), tgt) - - def test_round(self): - def check_round(arr, expected, *round_args): - assert_equal(arr.round(*round_args), expected) - # With output array - out = np.zeros_like(arr) - res = arr.round(*round_args, out=out) - assert_equal(out, expected) - assert_equal(out, res) - - check_round(np.array([1.2, 1.5]), [1, 2]) - check_round(np.array(1.5), 2) - check_round(np.array([12.2, 15.5]), [10, 20], -1) - check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) - # Complex rounding - check_round(np.array([4.5 + 1.5j]), [4 + 2j]) - check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) - - def test_squeeze(self): - a = np.array([[[1], [2], [3]]]) - assert_equal(a.squeeze(), [1, 2, 3]) - assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) - assert_raises(ValueError, a.squeeze, axis=(1,)) - assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) - - def test_transpose(self): - a = np.array([[1, 2], [3, 4]]) - assert_equal(a.transpose(), [[1, 3], [2, 4]]) - assert_raises(ValueError, lambda: a.transpose(0)) - assert_raises(ValueError, lambda: a.transpose(0, 0)) - assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) - - def test_sort(self): - # test ordering for floats and complex containing nans. It is only - # necessary to check the less-than comparison, so sorts that - # only follow the insertion sort path are sufficient. We only - # test doubles and complex doubles as the logic is the same. - - # check doubles - msg = "Test real sort order with nans" - a = np.array([np.nan, 1, 0]) - b = np.sort(a) - assert_equal(b, a[::-1], msg) - # check complex - msg = "Test complex sort order with nans" - a = np.zeros(9, dtype=np.complex128) - a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] - a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] - b = np.sort(a) - assert_equal(b, a[::-1], msg) - - # all c scalar sorts use the same code with different types - # so it suffices to run a quick check with one type. The number - # of sorted items must be greater than ~50 to check the actual - # algorithm because quick and merge sort fall over to insertion - # sort for small arrays. 
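# Sketch (not from the file): the NaN-ordering checks above rely on
# np.sort treating NaN as the largest value, so NaNs land at the end.
import numpy as np

b = np.sort(np.array([np.nan, 1.0, 0.0]))
assert b[0] == 0.0 and b[1] == 1.0 and np.isnan(b[2])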
- # Test unsigned dtypes and nonnegative numbers - for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, np.longdouble]: - a = np.arange(101, dtype=dtype) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype) - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # Test signed dtypes and negative numbers as well - for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64, np.longdouble]: - a = np.arange(-50, 51, dtype=dtype) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype) - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test complex sorts. These use the same code as the scalars - # but the compare function differs. - ai = a*1j + 1 - bi = b*1j + 1 - for kind in self.sort_kinds: - msg = "complex sort, real part == 1, kind=%s" % kind - c = ai.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - ai = a + 1j - bi = b + 1j - for kind in self.sort_kinds: - msg = "complex sort, imag part == 1, kind=%s" % kind - c = ai.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy() - c.sort(kind=kind) - assert_equal(c, ai, msg) - - # test sorting of complex arrays requiring byte-swapping, gh-5441 - for endianness in '<>': - for dt in np.typecodes['Complex']: - arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) - c = arr.copy() - c.sort() - msg = 'byte-swapped complex sort, dtype={0}'.format(dt) - assert_equal(c, arr, msg) - - # test string sorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)]) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "string sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test unicode sorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_) - b = a[::-1].copy() - for kind in self.sort_kinds: - msg = "unicode sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test object array sorts. - a = np.empty((101,), dtype=object) - a[:] = list(range(101)) - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "object sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test record array sorts. - dt = np.dtype([('f', float), ('i', int)]) - a = np.array([(i, i) for i in range(101)], dtype=dt) - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "object sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test datetime64 sorts. - a = np.arange(0, 101, dtype='datetime64[D]') - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "datetime64 sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test timedelta64 sorts. 
- a = np.arange(0, 101, dtype='timedelta64[D]') - b = a[::-1] - for kind in ['q', 'h', 'm']: - msg = "timedelta64 sort, kind=%s" % kind - c = a.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy() - c.sort(kind=kind) - assert_equal(c, a, msg) - - # check axis handling. This should be the same for all type - # specific sorts, so we only check it for one type and one kind - a = np.array([[3, 2], [1, 0]]) - b = np.array([[1, 0], [3, 2]]) - c = np.array([[2, 3], [0, 1]]) - d = a.copy() - d.sort(axis=0) - assert_equal(d, b, "test sort with axis=0") - d = a.copy() - d.sort(axis=1) - assert_equal(d, c, "test sort with axis=1") - d = a.copy() - d.sort() - assert_equal(d, c, "test sort with default axis") - - # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) - for axis in range(-a.ndim, a.ndim): - msg = 'test empty array sort with axis={0}'.format(axis) - assert_equal(np.sort(a, axis=axis), a, msg) - msg = 'test empty array sort with axis=None' - assert_equal(np.sort(a, axis=None), a.ravel(), msg) - - # test generic class with bogus ordering, - # should not segfault. - class Boom(object): - def __lt__(self, other): - return True - - a = np.array([Boom()]*100, dtype=object) - for kind in self.sort_kinds: - msg = "bogus comparison object sort, kind=%s" % kind - c.sort(kind=kind) - - def test_void_sort(self): - # gh-8210 - previously segfaulted - for i in range(4): - rand = np.random.randint(256, size=4000, dtype=np.uint8) - arr = rand.view('V4') - arr[::-1].sort() - - dt = np.dtype([('val', 'i4', (1,))]) - for i in range(4): - rand = np.random.randint(256, size=4000, dtype=np.uint8) - arr = rand.view(dt) - arr[::-1].sort() - - def test_sort_raises(self): - #gh-9404 - arr = np.array([0, datetime.now(), 1], dtype=object) - for kind in self.sort_kinds: - assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 - class Raiser(object): - def raises_anything(*args, **kwargs): - raise TypeError("SOMETHING ERRORED") - __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything - arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) - np.random.shuffle(arr) - for kind in self.sort_kinds: - assert_raises(TypeError, arr.sort, kind=kind) - - def test_sort_degraded(self): - # test degraded dataset would take minutes to run with normal qsort - d = np.arange(1000000) - do = d.copy() - x = d - # create a median of 3 killer where each median is the sorted second - # last element of the quicksort partition - while x.size > 3: - mid = x.size // 2 - x[mid], x[-2] = x[-2], x[mid] - x = x[:-2] - - assert_equal(np.sort(d), do) - assert_equal(d[np.argsort(d)], do) - - def test_copy(self): - def assert_fortran(arr): - assert_(arr.flags.fortran) - assert_(arr.flags.f_contiguous) - assert_(not arr.flags.c_contiguous) - - def assert_c(arr): - assert_(not arr.flags.fortran) - assert_(not arr.flags.f_contiguous) - assert_(arr.flags.c_contiguous) - - a = np.empty((2, 2), order='F') - # Test copying a Fortran array - assert_c(a.copy()) - assert_c(a.copy('C')) - assert_fortran(a.copy('F')) - assert_fortran(a.copy('A')) - - # Now test starting with a C array. 
- a = np.empty((2, 2), order='C') - assert_c(a.copy()) - assert_c(a.copy('C')) - assert_fortran(a.copy('F')) - assert_c(a.copy('A')) - - def test_sort_order(self): - # Test sorting an array with fields - x1 = np.array([21, 32, 14]) - x2 = np.array(['my', 'first', 'name']) - x3 = np.array([3.1, 4.5, 6.2]) - r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') - - r.sort(order=['id']) - assert_equal(r.id, np.array([14, 21, 32])) - assert_equal(r.word, np.array(['name', 'my', 'first'])) - assert_equal(r.number, np.array([6.2, 3.1, 4.5])) - - r.sort(order=['word']) - assert_equal(r.id, np.array([32, 21, 14])) - assert_equal(r.word, np.array(['first', 'my', 'name'])) - assert_equal(r.number, np.array([4.5, 3.1, 6.2])) - - r.sort(order=['number']) - assert_equal(r.id, np.array([21, 32, 14])) - assert_equal(r.word, np.array(['my', 'first', 'name'])) - assert_equal(r.number, np.array([3.1, 4.5, 6.2])) - - assert_raises_regex(ValueError, 'duplicate', - lambda: r.sort(order=['id', 'id'])) - - if sys.byteorder == 'little': - strtype = '>i2' - else: - strtype = '': - for dt in np.typecodes['Complex']: - arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) - msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) - assert_equal(arr.argsort(), - np.arange(len(arr), dtype=np.intp), msg) - - # test string argsorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)]) - b = a[::-1].copy() - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "string argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test unicode argsorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_) - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "unicode argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test object array argsorts. - a = np.empty((101,), dtype=object) - a[:] = list(range(101)) - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "object argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test structured array argsorts. - dt = np.dtype([('f', float), ('i', int)]) - a = np.array([(i, i) for i in range(101)], dtype=dt) - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in self.sort_kinds: - msg = "structured array argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test datetime64 argsorts. - a = np.arange(0, 101, dtype='datetime64[D]') - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in ['q', 'h', 'm']: - msg = "datetime64 argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # test timedelta64 argsorts. - a = np.arange(0, 101, dtype='timedelta64[D]') - b = a[::-1] - r = np.arange(101) - rr = r[::-1] - for kind in ['q', 'h', 'm']: - msg = "timedelta64 argsort, kind=%s" % kind - assert_equal(a.copy().argsort(kind=kind), r, msg) - assert_equal(b.copy().argsort(kind=kind), rr, msg) - - # check axis handling. 
This should be the same for all type - # specific argsorts, so we only check it for one type and one kind - a = np.array([[3, 2], [1, 0]]) - b = np.array([[1, 1], [0, 0]]) - c = np.array([[1, 0], [1, 0]]) - assert_equal(a.copy().argsort(axis=0), b) - assert_equal(a.copy().argsort(axis=1), c) - assert_equal(a.copy().argsort(), c) - - # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) - for axis in range(-a.ndim, a.ndim): - msg = 'test empty array argsort with axis={0}'.format(axis) - assert_equal(np.argsort(a, axis=axis), - np.zeros_like(a, dtype=np.intp), msg) - msg = 'test empty array argsort with axis=None' - assert_equal(np.argsort(a, axis=None), - np.zeros_like(a.ravel(), dtype=np.intp), msg) - - # check that stable argsorts are stable - r = np.arange(100) - # scalars - a = np.zeros(100) - assert_equal(a.argsort(kind='m'), r) - # complex - a = np.zeros(100, dtype=complex) - assert_equal(a.argsort(kind='m'), r) - # string - a = np.array(['aaaaaaaaa' for i in range(100)]) - assert_equal(a.argsort(kind='m'), r) - # unicode - a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_) - assert_equal(a.argsort(kind='m'), r) - - def test_sort_unicode_kind(self): - d = np.arange(10) - k = b'\xc3\xa4'.decode("UTF8") - assert_raises(ValueError, d.sort, kind=k) - assert_raises(ValueError, d.argsort, kind=k) - - def test_searchsorted(self): - # test for floats and complex containing nans. The logic is the - # same for all float types so only test double types for now. - # The search sorted routines use the compare functions for the - # array type, so this checks if that is consistent with the sort - # order. - - # check double - a = np.array([0, 1, np.nan]) - msg = "Test real searchsorted with nans, side='l'" - b = a.searchsorted(a, side='l') - assert_equal(b, np.arange(3), msg) - msg = "Test real searchsorted with nans, side='r'" - b = a.searchsorted(a, side='r') - assert_equal(b, np.arange(1, 4), msg) - # check double complex - a = np.zeros(9, dtype=np.complex128) - a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] - a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] - msg = "Test complex searchsorted with nans, side='l'" - b = a.searchsorted(a, side='l') - assert_equal(b, np.arange(9), msg) - msg = "Test complex searchsorted with nans, side='r'" - b = a.searchsorted(a, side='r') - assert_equal(b, np.arange(1, 10), msg) - msg = "Test searchsorted with little endian, side='l'" - a = np.array([0, 128], dtype=' p[:, i]).all(), - msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) - aae(p, d1[np.arange(d1.shape[0])[:, None], - np.argpartition(d1, i, axis=1, kind=k)]) - - p = np.partition(d0, i, axis=0, kind=k) - aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) - # array_less does not seem to work right - at((p[:i, :] <= p[i, :]).all(), - msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) - at((p[i + 1:, :] > p[i, :]).all(), - msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) - aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), - np.arange(d0.shape[1])[None, :]]) - - # check inplace - dc = d.copy() - dc.partition(i, kind=k) - assert_equal(dc, np.partition(d, i, kind=k)) - dc = d0.copy() - dc.partition(i, axis=0, kind=k) - assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) - dc = d1.copy() - dc.partition(i, axis=1, kind=k) - assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) - - def assert_partitioned(self, d, kth): - prev = 0 - for k in np.sort(kth): - assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) - assert_((d[k:] >= d[k]).all(), 
- msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) - prev = k + 1 - - def test_partition_iterative(self): - d = np.arange(17) - kth = (0, 1, 2, 429, 231) - assert_raises(ValueError, d.partition, kth) - assert_raises(ValueError, d.argpartition, kth) - d = np.arange(10).reshape((2, 5)) - assert_raises(ValueError, d.partition, kth, axis=0) - assert_raises(ValueError, d.partition, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=None) - - d = np.array([3, 4, 2, 1]) - p = np.partition(d, (0, 3)) - self.assert_partitioned(p, (0, 3)) - self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) - - assert_array_equal(p, np.partition(d, (-3, -1))) - assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) - - d = np.arange(17) - np.random.shuffle(d) - d.partition(range(d.size)) - assert_array_equal(np.arange(17), d) - np.random.shuffle(d) - assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) - - # test unsorted kth - d = np.arange(17) - np.random.shuffle(d) - keys = np.array([1, 3, 8, -2]) - np.random.shuffle(d) - p = np.partition(d, keys) - self.assert_partitioned(p, keys) - p = d[np.argpartition(d, keys)] - self.assert_partitioned(p, keys) - np.random.shuffle(keys) - assert_array_equal(np.partition(d, keys), p) - assert_array_equal(d[np.argpartition(d, keys)], p) - - # equal kth - d = np.arange(20)[::-1] - self.assert_partitioned(np.partition(d, [5]*4), [5]) - self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), - [5]*4 + [6, 13]) - self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) - self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], - [5]*4 + [6, 13]) - - d = np.arange(12) - np.random.shuffle(d) - d1 = np.tile(np.arange(12), (4, 1)) - map(np.random.shuffle, d1) - d0 = np.transpose(d1) - - kth = (1, 6, 7, -1) - p = np.partition(d1, kth, axis=1) - pa = d1[np.arange(d1.shape[0])[:, None], - d1.argpartition(kth, axis=1)] - assert_array_equal(p, pa) - for i in range(d1.shape[0]): - self.assert_partitioned(p[i,:], kth) - p = np.partition(d0, kth, axis=0) - pa = d0[np.argpartition(d0, kth, axis=0), - np.arange(d0.shape[1])[None,:]] - assert_array_equal(p, pa) - for i in range(d0.shape[1]): - self.assert_partitioned(p[:, i], kth) - - def test_partition_cdtype(self): - d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.9, 38)], - dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) - ops = { - 'add': (np.add, True, float), - 'sub': (np.subtract, True, float), - 'mul': (np.multiply, True, float), - 'truediv': (np.true_divide, True, float), - 'floordiv': (np.floor_divide, True, float), - 'mod': (np.remainder, True, float), - 'divmod': (np.divmod, False, float), - 'pow': (np.power, True, int), - 'lshift': (np.left_shift, True, int), - 'rshift': (np.right_shift, True, int), - 'and': (np.bitwise_and, True, int), - 'xor': (np.bitwise_xor, True, int), - 'or': (np.bitwise_or, True, int), - # 'ge': (np.less_equal, False), - # 'gt': (np.less, False), - # 'le': (np.greater_equal, False), - # 'lt': (np.greater, False), - # 'eq': (np.equal, False), - # 'ne': (np.not_equal, False), - } - if sys.version_info >= (3, 5): - ops['matmul'] = (np.matmul, False, float) - - class Coerced(Exception): - pass - - def array_impl(self): - raise Coerced - - def op_impl(self, other): - return "forward" - - def rop_impl(self, other): - return "reverse" - - def iop_impl(self, other): - return "in-place" - - def array_ufunc_impl(self, ufunc, 
method, *args, **kwargs): - return ("__array_ufunc__", ufunc, method, args, kwargs) - - # Create an object with the given base, in the given module, with a - # bunch of placeholder __op__ methods, and optionally a - # __array_ufunc__ and __array_priority__. - def make_obj(base, array_priority=False, array_ufunc=False, - alleged_module="__main__"): - class_namespace = {"__array__": array_impl} - if array_priority is not False: - class_namespace["__array_priority__"] = array_priority - for op in ops: - class_namespace["__{0}__".format(op)] = op_impl - class_namespace["__r{0}__".format(op)] = rop_impl - class_namespace["__i{0}__".format(op)] = iop_impl - if array_ufunc is not False: - class_namespace["__array_ufunc__"] = array_ufunc - eval_namespace = {"base": base, - "class_namespace": class_namespace, - "__name__": alleged_module, - } - MyType = eval("type('MyType', (base,), class_namespace)", - eval_namespace) - if issubclass(MyType, np.ndarray): - # Use this range to avoid special case weirdnesses around - # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. - return np.arange(3, 7).reshape(2, 2).view(MyType) - else: - return MyType() - - def check(obj, binop_override_expected, ufunc_override_expected, - inplace_override_expected, check_scalar=True): - for op, (ufunc, has_inplace, dtype) in ops.items(): - err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' - % (op, ufunc, has_inplace, dtype)) - check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] - if check_scalar: - check_objs.append(check_objs[0][0]) - for arr in check_objs: - arr_method = getattr(arr, "__{0}__".format(op)) - - def first_out_arg(result): - if op == "divmod": - assert_(isinstance(result, tuple)) - return result[0] - else: - return result - - # arr __op__ obj - if binop_override_expected: - assert_equal(arr_method(obj), NotImplemented, err_msg) - elif ufunc_override_expected: - assert_equal(arr_method(obj)[0], "__array_ufunc__", - err_msg) - else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_method(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_method, obj, err_msg=err_msg) - # obj __op__ arr - arr_rmethod = getattr(arr, "__r{0}__".format(op)) - if ufunc_override_expected: - res = arr_rmethod(obj) - assert_equal(res[0], "__array_ufunc__", - err_msg=err_msg) - assert_equal(res[1], ufunc, err_msg=err_msg) - else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_rmethod(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - # __array_ufunc__ = "asdf" creates a TypeError - assert_raises((TypeError, Coerced), - arr_rmethod, obj, err_msg=err_msg) - - # arr __iop__ obj - # array scalars don't have in-place operators - if has_inplace and isinstance(arr, np.ndarray): - arr_imethod = getattr(arr, "__i{0}__".format(op)) - if inplace_override_expected: - assert_equal(arr_method(obj), NotImplemented, - err_msg=err_msg) - elif ufunc_override_expected: - res = arr_imethod(obj) - assert_equal(res[0], "__array_ufunc__", err_msg) - assert_equal(res[1], ufunc, err_msg) - assert_(type(res[-1]["out"]) is tuple, err_msg) - assert_(res[-1]["out"][0] is arr, err_msg) - else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - assert_(arr_imethod(obj) is arr, 
err_msg) - else: - assert_raises((TypeError, Coerced), - arr_imethod, obj, - err_msg=err_msg) - - op_fn = getattr(operator, op, None) - if op_fn is None: - op_fn = getattr(operator, op + "_", None) - if op_fn is None: - op_fn = getattr(builtins, op) - assert_equal(op_fn(obj, arr), "forward", err_msg) - if not isinstance(obj, np.ndarray): - if binop_override_expected: - assert_equal(op_fn(arr, obj), "reverse", err_msg) - elif ufunc_override_expected: - assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", - err_msg) - if ufunc_override_expected: - assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", - err_msg) - - # No array priority, no array_ufunc -> nothing called - check(make_obj(object), False, False, False) - # Negative array priority, no array_ufunc -> nothing called - # (has to be very negative, because scalar priority is -1000000.0) - check(make_obj(object, array_priority=-2**30), False, False, False) - # Positive array priority, no array_ufunc -> binops and iops only - check(make_obj(object, array_priority=1), True, False, True) - # ndarray ignores array_priority for ndarray subclasses - check(make_obj(np.ndarray, array_priority=1), False, False, False, - check_scalar=False) - # Positive array_priority and array_ufunc -> array_ufunc only - check(make_obj(object, array_priority=1, - array_ufunc=array_ufunc_impl), False, True, False) - check(make_obj(np.ndarray, array_priority=1, - array_ufunc=array_ufunc_impl), False, True, False) - # array_ufunc set to None -> defer binops only - check(make_obj(object, array_ufunc=None), True, False, False) - check(make_obj(np.ndarray, array_ufunc=None), True, False, False, - check_scalar=False) - - def test_ufunc_override_normalize_signature(self): - # gh-5674 - class SomeClass(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - return kw - - a = SomeClass() - kw = np.add(a, [1]) - assert_('sig' not in kw and 'signature' not in kw) - kw = np.add(a, [1], sig='ii->i') - assert_('sig' not in kw and 'signature' in kw) - assert_equal(kw['signature'], 'ii->i') - kw = np.add(a, [1], signature='ii->i') - assert_('sig' not in kw and 'signature' in kw) - assert_equal(kw['signature'], 'ii->i') - - def test_array_ufunc_index(self): - # Check that index is set appropriately, also if only an output - # is passed on (latter is another regression tests for github bug 4753) - # This also checks implicitly that 'out' is always a tuple. - class CheckIndex(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - for i, a in enumerate(inputs): - if a is self: - return i - # calls below mean we must be in an output. - for j, a in enumerate(kw['out']): - if a is self: - return (j,) - - a = CheckIndex() - dummy = np.arange(2.) 
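# Sketch (not part of the original tests): the deferral rule covered by
# check(make_obj(object, array_ufunc=None), True, False, False) above,
# in isolation. Setting __array_ufunc__ = None makes ndarray's binary
# operators return NotImplemented, so Python falls back to the other
# operand's reflected method.
import numpy as np

class Deferring(object):
    __array_ufunc__ = None

    def __radd__(self, other):
        return "reverse"

assert np.arange(3) + Deferring() == "reverse"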
- # 1 input, 1 output - assert_equal(np.sin(a), 0) - assert_equal(np.sin(dummy, a), (0,)) - assert_equal(np.sin(dummy, out=a), (0,)) - assert_equal(np.sin(dummy, out=(a,)), (0,)) - assert_equal(np.sin(a, a), 0) - assert_equal(np.sin(a, out=a), 0) - assert_equal(np.sin(a, out=(a,)), 0) - # 1 input, 2 outputs - assert_equal(np.modf(dummy, a), (0,)) - assert_equal(np.modf(dummy, None, a), (1,)) - assert_equal(np.modf(dummy, dummy, a), (1,)) - assert_equal(np.modf(dummy, out=(a, None)), (0,)) - assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) - assert_equal(np.modf(dummy, out=(None, a)), (1,)) - assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) - assert_equal(np.modf(a, out=(dummy, a)), 0) - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs - np.modf(dummy, out=a) - - assert_raises(ValueError, np.modf, dummy, out=(a,)) - - # 2 inputs, 1 output - assert_equal(np.add(a, dummy), 0) - assert_equal(np.add(dummy, a), 1) - assert_equal(np.add(dummy, dummy, a), (0,)) - assert_equal(np.add(dummy, a, a), 1) - assert_equal(np.add(dummy, dummy, out=a), (0,)) - assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) - assert_equal(np.add(a, dummy, out=a), 0) - - def test_out_override(self): - # regression test for github bug 4753 - class OutClass(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - if 'out' in kw: - tmp_kw = kw.copy() - tmp_kw.pop('out') - func = getattr(ufunc, method) - kw['out'][0][...] = func(*inputs, **tmp_kw) - - A = np.array([0]).view(OutClass) - B = np.array([5]) - C = np.array([6]) - np.multiply(C, B, A) - assert_equal(A[0], 30) - assert_(isinstance(A, OutClass)) - A[0] = 0 - np.multiply(C, B, out=A) - assert_equal(A[0], 30) - assert_(isinstance(A, OutClass)) - - def test_pow_override_with_errors(self): - # regression test for gh-9112 - class PowerOnly(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - if ufunc is not np.power: - raise NotImplementedError - return "POWER!" - # explicit cast to float, to ensure the fast power path is taken. 
- a = np.array(5., dtype=np.float64).view(PowerOnly) - assert_equal(a ** 2.5, "POWER!") - with assert_raises(NotImplementedError): - a ** 0.5 - with assert_raises(NotImplementedError): - a ** 0 - with assert_raises(NotImplementedError): - a ** 1 - with assert_raises(NotImplementedError): - a ** -1 - with assert_raises(NotImplementedError): - a ** 2 - - def test_pow_array_object_dtype(self): - # test pow on arrays of object dtype - class SomeClass(object): - def __init__(self, num=None): - self.num = num - - # want to ensure a fast pow path is not taken - def __mul__(self, other): - raise AssertionError('__mul__ should not be called') - - def __div__(self, other): - raise AssertionError('__div__ should not be called') - - def __pow__(self, exp): - return SomeClass(num=self.num ** exp) - - def __eq__(self, other): - if isinstance(other, SomeClass): - return self.num == other.num - - __rpow__ = __pow__ - - def pow_for(exp, arr): - return np.array([x ** exp for x in arr]) - - obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) - - assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) - assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) - assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) - assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) - assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) - - def test_pos_array_ufunc_override(self): - class A(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return getattr(ufunc, method)(*[i.view(np.ndarray) for - i in inputs], **kwargs) - tst = np.array('foo').view(A) - with assert_raises(TypeError): - +tst - - -class TestTemporaryElide(object): - # elision is only triggered on relatively large arrays - - def test_extension_incref_elide(self): - # test extension (e.g. cython) calling PyNumber_* slots without - # increasing the reference counts - # - # def incref_elide(a): - # d = input.copy() # refcount 1 - # return d, d + d # PyNumber_Add without increasing refcount - from numpy.core._multiarray_tests import incref_elide - d = np.ones(100000) - orig, res = incref_elide(d) - d + d - # the return original should not be changed to an inplace operation - assert_array_equal(orig, d) - assert_array_equal(res, d + d) - - def test_extension_incref_elide_stack(self): - # scanning if the refcount == 1 object is on the python stack to check - # that we are called directly from python is flawed as object may still - # be above the stack pointer and we have no access to the top of it - # - # def incref_elide_l(d): - # return l[4] + l[4] # PyNumber_Add without increasing refcount - from numpy.core._multiarray_tests import incref_elide_l - # padding with 1 makes sure the object on the stack is not overwritten - l = [1, 1, 1, 1, np.ones(100000)] - res = incref_elide_l(l) - # the return original should not be changed to an inplace operation - assert_array_equal(l[4], np.ones(100000)) - assert_array_equal(res, l[4] + l[4]) - - def test_temporary_with_cast(self): - # check that we don't elide into a temporary which would need casting - d = np.ones(200000, dtype=np.int64) - assert_equal(((d + d) + 2**222).dtype, np.dtype('O')) - - r = ((d + d) / 2) - assert_equal(r.dtype, np.dtype('f8')) - - r = np.true_divide((d + d), 2) - assert_equal(r.dtype, np.dtype('f8')) - - r = ((d + d) / 2.) 
- assert_equal(r.dtype, np.dtype('f8')) - - r = ((d + d) // 2) - assert_equal(r.dtype, np.dtype(np.int64)) - - # commutative elision into the astype result - f = np.ones(100000, dtype=np.float32) - assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) - - # no elision into lower type - d = f.astype(np.float64) - assert_equal(((f + f) + d).dtype, d.dtype) - l = np.ones(100000, dtype=np.longdouble) - assert_equal(((d + d) + l).dtype, l.dtype) - - # test unary abs with different output dtype - for dt in (np.complex64, np.complex128, np.clongdouble): - c = np.ones(100000, dtype=dt) - r = abs(c * 2.0) - assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) - - def test_elide_broadcast(self): - # test no elision on broadcast to higher dimension - # only triggers elision code path in debug mode as triggering it in - # normal mode needs 256kb large matching dimension, so a lot of memory - d = np.ones((2000, 1), dtype=int) - b = np.ones((2000), dtype=bool) - r = (1 - d) + b - assert_equal(r, 1) - assert_equal(r.shape, (2000, 2000)) - - def test_elide_scalar(self): - # check inplace op does not create ndarray from scalars - a = np.bool_() - assert_(type(~(a & a)) is np.bool_) - - def test_elide_scalar_readonly(self): - # The imaginary part of a real array is readonly. This needs to go - # through fast_scalar_power which is only called for powers of - # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for - # elision which can be gotten for the imaginary part of a real - # array. Should not error. - a = np.empty(100000, dtype=np.float64) - a.imag ** 2 - - def test_elide_readonly(self): - # don't try to elide readonly temporaries - r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 - assert_equal(r, 0) - - def test_elide_updateifcopy(self): - a = np.ones(2**20)[::2] - b = a.flat.__array__() + 1 - del b - assert_equal(a, 1) - - -class TestCAPI(object): - def test_IsPythonScalar(self): - from numpy.core._multiarray_tests import IsPythonScalar - assert_(IsPythonScalar(b'foobar')) - assert_(IsPythonScalar(1)) - assert_(IsPythonScalar(2**80)) - assert_(IsPythonScalar(2.)) - assert_(IsPythonScalar("a")) - - -class TestSubscripting(object): - def test_test_zero_rank(self): - x = np.array([1, 2, 3]) - assert_(isinstance(x[0], np.int_)) - if sys.version_info[0] < 3: - assert_(isinstance(x[0], int)) - assert_(type(x[0, ...]) is np.ndarray) - - -class TestPickling(object): - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, - reason=('this tests the error messages when trying to' - 'protocol 5 although it is not available')) - def test_correct_protocol5_error_message(self): - array = np.arange(10) - - if sys.version_info[:2] in ((3, 6), (3, 7)): - # For the specific case of python3.6 and 3.7, raise a clear import - # error about the pickle5 backport when trying to use protocol=5 - # without the pickle5 package - with pytest.raises(ImportError): - array.__reduce_ex__(5) - - elif sys.version_info[:2] < (3, 6): - # when calling __reduce_ex__ explicitly with protocol=5 on python - # raise a ValueError saying that protocol 5 is not available for - # this python version - with pytest.raises(ValueError): - array.__reduce_ex__(5) - - def test_record_array_with_object_dtype(self): - my_object = object() - - arr_with_object = np.array( - [(my_object, 1, 2.0)], - dtype=[('a', object), ('b', int), ('c', float)]) - arr_without_object = np.array( - [('xxx', 1, 2.0)], - dtype=[('a', str), ('b', int), ('c', float)]) - - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - 
depickled_arr_with_object = pickle.loads( - pickle.dumps(arr_with_object, protocol=proto)) - depickled_arr_without_object = pickle.loads( - pickle.dumps(arr_without_object, protocol=proto)) - - assert_equal(arr_with_object.dtype, - depickled_arr_with_object.dtype) - assert_equal(arr_without_object.dtype, - depickled_arr_without_object.dtype) - - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, - reason="requires pickle protocol 5") - def test_f_contiguous_array(self): - f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') - buffers = [] - - # When using pickle protocol 5, Fortran-contiguous arrays can be - # serialized using out-of-band buffers - bytes_string = pickle.dumps(f_contiguous_array, protocol=5, - buffer_callback=buffers.append) - - assert len(buffers) > 0 - - depickled_f_contiguous_array = pickle.loads(bytes_string, - buffers=buffers) - - assert_equal(f_contiguous_array, depickled_f_contiguous_array) - - def test_non_contiguous_array(self): - non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] - assert not non_contiguous_array.flags.c_contiguous - assert not non_contiguous_array.flags.f_contiguous - - # make sure non-contiguous arrays can be pickled-depickled - # using any protocol - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - depickled_non_contiguous_array = pickle.loads( - pickle.dumps(non_contiguous_array, protocol=proto)) - - assert_equal(non_contiguous_array, depickled_non_contiguous_array) - - def test_roundtrip(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - carray = np.array([[2, 9], [7, 0], [3, 8]]) - DATA = [ - carray, - np.transpose(carray), - np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), - ('c', float)]) - ] - - refs = [weakref.ref(a) for a in DATA] - for a in DATA: - assert_equal( - a, pickle.loads(pickle.dumps(a, protocol=proto)), - err_msg="%r" % a) - del a, DATA, carray - break_cycles() - # check for reference leaks (gh-12793) - for ref in refs: - assert ref() is None - - def _loads(self, obj): - if sys.version_info[0] >= 3: - return pickle.loads(obj, encoding='latin1') - else: - return pickle.loads(obj) - - # version 0 pickles, using protocol=2 to pickle - # version 0 doesn't have a version field - def test_version0_int8(self): - s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' 
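# The hard-coded byte string above is a historical format-version-0 pickle
# (protocol 2, produced on Python 2); _loads() above replays such payloads
# with encoding='latin1' so Python-2-era byte data survives unpickling on
# Python 3. A minimal sketch of the same round trip (illustrative, not part
# of the original suite):
#
#     import pickle
#     import numpy as np
#     payload = pickle.dumps(np.array([1, 2, 3, 4], dtype=np.int8), protocol=2)
#     assert pickle.loads(payload, encoding='latin1').dtype == np.int8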
- a = np.array([1, 2, 3, 4], dtype=np.int8) - p = self._loads(s) - assert_equal(a, p) - - def test_version0_float32(self): - s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) - - def test_mixed(self): - g1 = np.array(["spam", "spa", "spammer", "and eggs"]) - g2 = "spam" - assert_array_equal(g1 == g2, [x == g2 for x in g1]) - assert_array_equal(g1 != g2, [x != g2 for x in g1]) - assert_array_equal(g1 < g2, [x < g2 for x in g1]) - assert_array_equal(g1 > g2, [x > g2 for x in g1]) - assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) - assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) - - def test_unicode(self): - g1 = np.array([u"This", u"is", u"example"]) - g2 = np.array([u"This", u"was", u"example"]) - assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) - - -class TestArgmax(object): - - nan_arr = [ - ([0, 1, 2, 3, np.nan], 4), - ([0, 1, 2, np.nan, 3], 3), - ([np.nan, 0, 1, 2, 3], 0), - ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0, np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan, 0)], 4), - ([0, 1, 2, complex(np.nan, 0), 3], 3), - ([0, 1, 2, complex(0, np.nan), 3], 3), - ([complex(0, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), - - ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), - ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), - ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), - - ([np.datetime64('1923-04-14T12:43:12'), - np.datetime64('1994-06-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('1995-11-25T16:02:16'), - np.datetime64('2005-01-04T03:14:12'), - np.datetime64('2041-12-03T14:05:03')], 5), - ([np.datetime64('1935-09-14T04:40:11'), - np.datetime64('1949-10-12T12:32:11'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('2015-11-20T12:20:59'), - np.datetime64('1932-09-23T10:10:13'), - np.datetime64('2014-10-10T03:50:30')], 3), - # Assorted tests with NaTs - ([np.datetime64('NaT'), - np.datetime64('NaT'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('NaT'), - np.datetime64('2015-09-23T10:10:13'), - np.datetime64('1932-10-10T03:50:30')], 0), - ([np.datetime64('2059-03-14T12:43:12'), - np.datetime64('1996-09-21T14:43:15'), - np.datetime64('NaT'), - np.datetime64('2022-12-25T16:02:16'), - np.datetime64('1963-10-04T03:14:12'), - np.datetime64('2013-05-08T18:15:23')], 2), - ([np.timedelta64(2, 's'), - np.timedelta64(1, 's'), - np.timedelta64('NaT', 's'), - np.timedelta64(3, 's')], 2), - ([np.timedelta64('NaT', 's')] * 3, 0), - - ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), - timedelta(days=-1, seconds=23)], 0), - ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), - timedelta(days=5, seconds=14)], 1), - ([timedelta(days=10, 
seconds=24), timedelta(days=10, seconds=5), - timedelta(days=10, seconds=43)], 2), - - ([False, False, False, False, True], 4), - ([False, False, False, True, False], 3), - ([True, False, False, False, False], 0), - ([True, False, True, False, False], 0), - ] - - def test_all(self): - a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) - for i in range(a.ndim): - amax = a.max(i) - aargmax = a.argmax(i) - axes = list(range(a.ndim)) - axes.remove(i) - assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes)))) - - def test_combinations(self): - for arr, pos in self.nan_arr: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") - max_val = np.max(arr) - - assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) - assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr) - - def test_output_shape(self): - # see also gh-616 - a = np.ones((10, 5)) - # Check some simple shape mismatches - out = np.ones(11, dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - out = np.ones((2, 5), dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - # these could be relaxed possibly (used to allow even the previous) - out = np.ones((1, 10), dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - out = np.ones(10, dtype=np.int_) - a.argmax(-1, out=out) - assert_equal(out, a.argmax(-1)) - - def test_argmax_unicode(self): - d = np.zeros(6031, dtype='= cmin)) - assert_(np.all(x <= cmax)) - - def _clip_type(self, type_group, array_max, - clip_min, clip_max, inplace=False, - expected_min=None, expected_max=None): - if expected_min is None: - expected_min = clip_min - if expected_max is None: - expected_max = clip_max - - for T in np.sctypes[type_group]: - if sys.byteorder == 'little': - byte_orders = ['=', '>'] - else: - byte_orders = ['<', '='] - - for byteorder in byte_orders: - dtype = np.dtype(T).newbyteorder(byteorder) - - x = (np.random.random(1000) * array_max).astype(dtype) - if inplace: - # The tests that call us pass clip_min and clip_max that - # might not fit in the destination dtype. They were written - # assuming the previous unsafe casting, which now must be - # passed explicitly to avoid a warning. 
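# For instance (a sketch only, not part of the original test): clipping a
# uint8 array against a negative lower bound requires the bound itself to
# be cast unsafely into the array's dtype:
#
#     x = np.arange(10, dtype=np.uint8)
#     x.clip(-120, 100, out=x, casting='unsafe')   # -120 is not
#     # representable as uint8; 'unsafe' permits the lossy cast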
- x.clip(clip_min, clip_max, x, casting='unsafe') - else: - x = x.clip(clip_min, clip_max) - byteorder = '=' - - if x.dtype.byteorder == '|': - byteorder = '|' - assert_equal(x.dtype.byteorder, byteorder) - self._check_range(x, expected_min, expected_max) - return x - - def test_basic(self): - for inplace in [False, True]: - self._clip_type( - 'float', 1024, -12.8, 100.2, inplace=inplace) - self._clip_type( - 'float', 1024, 0, 0, inplace=inplace) - - self._clip_type( - 'int', 1024, -120, 100, inplace=inplace) - self._clip_type( - 'int', 1024, 0, 0, inplace=inplace) - - self._clip_type( - 'uint', 1024, 0, 0, inplace=inplace) - self._clip_type( - 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) - - def test_record_array(self): - rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], - dtype=[('x', '= 3)) - x = val.clip(min=3) - assert_(np.all(x >= 3)) - x = val.clip(max=4) - assert_(np.all(x <= 4)) - - def test_nan(self): - input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) - result = input_arr.clip(-1, 1) - expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) - assert_array_equal(result, expected) - - -class TestCompress(object): - def test_axis(self): - tgt = [[5, 6, 7, 8, 9]] - arr = np.arange(10).reshape(2, 5) - out = np.compress([0, 1], arr, axis=0) - assert_equal(out, tgt) - - tgt = [[1, 3], [6, 8]] - out = np.compress([0, 1, 0, 1, 0], arr, axis=1) - assert_equal(out, tgt) - - def test_truncate(self): - tgt = [[1], [6]] - arr = np.arange(10).reshape(2, 5) - out = np.compress([0, 1], arr, axis=1) - assert_equal(out, tgt) - - def test_flatten(self): - arr = np.arange(10).reshape(2, 5) - out = np.compress([0, 1], arr) - assert_equal(out, 1) - - -class TestPutmask(object): - def tst_basic(self, x, T, mask, val): - np.putmask(x, mask, val) - assert_equal(x[mask], T(val)) - assert_equal(x.dtype, T) - - def test_ip_types(self): - unchecked_types = [bytes, unicode, np.void, object] - - x = np.random.random(1000)*100 - mask = x < 40 - - for val in [-100, 0, 15]: - for types in np.sctypes.values(): - for T in types: - if T not in unchecked_types: - self.tst_basic(x.copy().astype(T), T, mask, val) - - def test_mask_size(self): - assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) - - @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', 'i4', 'f8'), ('z', ' 16MB - d = np.zeros(4 * 1024 ** 2) - d.tofile(self.filename) - assert_equal(os.path.getsize(self.filename), d.nbytes) - assert_array_equal(d, np.fromfile(self.filename)) - # check offset - with open(self.filename, "r+b") as f: - f.seek(d.nbytes) - d.tofile(f) - assert_equal(os.path.getsize(self.filename), d.nbytes * 2) - # check append mode (gh-8329) - open(self.filename, "w").close() # delete file contents - with open(self.filename, "ab") as f: - d.tofile(f) - assert_array_equal(d, np.fromfile(self.filename)) - with open(self.filename, "ab") as f: - d.tofile(f) - assert_equal(os.path.getsize(self.filename), d.nbytes * 2) - - def test_io_open_buffered_fromfile(self): - # gh-6632 - self.x.tofile(self.filename) - with io.open(self.filename, 'rb', buffering=-1) as f: - y = np.fromfile(f, dtype=self.dtype) - assert_array_equal(y, self.x.flat) - - def test_file_position_after_fromfile(self): - # gh-4118 - sizes = [io.DEFAULT_BUFFER_SIZE//8, - io.DEFAULT_BUFFER_SIZE, - io.DEFAULT_BUFFER_SIZE*8] - - for size in sizes: - f = open(self.filename, 'wb') - f.seek(size-1) - f.write(b'\0') - f.close() - - for mode in ['rb', 'r+b']: - err_msg = "%d %s" % (size, mode) - - f = open(self.filename, mode) - f.read(2) - 
-                np.fromfile(f, dtype=np.float64, count=1)
-                pos = f.tell()
-                f.close()
-                assert_equal(pos, 10, err_msg=err_msg)
-
-    def test_file_position_after_tofile(self):
-        # gh-4118
-        sizes = [io.DEFAULT_BUFFER_SIZE//8,
-                 io.DEFAULT_BUFFER_SIZE,
-                 io.DEFAULT_BUFFER_SIZE*8]
-
-        for size in sizes:
-            err_msg = "%d" % (size,)
-
-            f = open(self.filename, 'wb')
-            f.seek(size-1)
-            f.write(b'\0')
-            f.seek(10)
-            f.write(b'12')
-            np.array([0], dtype=np.float64).tofile(f)
-            pos = f.tell()
-            f.close()
-            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
-
-            f = open(self.filename, 'r+b')
-            f.read(2)
-            f.seek(0, 1)  # seek between read&write required by ANSI C
-            np.array([0], dtype=np.float64).tofile(f)
-            pos = f.tell()
-            f.close()
-            assert_equal(pos, 10, err_msg=err_msg)
-
-    def test_load_object_array_fromfile(self):
-        # gh-12300
-        with open(self.filename, 'w') as f:
-            # Ensure we have a file with consistent contents
-            pass
-
-        with open(self.filename, 'rb') as f:
-            assert_raises_regex(ValueError, "Cannot read into object array",
-                                np.fromfile, f, dtype=object)
-
-        assert_raises_regex(ValueError, "Cannot read into object array",
-                            np.fromfile, self.filename, dtype=object)
-
-    def test_fromfile_offset(self):
-        with open(self.filename, 'wb') as f:
-            self.x.tofile(f)
-
-        with open(self.filename, 'rb') as f:
-            y = np.fromfile(f, dtype=self.dtype, offset=0)
-            assert_array_equal(y, self.x.flat)
-
-        with open(self.filename, 'rb') as f:
-            count_items = len(self.x.flat) // 8
-            offset_items = len(self.x.flat) // 4
-            offset_bytes = self.dtype.itemsize * offset_items
-            y = np.fromfile(f, dtype=self.dtype, count=count_items, offset=offset_bytes)
-            assert_array_equal(y, self.x.flat[offset_items:offset_items+count_items])
-
-            # subsequent seeks should stack
-            offset_bytes = self.dtype.itemsize
-            z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes)
-            assert_array_equal(z, self.x.flat[offset_items+count_items+1:])
-
-        with open(self.filename, 'wb') as f:
-            self.x.tofile(f, sep=",")
-
-        with open(self.filename, 'rb') as f:
-            assert_raises_regex(
-                    TypeError,
-                    "'offset' argument only permitted for binary files",
-                    np.fromfile, self.filename, dtype=self.dtype,
-                    sep=",", offset=1)
-
-    def _check_from(self, s, value, **kw):
-        if 'sep' not in kw:
-            y = np.frombuffer(s, **kw)
-        else:
-            y = np.fromstring(s, **kw)
-        assert_array_equal(y, value)
-
-        f = open(self.filename, 'wb')
-        f.write(s)
-        f.close()
-        y = np.fromfile(self.filename, **kw)
-        assert_array_equal(y, value)
-
-    def test_nan(self):
-        self._check_from(
-            b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
-            [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
-            sep=' ')
-
-    def test_inf(self):
-        self._check_from(
-            b"inf +inf -inf infinity -Infinity iNfInItY -inF",
-            [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
-            sep=' ')
-
-    def test_numbers(self):
-        self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
-                         [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
-
-    def test_binary(self):
-        self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
-                         np.array([1, 2, 3, 4]),
-                         dtype='<f4')
-
-    @pytest.mark.slow  # takes > 1 minute on mechanical hard drive
-    def test_big_binary(self):
-        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
-        calls in windows. These normally would hang doing something like this.
- See http://projects.scipy.org/numpy/ticket/1660""" - if sys.platform != 'win32': - return - try: - # before workarounds, only up to 2**32-1 worked - fourgbplus = 2**32 + 2**16 - testbytes = np.arange(8, dtype=np.int8) - n = len(testbytes) - flike = tempfile.NamedTemporaryFile() - f = flike.file - np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) - flike.seek(0) - a = np.fromfile(f, dtype=np.int8) - flike.close() - assert_(len(a) == fourgbplus) - # check only start and end for speed: - assert_((a[:n] == testbytes).all()) - assert_((a[-n:] == testbytes).all()) - except (MemoryError, ValueError): - pass - - def test_string(self): - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',') - - def test_counted_string(self): - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') - - def test_string_with_ws(self): - self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') - - def test_counted_string_with_ws(self): - self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int, - sep=' ') - - def test_ascii(self): - self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') - self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') - - def test_malformed(self): - with assert_warns(DeprecationWarning): - self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') - - def test_long_sep(self): - self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') - - def test_dtype(self): - v = np.array([1, 2, 3, 4], dtype=np.int_) - self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_) - - def test_dtype_bool(self): - # can't use _check_from because fromstring can't handle True/False - v = np.array([True, False, True, False], dtype=np.bool_) - s = b'1,0,-2.3,0' - f = open(self.filename, 'wb') - f.write(s) - f.close() - y = np.fromfile(self.filename, sep=',', dtype=np.bool_) - assert_(y.dtype == '?') - assert_array_equal(y, v) - - def test_tofile_sep(self): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - #assert_equal(s, '1.51,2.0,3.51,4.0') - y = np.array([float(p) for p in s.split(',')]) - assert_array_equal(x,y) - - def test_tofile_format(self): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',', format='%.2f') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - assert_equal(s, '1.51,2.00,3.51,4.00') - - def test_locale(self): - with CommaDecimalPointLocale(): - self.test_numbers() - self.test_nan() - self.test_inf() - self.test_counted_string() - self.test_ascii() - self.test_malformed() - self.test_tofile_sep() - self.test_tofile_format() - - def test_fromfile_subarray_binary(self): - # Test subarray dtypes which are absorbed into the shape - x = np.arange(24, dtype="i4").reshape(2, 3, 4) - x.tofile(self.filename) - res = np.fromfile(self.filename, dtype="(3,4)i4") - assert_array_equal(x, res) - - x_str = x.tobytes() - with assert_warns(DeprecationWarning): - # binary fromstring is deprecated - res = np.fromstring(x_str, dtype="(3,4)i4") - assert_array_equal(x, res) - - -class TestFromBuffer(object): - @pytest.mark.parametrize('byteorder', ['<', '>']) - @pytest.mark.parametrize('dtype', [float, int, complex]) - def test_basic(self, byteorder, dtype): - dt = np.dtype(dtype).newbyteorder(byteorder) - x = (np.random.random((4, 7)) * 
5).astype(dt) - buf = x.tobytes() - assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) - - def test_empty(self): - assert_array_equal(np.frombuffer(b''), np.array([])) - - -class TestFlat(object): - def setup(self): - a0 = np.arange(20.0) - a = a0.reshape(4, 5) - a0.shape = (4, 5) - a.flags.writeable = False - self.a = a - self.b = a[::2, ::2] - self.a0 = a0 - self.b0 = a0[::2, ::2] - - def test_contiguous(self): - testpassed = False - try: - self.a.flat[12] = 100.0 - except ValueError: - testpassed = True - assert_(testpassed) - assert_(self.a.flat[12] == 12.0) - - def test_discontiguous(self): - testpassed = False - try: - self.b.flat[4] = 100.0 - except ValueError: - testpassed = True - assert_(testpassed) - assert_(self.b.flat[4] == 12.0) - - def test___array__(self): - c = self.a.flat.__array__() - d = self.b.flat.__array__() - e = self.a0.flat.__array__() - f = self.b0.flat.__array__() - - assert_(c.flags.writeable is False) - assert_(d.flags.writeable is False) - # for 1.14 all are set to non-writeable on the way to replacing the - # UPDATEIFCOPY array returned for non-contiguous arrays. - assert_(e.flags.writeable is True) - assert_(f.flags.writeable is False) - with assert_warns(DeprecationWarning): - assert_(c.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - assert_(d.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - assert_(e.flags.updateifcopy is False) - with assert_warns(DeprecationWarning): - # UPDATEIFCOPY is removed. - assert_(f.flags.updateifcopy is False) - assert_(c.flags.writebackifcopy is False) - assert_(d.flags.writebackifcopy is False) - assert_(e.flags.writebackifcopy is False) - assert_(f.flags.writebackifcopy is False) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_refcount(self): - # includes regression test for reference count error gh-13165 - inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None] - indtype = np.dtype(np.intp) - rc_indtype = sys.getrefcount(indtype) - for ind in inds: - rc_ind = sys.getrefcount(ind) - for _ in range(100): - try: - self.a.flat[ind] - except IndexError: - pass - assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) - assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) - - -class TestResize(object): - - @_no_tracing - def test_basic(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - if IS_PYPY: - x.resize((5, 5), refcheck=False) - else: - x.resize((5, 5)) - assert_array_equal(x.flat[:9], - np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) - assert_array_equal(x[9:].flat, 0) - - def test_check_reference(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - y = x - assert_raises(ValueError, x.resize, (5, 1)) - del y # avoid pyflakes unused variable warning. 
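# The ValueError exercised above comes from resize()'s reference check:
# reallocating in place would leave the second name pointing at freed
# memory. A minimal sketch of both outcomes (illustrative; refcheck=False
# shifts the safety burden to the caller):
import numpy as np

x = np.arange(4)
y = x                                # second reference keeps the refcount > 1
try:
    x.resize((8,))                   # refused: the reference check fails
except ValueError:
    x.resize((8,), refcheck=False)   # allowed; caller vouches for safety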
-
-    @_no_tracing
-    def test_int_shape(self):
-        x = np.eye(3)
-        if IS_PYPY:
-            x.resize(3, refcheck=False)
-        else:
-            x.resize(3)
-        assert_array_equal(x, np.eye(3)[0,:])
-
-    def test_none_shape(self):
-        x = np.eye(3)
-        x.resize(None)
-        assert_array_equal(x, np.eye(3))
-        x.resize()
-        assert_array_equal(x, np.eye(3))
-
-    def test_0d_shape(self):
-        # to it multiple times to test it does not break alloc cache gh-9216
-        for i in range(10):
-            x = np.empty((1,))
-            x.resize(())
-            assert_equal(x.shape, ())
-            assert_equal(x.size, 1)
-            x = np.empty(())
-            x.resize((1,))
-            assert_equal(x.shape, (1,))
-            assert_equal(x.size, 1)
-
-    def test_invalid_arguments(self):
-        assert_raises(TypeError, np.eye(3).resize, 'hi')
-        assert_raises(ValueError, np.eye(3).resize, -1)
-        assert_raises(TypeError, np.eye(3).resize, order=1)
-        assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
-
-    @_no_tracing
-    def test_freeform_shape(self):
-        x = np.eye(3)
-        if IS_PYPY:
-            x.resize(3, 2, 1, refcheck=False)
-        else:
-            x.resize(3, 2, 1)
-        assert_(x.shape == (3, 2, 1))
-
-    @_no_tracing
-    def test_zeros_appended(self):
-        x = np.eye(3)
-        if IS_PYPY:
-            x.resize(2, 3, 3, refcheck=False)
-        else:
-            x.resize(2, 3, 3)
-        assert_array_equal(x[0], np.eye(3))
-        assert_array_equal(x[1], np.zeros((3, 3)))
-
-    @_no_tracing
-    def test_obj_obj(self):
-        # check memory is initialized on resize, gh-4857
-        a = np.ones(10, dtype=[('k', object, 2)])
-        if IS_PYPY:
-            a.resize(15, refcheck=False)
-        else:
-            a.resize(15,)
-        assert_equal(a.shape, (15,))
-        assert_array_equal(a['k'][-5:], 0)
-        assert_array_equal(a['k'][:-5], 1)
-
-    def test_empty_view(self):
-        # check that sizes containing a zero don't trigger a reallocate for
-        # already empty arrays
-        x = np.zeros((10, 0), int)
-        x_view = x[...]
-        x_view.resize((0, 10))
-        x_view.resize((0, 100))
-
-    def test_check_weakref(self):
-        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
-        xref = weakref.ref(x)
-        assert_raises(ValueError, x.resize, (5, 1))
-        del xref  # avoid pyflakes unused variable warning.
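# As test_check_weakref above shows, even a live weak reference blocks an
# in-place resize, since reallocation would invalidate the referent.
# Minimal sketch (illustrative only):
import weakref
import numpy as np

a = np.eye(2)
ref = weakref.ref(a)
try:
    a.resize((4, 4))    # raises ValueError while the weak reference is alive
except ValueError:
    del ref             # with the weakref gone, the resize goes through
    a.resize((4, 4))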
- - -class TestRecord(object): - def test_field_rename(self): - dt = np.dtype([('f', float), ('i', int)]) - dt.names = ['p', 'q'] - assert_equal(dt.names, ['p', 'q']) - - def test_multiple_field_name_occurrence(self): - def test_dtype_init(): - np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) - - # Error raised when multiple fields have the same name - assert_raises(ValueError, test_dtype_init) - - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") - def test_bytes_fields(self): - # Bytes are not allowed in field names and not recognized in titles - # on Py3 - assert_raises(TypeError, np.dtype, [(b'a', int)]) - assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) - - dt = np.dtype([((b'a', 'b'), int)]) - assert_raises(TypeError, dt.__getitem__, b'a') - - x = np.array([(1,), (2,), (3,)], dtype=dt) - assert_raises(IndexError, x.__getitem__, b'a') - - y = x[0] - assert_raises(IndexError, y.__getitem__, b'a') - - @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") - def test_multiple_field_name_unicode(self): - def test_dtype_unicode(): - np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) - - # Error raised when multiple fields have the same name(unicode included) - assert_raises(ValueError, test_dtype_unicode) - - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") - def test_unicode_field_titles(self): - # Unicode field titles are added to field dict on Py2 - title = u'b' - dt = np.dtype([((title, 'a'), int)]) - dt[title] - dt['a'] - x = np.array([(1,), (2,), (3,)], dtype=dt) - x[title] - x['a'] - y = x[0] - y[title] - y['a'] - - @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") - def test_unicode_field_names(self): - # Unicode field names are converted to ascii on Python 2: - encodable_name = u'b' - assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') - assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') - - # But raises UnicodeEncodeError if it can't be encoded: - nonencodable_name = u'\uc3bc' - assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) - assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) - - def test_fromarrays_unicode(self): - # A single name string provided to fromarrays() is allowed to be unicode - # on both Python 2 and 3: - x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4') - assert_equal(x['a'][0], 0) - assert_equal(x['b'][0], 1) - - def test_unicode_order(self): - # Test that we can sort with order as a unicode field name in both Python 2 and - # 3: - name = u'b' - x = np.array([1, 3, 2], dtype=[(name, int)]) - x.sort(order=name) - assert_equal(x[u'b'], np.array([1, 2, 3])) - - def test_field_names(self): - # Test unicode and 8-bit / byte strings can be used - a = np.zeros((1,), dtype=[('f1', 'i4'), - ('f2', 'i4'), - ('f3', [('sf1', 'i4')])]) - is_py3 = sys.version_info[0] >= 3 - if is_py3: - funcs = (str,) - # byte string indexing fails gracefully - assert_raises(IndexError, a.__setitem__, b'f1', 1) - assert_raises(IndexError, a.__getitem__, b'f1') - assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) - assert_raises(IndexError, a['f1'].__getitem__, b'sf1') - else: - funcs = (str, unicode) - for func in funcs: - b = a.copy() - fn1 = func('f1') - b[fn1] = 1 - assert_equal(b[fn1], 1) - fnn = func('not at all') - assert_raises(ValueError, b.__setitem__, fnn, 1) - assert_raises(ValueError, b.__getitem__, fnn) - b[0][fn1] = 2 - assert_equal(b[fn1], 2) - # Subfield - 
assert_raises(ValueError, b[0].__setitem__, fnn, 1) - assert_raises(ValueError, b[0].__getitem__, fnn) - # Subfield - fn3 = func('f3') - sfn1 = func('sf1') - b[fn3][sfn1] = 1 - assert_equal(b[fn3][sfn1], 1) - assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) - assert_raises(ValueError, b[fn3].__getitem__, fnn) - # multiple subfields - fn2 = func('f2') - b[fn2] = 3 - - assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) - assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) - assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) - - # non-ascii unicode field indexing is well behaved - if not is_py3: - pytest.skip('non ascii unicode field indexing skipped; ' - 'raises segfault on python 2.x') - else: - assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) - assert_raises(ValueError, a.__getitem__, u'\u03e0') - - def test_record_hash(self): - a = np.array([(1, 2), (1, 2)], dtype='i1,i2') - a.flags.writeable = False - b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) - b.flags.writeable = False - c = np.array([(1, 2), (3, 4)], dtype='i1,i2') - c.flags.writeable = False - assert_(hash(a[0]) == hash(a[1])) - assert_(hash(a[0]) == hash(b[0])) - assert_(hash(a[0]) != hash(b[1])) - assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) - - def test_record_no_hash(self): - a = np.array([(1, 2), (1, 2)], dtype='i1,i2') - assert_raises(TypeError, hash, a[0]) - - def test_empty_structure_creation(self): - # make sure these do not raise errors (gh-5631) - np.array([()], dtype={'names': [], 'formats': [], - 'offsets': [], 'itemsize': 12}) - np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], - 'offsets': [], 'itemsize': 12}) - - def test_multifield_indexing_view(self): - a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')]) - v = a[['a', 'c']] - assert_(v.base is a) - assert_(v.dtype == np.dtype({'names': ['a', 'c'], - 'formats': ['i4', 'u4'], - 'offsets': [0, 8]})) - v[:] = (4,5) - assert_equal(a[0].item(), (4, 1, 5)) - -class TestView(object): - def test_basic(self): - x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], - dtype=[('r', np.int8), ('g', np.int8), - ('b', np.int8), ('a', np.int8)]) - # We must be specific about the endianness here: - y = x.view(dtype=' 0) - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_empty(self): - A = np.zeros((0, 3)) - for f in self.funcs: - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(A, axis=axis)).all()) - assert_(len(w) > 0) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(f(A, axis=axis), np.zeros([])) - - def test_mean_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1]: - tgt = mat.sum(axis=axis) - res = _mean(mat, axis=axis) * mat.shape[axis] - assert_almost_equal(res, tgt) - for axis in [None]: - tgt = mat.sum(axis=axis) - res = _mean(mat, axis=axis) * np.prod(mat.shape) - assert_almost_equal(res, tgt) - - def test_mean_float16(self): - # This fail if the sum inside mean is done in float16 instead - # of float32. 
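# (float16 has a 10-bit mantissa, so consecutive integers are representable
# only up to 2048; if the accumulation happened in float16, the running sum
# would stall there and the mean of 100000 ones would come out far below 1.
# Sketch of the failure mode being guarded against:
#
#     acc = np.float16(2048)
#     assert acc + np.float16(1) == acc   # 2049 rounds back to 2048
# )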
- assert_(_mean(np.ones(100000, dtype='float16')) == 1) - - def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1, None]: - msqr = _mean(mat * mat.conj(), axis=axis) - mean = _mean(mat, axis=axis) - tgt = msqr - mean * mean.conjugate() - res = _var(mat, axis=axis) - assert_almost_equal(res, tgt) - - def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1, None]: - tgt = np.sqrt(_var(mat, axis=axis)) - res = _std(mat, axis=axis) - assert_almost_equal(res, tgt) - - def test_subclass(self): - class TestArray(np.ndarray): - def __new__(cls, data, info): - result = np.array(data) - result = result.view(cls) - result.info = info - return result - - def __array_finalize__(self, obj): - self.info = getattr(obj, "info", '') - - dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') - res = dat.mean(1) - assert_(res.info == dat.info) - res = dat.std(1) - assert_(res.info == dat.info) - res = dat.var(1) - assert_(res.info == dat.info) - -class TestVdot(object): - def test_basic(self): - dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] - dt_complex = np.typecodes['Complex'] - - # test real - a = np.eye(3) - for dt in dt_numeric + 'O': - b = a.astype(dt) - res = np.vdot(b, b) - assert_(np.isscalar(res)) - assert_equal(np.vdot(b, b), 3) - - # test complex - a = np.eye(3) * 1j - for dt in dt_complex + 'O': - b = a.astype(dt) - res = np.vdot(b, b) - assert_(np.isscalar(res)) - assert_equal(np.vdot(b, b), 3) - - # test boolean - b = np.eye(3, dtype=bool) - res = np.vdot(b, b) - assert_(np.isscalar(res)) - assert_equal(np.vdot(b, b), True) - - def test_vdot_array_order(self): - a = np.array([[1, 2], [3, 4]], order='C') - b = np.array([[1, 2], [3, 4]], order='F') - res = np.vdot(a, a) - - # integer arrays are exact - assert_equal(np.vdot(a, b), res) - assert_equal(np.vdot(b, a), res) - assert_equal(np.vdot(b, b), res) - - def test_vdot_uncontiguous(self): - for size in [2, 1000]: - # Different sizes match different branches in vdot. 
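# (Dropping the last axis below leaves the operands non-contiguous, e.g.
#
#     np.zeros((4, 2, 2))[..., 0].flags['C_CONTIGUOUS']   # -> False
#
# which pushes vdot off the contiguous BLAS fast path onto its strided
# fallback; the two sizes are presumably there to hit both the small- and
# large-input branches of that fallback. This is inferred from the test,
# not a documented guarantee.)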
- a = np.zeros((size, 2, 2)) - b = np.zeros((size, 2, 2)) - a[:, 0, 0] = np.arange(size) - b[:, 0, 0] = np.arange(size) + 1 - # Make a and b uncontiguous: - a = a[..., 0] - b = b[..., 0] - - assert_equal(np.vdot(a, b), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a, b.copy()), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a.copy(), b), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a.copy('F'), b), - np.vdot(a.flatten(), b.flatten())) - assert_equal(np.vdot(a, b.copy('F')), - np.vdot(a.flatten(), b.flatten())) - - -class TestDot(object): - def setup(self): - np.random.seed(128) - self.A = np.random.rand(4, 2) - self.b1 = np.random.rand(2, 1) - self.b2 = np.random.rand(2) - self.b3 = np.random.rand(1, 2) - self.b4 = np.random.rand(4) - self.N = 7 - - def test_dotmatmat(self): - A = self.A - res = np.dot(A.transpose(), A) - tgt = np.array([[1.45046013, 0.86323640], - [0.86323640, 0.84934569]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotmatvec(self): - A, b1 = self.A, self.b1 - res = np.dot(A, b1) - tgt = np.array([[0.32114320], [0.04889721], - [0.15696029], [0.33612621]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotmatvec2(self): - A, b2 = self.A, self.b2 - res = np.dot(A, b2) - tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecmat(self): - A, b4 = self.A, self.b4 - res = np.dot(b4, A) - tgt = np.array([1.23495091, 1.12222648]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecmat2(self): - b3, A = self.b3, self.A - res = np.dot(b3, A.transpose()) - tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecmat3(self): - A, b4 = self.A, self.b4 - res = np.dot(A.transpose(), b4) - tgt = np.array([1.23495091, 1.12222648]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecvecouter(self): - b1, b3 = self.b1, self.b3 - res = np.dot(b1, b3) - tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecvecinner(self): - b1, b3 = self.b1, self.b3 - res = np.dot(b3, b1) - tgt = np.array([[ 0.23129668]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotcolumnvect1(self): - b1 = np.ones((3, 1)) - b2 = [5.3] - res = np.dot(b1, b2) - tgt = np.array([5.3, 5.3, 5.3]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotcolumnvect2(self): - b1 = np.ones((3, 1)).transpose() - b2 = [6.2] - res = np.dot(b2, b1) - tgt = np.array([6.2, 6.2, 6.2]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecscalar(self): - np.random.seed(100) - b1 = np.random.rand(1, 1) - b2 = np.random.rand(1, 4) - res = np.dot(b1, b2) - tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_dotvecscalar2(self): - np.random.seed(100) - b1 = np.random.rand(4, 1) - b2 = np.random.rand(1, 1) - res = np.dot(b1, b2) - tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) - assert_almost_equal(res, tgt, decimal=self.N) - - def test_all(self): - dims = [(), (1,), (1, 1)] - dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] - for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): - b1 = np.zeros(dim1) - b2 = np.zeros(dim2) - res = np.dot(b1, b2) - tgt = np.zeros(dim) - assert_(res.shape == tgt.shape) - assert_almost_equal(res, tgt, decimal=self.N) - 
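# For reference, the shape pairings enumerated in test_all follow np.dot's
# rule that the last axis of the first operand contracts with the only (1-D)
# or second-to-last axis of the second. A few spot checks (sketch, not part
# of the original suite):
import numpy as np

assert np.dot(np.zeros(()), np.zeros((1, 1))).shape == (1, 1)  # scalar * matrix
assert np.dot(np.zeros((1,)), np.zeros((1, 1))).shape == (1,)  # vector . matrix
assert np.dot(np.zeros((1, 1)), np.zeros((1,))).shape == (1,)  # matrix . vector
assert np.dot(np.zeros((1,)), np.zeros((1,))).shape == ()      # inner product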
- def test_vecobject(self): - class Vec(object): - def __init__(self, sequence=None): - if sequence is None: - sequence = [] - self.array = np.array(sequence) - - def __add__(self, other): - out = Vec() - out.array = self.array + other.array - return out - - def __sub__(self, other): - out = Vec() - out.array = self.array - other.array - return out - - def __mul__(self, other): # with scalar - out = Vec(self.array.copy()) - out.array *= other - return out - - def __rmul__(self, other): - return self*other - - U_non_cont = np.transpose([[1., 1.], [1., 2.]]) - U_cont = np.ascontiguousarray(U_non_cont) - x = np.array([Vec([1., 0.]), Vec([0., 1.])]) - zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) - zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) - assert_equal(zeros[0].array, zeros_test[0].array) - assert_equal(zeros[1].array, zeros_test[1].array) - - def test_dot_2args(self): - from numpy.core.multiarray import dot - - a = np.array([[1, 2], [3, 4]], dtype=float) - b = np.array([[1, 0], [1, 1]], dtype=float) - c = np.array([[3, 2], [7, 4]], dtype=float) - - d = dot(a, b) - assert_allclose(c, d) - - def test_dot_3args(self): - from numpy.core.multiarray import dot - - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 32)) - for i in range(12): - dot(f, v, r) - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(r), 2) - r2 = dot(f, v, out=None) - assert_array_equal(r2, r) - assert_(r is dot(f, v, out=r)) - - v = v[:, 0].copy() # v.shape == (16,) - r = r[:, 0].copy() # r.shape == (1024,) - r2 = dot(f, v) - assert_(r is dot(f, v, r)) - assert_array_equal(r2, r) - - def test_dot_3args_errors(self): - from numpy.core.multiarray import dot - - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 31)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((1024,)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((32,)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((32, 1024)) - assert_raises(ValueError, dot, f, v, r) - assert_raises(ValueError, dot, f, v, r.T) - - r = np.empty((1024, 64)) - assert_raises(ValueError, dot, f, v, r[:, ::2]) - assert_raises(ValueError, dot, f, v, r[:, :32]) - - r = np.empty((1024, 32), dtype=np.float32) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((1024, 32), dtype=int) - assert_raises(ValueError, dot, f, v, r) - - def test_dot_array_order(self): - a = np.array([[1, 2], [3, 4]], order='C') - b = np.array([[1, 2], [3, 4]], order='F') - res = np.dot(a, a) - - # integer arrays are exact - assert_equal(np.dot(a, b), res) - assert_equal(np.dot(b, a), res) - assert_equal(np.dot(b, b), res) - - def test_accelerate_framework_sgemv_fix(self): - - def aligned_array(shape, align, dtype, order='C'): - d = dtype(0) - N = np.prod(shape) - tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) - address = tmp.__array_interface__["data"][0] - for offset in range(align): - if (address + offset) % align == 0: - break - tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) - return tmp.reshape(shape, order=order) - - def as_aligned(arr, align, dtype, order='C'): - aligned = aligned_array(arr.shape, align, dtype, order) - aligned[:] = arr[:] - return aligned - - def assert_dot_close(A, X, desired): - assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) - - m = aligned_array(100, 15, np.float32) - s = aligned_array((100, 100), 15, np.float32) - np.dot(s, m) # this will always segfault if the bug 
is present - - testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) - for align, m, n, a_order in testdata: - # Calculation in double precision - A_d = np.random.rand(m, n) - X_d = np.random.rand(n) - desired = np.dot(A_d, X_d) - # Calculation with aligned single precision - A_f = as_aligned(A_d, align, np.float32, order=a_order) - X_f = as_aligned(X_d, align, np.float32) - assert_dot_close(A_f, X_f, desired) - # Strided A rows - A_d_2 = A_d[::2] - desired = np.dot(A_d_2, X_d) - A_f_2 = A_f[::2] - assert_dot_close(A_f_2, X_f, desired) - # Strided A columns, strided X vector - A_d_22 = A_d_2[:, ::2] - X_d_2 = X_d[::2] - desired = np.dot(A_d_22, X_d_2) - A_f_22 = A_f_2[:, ::2] - X_f_2 = X_f[::2] - assert_dot_close(A_f_22, X_f_2, desired) - # Check the strides are as expected - if a_order == 'F': - assert_equal(A_f_22.strides, (8, 8 * m)) - else: - assert_equal(A_f_22.strides, (8 * n, 8)) - assert_equal(X_f_2.strides, (8,)) - # Strides in A rows + cols only - X_f_2c = as_aligned(X_f_2, align, np.float32) - assert_dot_close(A_f_22, X_f_2c, desired) - # Strides just in A cols - A_d_12 = A_d[:, ::2] - desired = np.dot(A_d_12, X_d_2) - A_f_12 = A_f[:, ::2] - assert_dot_close(A_f_12, X_f_2c, desired) - # Strides in A cols and X - assert_dot_close(A_f_12, X_f_2, desired) - - -class MatmulCommon(object): - """Common tests for '@' operator and numpy.matmul. - - """ - # Should work with these types. Will want to add - # "O" at some point - types = "?bhilqBHILQefdgFDGO" - - def test_exceptions(self): - dims = [ - ((1,), (2,)), # mismatched vector vector - ((2, 1,), (2,)), # mismatched matrix vector - ((2,), (1, 2)), # mismatched vector matrix - ((1, 2), (3, 1)), # mismatched matrix matrix - ((1,), ()), # vector scalar - ((), (1)), # scalar vector - ((1, 1), ()), # matrix scalar - ((), (1, 1)), # scalar matrix - ((2, 2, 1), (3, 1, 2)), # cannot broadcast - ] - - for dt, (dm1, dm2) in itertools.product(self.types, dims): - a = np.ones(dm1, dtype=dt) - b = np.ones(dm2, dtype=dt) - assert_raises(ValueError, self.matmul, a, b) - - def test_shapes(self): - dims = [ - ((1, 1), (2, 1, 1)), # broadcast first argument - ((2, 1, 1), (1, 1)), # broadcast second argument - ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match - ] - - for dt, (dm1, dm2) in itertools.product(self.types, dims): - a = np.ones(dm1, dtype=dt) - b = np.ones(dm2, dtype=dt) - res = self.matmul(a, b) - assert_(res.shape == (2, 1, 1)) - - # vector vector returns scalars. 
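# (matmul promotes 1-D operands temporarily -- a leading 1 is prepended to a
# left-hand vector, a trailing 1 appended to a right-hand one -- and the
# added axes are stripped from the result again, so vec @ vec collapses to
# a 0-d scalar:
#
#     np.matmul(np.ones(2), np.ones(2))   # -> 2.0, with shape ()
# )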
- for dt in self.types: - a = np.ones((2,), dtype=dt) - b = np.ones((2,), dtype=dt) - c = self.matmul(a, b) - assert_(np.array(c).shape == ()) - - def test_result_types(self): - mat = np.ones((1,1)) - vec = np.ones((1,)) - for dt in self.types: - m = mat.astype(dt) - v = vec.astype(dt) - for arg in [(m, v), (v, m), (m, m)]: - res = self.matmul(*arg) - assert_(res.dtype == dt) - - # vector vector returns scalars - if dt != "O": - res = self.matmul(v, v) - assert_(type(res) is np.dtype(dt).type) - - def test_scalar_output(self): - vec1 = np.array([2]) - vec2 = np.array([3, 4]).reshape(1, -1) - tgt = np.array([6, 8]) - for dt in self.types[1:]: - v1 = vec1.astype(dt) - v2 = vec2.astype(dt) - res = self.matmul(v1, v2) - assert_equal(res, tgt) - res = self.matmul(v2.T, v1) - assert_equal(res, tgt) - - # boolean type - vec = np.array([True, True], dtype='?').reshape(1, -1) - res = self.matmul(vec[:, 0], vec) - assert_equal(res, True) - - def test_vector_vector_values(self): - vec1 = np.array([1, 2]) - vec2 = np.array([3, 4]).reshape(-1, 1) - tgt1 = np.array([11]) - tgt2 = np.array([[3, 6], [4, 8]]) - for dt in self.types[1:]: - v1 = vec1.astype(dt) - v2 = vec2.astype(dt) - res = self.matmul(v1, v2) - assert_equal(res, tgt1) - # no broadcast, we must make v1 into a 2d ndarray - res = self.matmul(v2, v1.reshape(1, -1)) - assert_equal(res, tgt2) - - # boolean type - vec = np.array([True, True], dtype='?') - res = self.matmul(vec, vec) - assert_equal(res, True) - - def test_vector_matrix_values(self): - vec = np.array([1, 2]) - mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, axis=0) - tgt1 = np.array([7, 10]) - tgt2 = np.stack([tgt1]*2, axis=0) - for dt in self.types[1:]: - v = vec.astype(dt) - m1 = mat1.astype(dt) - m2 = mat2.astype(dt) - res = self.matmul(v, m1) - assert_equal(res, tgt1) - res = self.matmul(v, m2) - assert_equal(res, tgt2) - - # boolean type - vec = np.array([True, False]) - mat1 = np.array([[True, False], [False, True]]) - mat2 = np.stack([mat1]*2, axis=0) - tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) - - res = self.matmul(vec, mat1) - assert_equal(res, tgt1) - res = self.matmul(vec, mat2) - assert_equal(res, tgt2) - - def test_matrix_vector_values(self): - vec = np.array([1, 2]) - mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, axis=0) - tgt1 = np.array([5, 11]) - tgt2 = np.stack([tgt1]*2, axis=0) - for dt in self.types[1:]: - v = vec.astype(dt) - m1 = mat1.astype(dt) - m2 = mat2.astype(dt) - res = self.matmul(m1, v) - assert_equal(res, tgt1) - res = self.matmul(m2, v) - assert_equal(res, tgt2) - - # boolean type - vec = np.array([True, False]) - mat1 = np.array([[True, False], [False, True]]) - mat2 = np.stack([mat1]*2, axis=0) - tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) - - res = self.matmul(vec, mat1) - assert_equal(res, tgt1) - res = self.matmul(vec, mat2) - assert_equal(res, tgt2) - - def test_matrix_matrix_values(self): - mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.array([[1, 0], [1, 1]]) - mat12 = np.stack([mat1, mat2], axis=0) - mat21 = np.stack([mat2, mat1], axis=0) - tgt11 = np.array([[7, 10], [15, 22]]) - tgt12 = np.array([[3, 2], [7, 4]]) - tgt21 = np.array([[1, 2], [4, 6]]) - tgt12_21 = np.stack([tgt12, tgt21], axis=0) - tgt11_12 = np.stack((tgt11, tgt12), axis=0) - tgt11_21 = np.stack((tgt11, tgt21), axis=0) - for dt in self.types[1:]: - m1 = mat1.astype(dt) - m2 = mat2.astype(dt) - m12 = mat12.astype(dt) - m21 = mat21.astype(dt) - - # matrix @ matrix - res = self.matmul(m1, m2) - 
assert_equal(res, tgt12) - res = self.matmul(m2, m1) - assert_equal(res, tgt21) - - # stacked @ matrix - res = self.matmul(m12, m1) - assert_equal(res, tgt11_21) - - # matrix @ stacked - res = self.matmul(m1, m12) - assert_equal(res, tgt11_12) - - # stacked @ stacked - res = self.matmul(m12, m21) - assert_equal(res, tgt12_21) - - # boolean type - m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) - m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_) - m12 = np.stack([m1, m2], axis=0) - m21 = np.stack([m2, m1], axis=0) - tgt11 = m1 - tgt12 = m1 - tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) - tgt12_21 = np.stack([tgt12, tgt21], axis=0) - tgt11_12 = np.stack((tgt11, tgt12), axis=0) - tgt11_21 = np.stack((tgt11, tgt21), axis=0) - - # matrix @ matrix - res = self.matmul(m1, m2) - assert_equal(res, tgt12) - res = self.matmul(m2, m1) - assert_equal(res, tgt21) - - # stacked @ matrix - res = self.matmul(m12, m1) - assert_equal(res, tgt11_21) - - # matrix @ stacked - res = self.matmul(m1, m12) - assert_equal(res, tgt11_12) - - # stacked @ stacked - res = self.matmul(m12, m21) - assert_equal(res, tgt12_21) - - -class TestMatmul(MatmulCommon): - matmul = np.matmul - - def test_out_arg(self): - a = np.ones((5, 2), dtype=float) - b = np.array([[1, 3], [5, 7]], dtype=float) - tgt = np.dot(a, b) - - # test as positional argument - msg = "out positional argument" - out = np.zeros((5, 2), dtype=float) - self.matmul(a, b, out) - assert_array_equal(out, tgt, err_msg=msg) - - # test as keyword argument - msg = "out keyword argument" - out = np.zeros((5, 2), dtype=float) - self.matmul(a, b, out=out) - assert_array_equal(out, tgt, err_msg=msg) - - # test out with not allowed type cast (safe casting) - msg = "Cannot cast ufunc .* output" - out = np.zeros((5, 2), dtype=np.int32) - assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) - - # test out with type upcast to complex - out = np.zeros((5, 2), dtype=np.complex128) - c = self.matmul(a, b, out=out) - assert_(c is out) - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning, '') - c = c.astype(tgt.dtype) - assert_array_equal(c, tgt) - - def test_out_contiguous(self): - a = np.ones((5, 2), dtype=float) - b = np.array([[1, 3], [5, 7]], dtype=float) - v = np.array([1, 3], dtype=float) - tgt = np.dot(a, b) - tgt_mv = np.dot(a, v) - - # test out non-contiguous - out = np.ones((5, 2, 2), dtype=float) - c = self.matmul(a, b, out=out[..., 0]) - assert c.base is out - assert_array_equal(c, tgt) - c = self.matmul(a, v, out=out[:, 0, 0]) - assert_array_equal(c, tgt_mv) - c = self.matmul(v, a.T, out=out[:, 0, 0]) - assert_array_equal(c, tgt_mv) - - # test out contiguous in only last dim - out = np.ones((10, 2), dtype=float) - c = self.matmul(a, b, out=out[::2, :]) - assert_array_equal(c, tgt) - - # test transposes of out, args - out = np.ones((5, 2), dtype=float) - c = self.matmul(b.T, a.T, out=out.T) - assert_array_equal(out, tgt) - - m1 = np.arange(15.).reshape(5, 3) - m2 = np.arange(21.).reshape(3, 7) - m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous - vc = np.arange(10.) - vr = np.arange(6.) 
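# (Together with m0 below, these fixtures cover C-ordered, Fortran-ordered,
# strided and zero-sized operands, so test_dot_equivalent can check the same
# matmul-vs-dot invariant across every layout combination, i.e. roughly:
#
#     assert np.array_equal(np.matmul(m1, m2), np.dot(m1, m2))
# )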
- m0 = np.zeros((3, 0)) - @pytest.mark.parametrize('args', ( - # matrix-matrix - (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), - # matrix-matrix-transpose, contiguous and non - (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), - (m3, m3.T), (m3.T, m3), - # matrix-matrix non-contiguous - (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), - # vector-matrix, matrix-vector, contiguous - (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), - # vector-matrix, matrix-vector, vector non-contiguous - (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), - # vector-matrix, matrix-vector, matrix non-contiguous - (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), - # vector-matrix, matrix-vector, both non-contiguous - (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), - # size == 0 - (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), - )) - def test_dot_equivalent(self, args): - r1 = np.matmul(*args) - r2 = np.dot(*args) - assert_equal(r1, r2) - - r3 = np.matmul(args[0].copy(), args[1].copy()) - assert_equal(r1, r3) - - def test_matmul_object(self): - import fractions - - f = np.vectorize(fractions.Fraction) - def random_ints(): - return np.random.randint(1, 1000, size=(10, 3, 3)) - M1 = f(random_ints(), random_ints()) - M2 = f(random_ints(), random_ints()) - - M3 = self.matmul(M1, M2) - - [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]] - - assert_allclose(N3, self.matmul(N1, N2)) - - def test_matmul_object_type_scalar(self): - from fractions import Fraction as F - v = np.array([F(2,3), F(5,7)]) - res = self.matmul(v, v) - assert_(type(res) is F) - - def test_matmul_empty(self): - a = np.empty((3, 0), dtype=object) - b = np.empty((0, 3), dtype=object) - c = np.zeros((3, 3)) - assert_array_equal(np.matmul(a, b), c) - - def test_matmul_exception_multiply(self): - # test that matmul fails if `__mul__` is missing - class add_not_multiply(): - def __add__(self, other): - return self - a = np.full((3,3), add_not_multiply()) - with assert_raises(TypeError): - b = np.matmul(a, a) - - def test_matmul_exception_add(self): - # test that matmul fails if `__add__` is missing - class multiply_not_add(): - def __mul__(self, other): - return self - a = np.full((3,3), multiply_not_add()) - with assert_raises(TypeError): - b = np.matmul(a, a) - - def test_matmul_bool(self): - # gh-14439 - a = np.array([[1, 0],[1, 1]], dtype=bool) - assert np.max(a.view(np.uint8)) == 1 - b = np.matmul(a, a) - # matmul with boolean output should always be 0, 1 - assert np.max(b.view(np.uint8)) == 1 - - rg = np.random.default_rng(np.random.PCG64(43)) - d = rg.integers(2, size=4*5, dtype=np.int8) - d = d.reshape(4, 5) > 0 - out1 = np.matmul(d, d.reshape(5, 4)) - out2 = np.dot(d, d.reshape(5, 4)) - assert_equal(out1, out2) - - c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) - assert not np.any(c) - - -if sys.version_info[:2] >= (3, 5): - class TestMatmulOperator(MatmulCommon): - import operator - matmul = operator.matmul - - def test_array_priority_override(self): - - class A(object): - __array_priority__ = 1000 - - def __matmul__(self, other): - return "A" - - def __rmatmul__(self, other): - return "A" - - a = A() - b = np.ones(2) - assert_equal(self.matmul(a, b), "A") - assert_equal(self.matmul(b, a), "A") - - def test_matmul_raises(self): - assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) - assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) - assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc')) - - def 
test_matmul_inplace(): - # It would be nice to support in-place matmul eventually, but for now - # we don't have a working implementation, so better just to error out - # and nudge people to writing "a = a @ b". - a = np.eye(3) - b = np.eye(3) - assert_raises(TypeError, a.__imatmul__, b) - import operator - assert_raises(TypeError, operator.imatmul, a, b) - # we avoid writing the token `exec` so as not to crash python 2's - # parser - exec_ = getattr(builtins, "exec") - assert_raises(TypeError, exec_, "a @= b", globals(), locals()) - - def test_matmul_axes(): - a = np.arange(3*4*5).reshape(3, 4, 5) - c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) - assert c.shape == (3, 4, 4) - d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) - assert d.shape == (4, 4, 3) - e = np.swapaxes(d, 0, 2) - assert_array_equal(e, c) - f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) - assert f.shape == (4, 5) - - -class TestInner(object): - - def test_inner_type_mismatch(self): - c = 1. - A = np.array((1,1), dtype='i,i') - - assert_raises(TypeError, np.inner, c, A) - assert_raises(TypeError, np.inner, A, c) - - def test_inner_scalar_and_vector(self): - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - sca = np.array(3, dtype=dt)[()] - vec = np.array([1, 2], dtype=dt) - desired = np.array([3, 6], dtype=dt) - assert_equal(np.inner(vec, sca), desired) - assert_equal(np.inner(sca, vec), desired) - - def test_vecself(self): - # Ticket 844. - # Inner product of a vector with itself segfaults or give - # meaningless result - a = np.zeros(shape=(1, 80), dtype=np.float64) - p = np.inner(a, a) - assert_almost_equal(p, 0, decimal=14) - - def test_inner_product_with_various_contiguities(self): - # github issue 6532 - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - # check an inner product involving a matrix transpose - A = np.array([[1, 2], [3, 4]], dtype=dt) - B = np.array([[1, 3], [2, 4]], dtype=dt) - C = np.array([1, 1], dtype=dt) - desired = np.array([4, 6], dtype=dt) - assert_equal(np.inner(A.T, C), desired) - assert_equal(np.inner(C, A.T), desired) - assert_equal(np.inner(B, C), desired) - assert_equal(np.inner(C, B), desired) - # check a matrix product - desired = np.array([[7, 10], [15, 22]], dtype=dt) - assert_equal(np.inner(A, B), desired) - # check the syrk vs. 
gemm paths - desired = np.array([[5, 11], [11, 25]], dtype=dt) - assert_equal(np.inner(A, A), desired) - assert_equal(np.inner(A, A.copy()), desired) - # check an inner product involving an aliased and reversed view - a = np.arange(5).astype(dt) - b = a[::-1] - desired = np.array(10, dtype=dt).item() - assert_equal(np.inner(b, a), desired) - - def test_3d_tensor(self): - for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - a = np.arange(24).reshape(2,3,4).astype(dt) - b = np.arange(24, 48).reshape(2,3,4).astype(dt) - desired = np.array( - [[[[ 158, 182, 206], - [ 230, 254, 278]], - - [[ 566, 654, 742], - [ 830, 918, 1006]], - - [[ 974, 1126, 1278], - [1430, 1582, 1734]]], - - [[[1382, 1598, 1814], - [2030, 2246, 2462]], - - [[1790, 2070, 2350], - [2630, 2910, 3190]], - - [[2198, 2542, 2886], - [3230, 3574, 3918]]]], - dtype=dt - ) - assert_equal(np.inner(a, b), desired) - assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) - - -class TestAlen(object): - def test_basic(self): - with pytest.warns(DeprecationWarning): - m = np.array([1, 2, 3]) - assert_equal(np.alen(m), 3) - - m = np.array([[1, 2, 3], [4, 5, 7]]) - assert_equal(np.alen(m), 2) - - m = [1, 2, 3] - assert_equal(np.alen(m), 3) - - m = [[1, 2, 3], [4, 5, 7]] - assert_equal(np.alen(m), 2) - - def test_singleton(self): - with pytest.warns(DeprecationWarning): - assert_equal(np.alen(5), 1) - - -class TestChoose(object): - def setup(self): - self.x = 2*np.ones((3,), dtype=int) - self.y = 3*np.ones((3,), dtype=int) - self.x2 = 2*np.ones((2, 3), dtype=int) - self.y2 = 3*np.ones((2, 3), dtype=int) - self.ind = [0, 0, 1] - - def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) - assert_equal(A, [2, 2, 3]) - - def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - -class TestRepeat(object): - def setup(self): - self.m = np.array([1, 2, 3, 4, 5, 6]) - self.m_rect = self.m.reshape((2, 3)) - - def test_basic(self): - A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) - assert_equal(A, [1, 2, 2, 2, 3, - 3, 4, 5, 6, 6]) - - def test_broadcast1(self): - A = np.repeat(self.m, 2) - assert_equal(A, [1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6]) - - def test_axis_spec(self): - A = np.repeat(self.m_rect, [2, 1], axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6]]) - - A = np.repeat(self.m_rect, [1, 3, 2], axis=1) - assert_equal(A, [[1, 2, 2, 2, 3, 3], - [4, 5, 5, 5, 6, 6]]) - - def test_broadcast2(self): - A = np.repeat(self.m_rect, 2, axis=0) - assert_equal(A, [[1, 2, 3], - [1, 2, 3], - [4, 5, 6], - [4, 5, 6]]) - - A = np.repeat(self.m_rect, 2, axis=1) - assert_equal(A, [[1, 1, 2, 2, 3, 3], - [4, 4, 5, 5, 6, 6]]) - - -# TODO: test for multidimensional -NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} - - -@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) -class TestNeighborhoodIter(object): - # Simple, 2d tests - def test_simple2d(self, dt): - # Test zero and one padding for simple data type - x = np.array([[0, 1], [2, 3]], dtype=dt) - r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), - np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), - np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), - np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [np.array([[1, 1, 1], [1, 0, 1]], 
dtype=dt), - np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), - np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), - np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) - assert_array_equal(l, r) - - r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), - np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), - np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), - np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) - assert_array_equal(l, r) - - def test_mirror2d(self, dt): - x = np.array([[0, 1], [2, 3]], dtype=dt) - r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), - np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), - np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), - np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Simple, 1d tests - def test_simple(self, dt): - # Test padding with constant values - x = np.linspace(1, 5, 5).astype(dt) - r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 1], x[0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 1], x[0], NEIGH_MODE['one']) - assert_array_equal(l, r) - - r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] - l = _multiarray_tests.test_neighborhood_iterator( - x, [-1, 1], x[4], NEIGH_MODE['constant']) - assert_array_equal(l, r) - - # Test mirror modes - def test_mirror(self, dt): - x = np.linspace(1, 5, 5).astype(dt) - r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], - [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) - l = _multiarray_tests.test_neighborhood_iterator( - x, [-2, 2], x[1], NEIGH_MODE['mirror']) - assert_([i.dtype == dt for i in l]) - assert_array_equal(l, r) - - # Circular mode - def test_circular(self, dt): - x = np.linspace(1, 5, 5).astype(dt) - r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], - [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) - l = _multiarray_tests.test_neighborhood_iterator( - x, [-2, 2], x[0], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - -# Test stacking neighborhood iterators -class TestStackedNeighborhoodIter(object): - # Simple, 1d test: stacking 2 constant-padded neigh iterators - def test_simple_const(self): - dt = np.float64 - # Test zero and one padding for simple data type - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0], dtype=dt), - np.array([0], dtype=dt), - np.array([1], dtype=dt), - np.array([2], dtype=dt), - np.array([3], dtype=dt), - np.array([0], dtype=dt), - np.array([0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [np.array([1, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) - assert_array_equal(l, r) - - # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and - # mirror padding - def test_simple_mirror(self): - dt = np.float64 - # Stacking zero on top of mirror - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 1], 
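# The padding modes exercised through the private _multiarray_tests helper
# correspond roughly to public np.pad modes; a sketch of that mapping only
# (mode names on the right refer to the NEIGH_MODE dict above):
import numpy as np

x = np.array([1., 2., 3., 4., 5.])
assert np.pad(x, 1, mode='constant').tolist() == [0, 1, 2, 3, 4, 5, 0]   # 'zero'
assert np.pad(x, 1, mode='symmetric').tolist() == [1, 1, 2, 3, 4, 5, 5]  # 'mirror'
assert np.pad(x, 1, mode='wrap').tolist() == [5, 1, 2, 3, 4, 5, 1]       # 'circular'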
dtype=dt), - np.array([1, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 3], dtype=dt), - np.array([3, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 2nd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 3], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 3rd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 0, 0, 1, 2], dtype=dt), - np.array([0, 0, 1, 2, 3], dtype=dt), - np.array([0, 1, 2, 3, 0], dtype=dt), - np.array([1, 2, 3, 0, 0], dtype=dt), - np.array([2, 3, 0, 0, 3], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and - # circular padding - def test_simple_circular(self): - dt = np.float64 - # Stacking zero on top of mirror - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 3, 1], dtype=dt), - np.array([3, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 1], dtype=dt), - np.array([3, 1, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 2nd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 3rd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([3, 0, 0, 1, 2], dtype=dt), - np.array([0, 0, 1, 2, 3], dtype=dt), - np.array([0, 1, 2, 3, 0], dtype=dt), - np.array([1, 2, 3, 0, 0], dtype=dt), - np.array([2, 3, 0, 0, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator - # being strictly within the array - def test_simple_strict_within(self): - dt = np.float64 - # Stacking zero on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], 
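# Conceptual sketch only, not the iterator machinery itself: stacking two
# neighborhood iterators behaves like two successive pads. Zero padding
# applied on top of mirror padding reproduces the windows asserted in
# test_simple_mirror above.
import numpy as np

x = np.array([1., 2., 3.])
inner = np.pad(x, 1, mode='symmetric')     # [1, 1, 2, 3, 3]
outer = np.pad(inner, 1, mode='constant')  # [0, 1, 1, 2, 3, 3, 0]
assert outer.tolist() == [0, 1, 1, 2, 3, 3, 0]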
dtype=dt) - r = [np.array([1, 2, 3, 0], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 3], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 1], dtype=dt)] - l = _multiarray_tests.test_neighborhood_iterator_oob( - x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - -class TestWarnings(object): - - def test_complex_warning(self): - x = np.array([1, 2]) - y = np.array([1-2j, 1+2j]) - - with warnings.catch_warnings(): - warnings.simplefilter("error", np.ComplexWarning) - assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) - assert_equal(x, [1, 2]) - - -class TestMinScalarType(object): - - def test_usigned_shortshort(self): - dt = np.min_scalar_type(2**8-1) - wanted = np.dtype('uint8') - assert_equal(wanted, dt) - - def test_usigned_short(self): - dt = np.min_scalar_type(2**16-1) - wanted = np.dtype('uint16') - assert_equal(wanted, dt) - - def test_usigned_int(self): - dt = np.min_scalar_type(2**32-1) - wanted = np.dtype('uint32') - assert_equal(wanted, dt) - - def test_usigned_longlong(self): - dt = np.min_scalar_type(2**63-1) - wanted = np.dtype('uint64') - assert_equal(wanted, dt) - - def test_object(self): - dt = np.min_scalar_type(2**64) - wanted = np.dtype('O') - assert_equal(wanted, dt) - - -from numpy.core._internal import _dtype_from_pep3118 - - -class TestPEP3118Dtype(object): - def _check(self, spec, wanted): - dt = np.dtype(wanted) - actual = _dtype_from_pep3118(spec) - assert_equal(actual, dt, - err_msg="spec %r != dtype %r" % (spec, wanted)) - - def test_native_padding(self): - align = np.dtype('i').alignment - for j in range(8): - if j == 0: - s = 'bi' - else: - s = 'b%dxi' % j - self._check('@'+s, {'f0': ('i1', 0), - 'f1': ('i', align*(1 + j//align))}) - self._check('='+s, {'f0': ('i1', 0), - 'f1': ('i', 1+j)}) - - def test_native_padding_2(self): - # Native padding should work also for structs and sub-arrays - self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) - self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) - - def test_trailing_padding(self): - # Trailing padding should be included, *and*, the item size - # should match the alignment if in aligned mode - align = np.dtype('i').alignment - size = np.dtype('i').itemsize - - def aligned(n): - return align*(1 + (n-1)//align) - - base = dict(formats=['i'], names=['f0']) - - self._check('ix', dict(itemsize=aligned(size + 1), **base)) - self._check('ixx', dict(itemsize=aligned(size + 2), **base)) - self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) - self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) - self._check('i7x', dict(itemsize=aligned(size + 7), **base)) - - self._check('^ix', dict(itemsize=size + 1, **base)) - self._check('^ixx', dict(itemsize=size + 2, **base)) - self._check('^ixxx', dict(itemsize=size + 3, **base)) - self._check('^ixxxx', dict(itemsize=size + 4, **base)) - self._check('^i7x', dict(itemsize=size + 7, **base)) - - def test_native_padding_3(self): - dt = np.dtype( - [('a', 'b'), ('b', 'i'), - ('sub', 
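# np.min_scalar_type in isolation: it returns the smallest dtype able to
# hold a given Python scalar, falling back to object dtype past the
# uint64 range (exactly the cases the class above pins down).
import numpy as np

assert np.min_scalar_type(2**8 - 1) == np.dtype('uint8')
assert np.min_scalar_type(2**16 - 1) == np.dtype('uint16')
assert np.min_scalar_type(2**64) == np.dtype('O')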
np.dtype('b,i')), ('c', 'i')], - align=True) - self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) - - dt = np.dtype( - [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), - ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) - self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) - - def test_padding_with_array_inside_struct(self): - dt = np.dtype( - [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), - ('d', 'i')], - align=True) - self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) - - def test_byteorder_inside_struct(self): - # The byte order after @T{=i} should be '=', not '@'. - # Check this by noting the absence of native alignment. - self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), - 'f1': ('i', 5)}) - - def test_intra_padding(self): - # Natively aligned sub-arrays may require some internal padding - align = np.dtype('i').alignment - size = np.dtype('i').itemsize - - def aligned(n): - return (align*(1 + (n-1)//align)) - - self._check('(3)T{ix}', (dict( - names=['f0'], - formats=['i'], - offsets=[0], - itemsize=aligned(size + 1) - ), (3,))) - - def test_char_vs_string(self): - dt = np.dtype('c') - self._check('c', dt) - - dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) - self._check('4c4s', dt) - - def test_field_order(self): - # gh-9053 - previously, we relied on dictionary key order - self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) - self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) - - def test_unnamed_fields(self): - self._check('ii', [('f0', 'i'), ('f1', 'i')]) - self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) - - self._check('i', 'i') - self._check('i:f0:', [('f0', 'i')]) - - -class TestNewBufferProtocol(object): - """ Test PEP3118 buffers """ - - def _check_roundtrip(self, obj): - obj = np.asarray(obj) - x = memoryview(obj) - y = np.asarray(x) - y2 = np.array(x) - assert_(not y.flags.owndata) - assert_(y2.flags.owndata) - - assert_equal(y.dtype, obj.dtype) - assert_equal(y.shape, obj.shape) - assert_array_equal(obj, y) - - assert_equal(y2.dtype, obj.dtype) - assert_equal(y2.shape, obj.shape) - assert_array_equal(obj, y2) - - def test_roundtrip(self): - x = np.array([1, 2, 3, 4, 5], dtype='i4') - self._check_roundtrip(x) - - x = np.array([[1, 2], [3, 4]], dtype=np.float64) - self._check_roundtrip(x) - - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] - self._check_roundtrip(x) - - dt = [('a', 'b'), - ('b', 'h'), - ('c', 'i'), - ('d', 'l'), - ('dx', 'q'), - ('e', 'B'), - ('f', 'H'), - ('g', 'I'), - ('h', 'L'), - ('hx', 'Q'), - ('i', np.single), - ('j', np.double), - ('k', np.longdouble), - ('ix', np.csingle), - ('jx', np.cdouble), - ('kx', np.clongdouble), - ('l', 'S4'), - ('m', 'U4'), - ('n', 'V3'), - ('o', '?'), - ('p', np.half), - ] - x = np.array( - [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - b'aaaa', 'bbbb', b'xxx', True, 1.0)], - dtype=dt) - self._check_roundtrip(x) - - x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) - self._check_roundtrip(x) - - x = np.array([1, 2, 3], dtype='>i2') - self._check_roundtrip(x) - - x = np.array([1, 2, 3], dtype='') - x = np.zeros(4, dtype=dt) - self._check_roundtrip(x) - - def test_roundtrip_scalar(self): - # Issue #4015. 
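# The PEP 3118 round trip that _check_roundtrip verifies, in miniature:
# np.asarray(memoryview) shares the exporter's memory while np.array
# copies it.
import numpy as np

x = np.array([[1, 2], [3, 4]], dtype=np.float64)
m = memoryview(x)
y = np.asarray(m)
y2 = np.array(m)
assert not y.flags.owndata and y2.flags.owndata
assert y.dtype == x.dtype and y.shape == x.shape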
- self._check_roundtrip(0) - - def test_invalid_buffer_format(self): - # datetime64 cannot be used fully in a buffer yet - # Should be fixed in the next Numpy major release - dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) - a = np.empty(3, dt) - assert_raises((ValueError, BufferError), memoryview, a) - assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) - - def test_export_simple_1d(self): - x = np.array([1, 2, 3, 4, 5], dtype='i') - y = memoryview(x) - assert_equal(y.format, 'i') - assert_equal(y.shape, (5,)) - assert_equal(y.ndim, 1) - assert_equal(y.strides, (4,)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 4) - - def test_export_simple_nd(self): - x = np.array([[1, 2], [3, 4]], dtype=np.float64) - y = memoryview(x) - assert_equal(y.format, 'd') - assert_equal(y.shape, (2, 2)) - assert_equal(y.ndim, 2) - assert_equal(y.strides, (16, 8)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 8) - - def test_export_discontiguous(self): - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] - y = memoryview(x) - assert_equal(y.format, 'f') - assert_equal(y.shape, (3, 3)) - assert_equal(y.ndim, 2) - assert_equal(y.strides, (36, 4)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 4) - - def test_export_record(self): - dt = [('a', 'b'), - ('b', 'h'), - ('c', 'i'), - ('d', 'l'), - ('dx', 'q'), - ('e', 'B'), - ('f', 'H'), - ('g', 'I'), - ('h', 'L'), - ('hx', 'Q'), - ('i', np.single), - ('j', np.double), - ('k', np.longdouble), - ('ix', np.csingle), - ('jx', np.cdouble), - ('kx', np.clongdouble), - ('l', 'S4'), - ('m', 'U4'), - ('n', 'V3'), - ('o', '?'), - ('p', np.half), - ] - x = np.array( - [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - b'aaaa', 'bbbb', b' ', True, 1.0)], - dtype=dt) - y = memoryview(x) - assert_equal(y.shape, (1,)) - assert_equal(y.ndim, 1) - assert_equal(y.suboffsets, EMPTY) - - sz = sum([np.dtype(b).itemsize for a, b in dt]) - if np.dtype('l').itemsize == 4: - assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') - else: - assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides - if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): - assert_equal(y.strides, (sz,)) - assert_equal(y.itemsize, sz) - - def test_export_subarray(self): - x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) - y = memoryview(x) - assert_equal(y.format, 'T{(2,2)i:a:}') - assert_equal(y.shape, EMPTY) - assert_equal(y.ndim, 0) - assert_equal(y.strides, EMPTY) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 16) - - def test_export_endian(self): - x = np.array([1, 2, 3], dtype='>i') - y = memoryview(x) - if sys.byteorder == 'little': - assert_equal(y.format, '>i') - else: - assert_equal(y.format, 'i') - - x = np.array([1, 2, 3], dtype=' 2: - with assert_raises_regex( - NotImplementedError, - r"Unrepresentable .* 'u' \(UCS-2 strings\)" - ): - raise exc.__cause__ - - def test_ctypes_integer_via_memoryview(self): - # gh-11150, due to bpo-10746 - for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}: - value = c_integer(42) - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) - np.asarray(value) - - def test_ctypes_struct_via_memoryview(self): - # gh-10528 - class foo(ctypes.Structure): - _fields_ = [('a', 
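# What the export tests assert, restated standalone: a C-contiguous
# float64 2x2 array exposes buffer format 'd', shape (2, 2) and byte
# strides (16, 8).
import numpy as np

m = memoryview(np.array([[1, 2], [3, 4]], dtype=np.float64))
assert (m.format, m.shape, m.ndim) == ('d', (2, 2), 2)
assert m.strides == (16, 8) and m.itemsize == 8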
ctypes.c_uint8), ('b', ctypes.c_uint32)] - f = foo(a=1, b=2) - - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) - arr = np.asarray(f) - - assert_equal(arr['a'], 1) - assert_equal(arr['b'], 2) - f.a = 3 - assert_equal(arr['a'], 3) - - -class TestArrayAttributeDeletion(object): - - def test_multiarray_writable_attributes_deletion(self): - # ticket #2046, should not seqfault, raise AttributeError - a = np.ones(2) - attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "Assigning the 'data' attribute") - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - def test_multiarray_not_writable_attributes_deletion(self): - a = np.ones(2) - attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", - "ctypes", "T", "__array_interface__", "__array_struct__", - "__array_priority__", "__array_finalize__"] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - def test_multiarray_flags_writable_attribute_deletion(self): - a = np.ones(2).flags - attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable'] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - def test_multiarray_flags_not_writable_attribute_deletion(self): - a = np.ones(2).flags - attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", - "owndata", "fnc", "forc", "behaved", "carray", "farray", - "num"] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - -class TestArrayInterface(): - class Foo(object): - def __init__(self, value): - self.value = value - self.iface = {'typestr': 'f8'} - - def __float__(self): - return float(self.value) - - @property - def __array_interface__(self): - return self.iface - - - f = Foo(0.5) - - @pytest.mark.parametrize('val, iface, expected', [ - (f, {}, 0.5), - ([f], {}, [0.5]), - ([f, f], {}, [0.5, 0.5]), - (f, {'shape': ()}, 0.5), - (f, {'shape': None}, TypeError), - (f, {'shape': (1, 1)}, [[0.5]]), - (f, {'shape': (2,)}, ValueError), - (f, {'strides': ()}, 0.5), - (f, {'strides': (2,)}, ValueError), - (f, {'strides': 16}, TypeError), - ]) - def test_scalar_interface(self, val, iface, expected): - # Test scalar coercion within the array interface - self.f.iface = {'typestr': 'f8'} - self.f.iface.update(iface) - if HAS_REFCOUNT: - pre_cnt = sys.getrefcount(np.dtype('f8')) - if isinstance(expected, type): - assert_raises(expected, np.array, val) - else: - result = np.array(val) - assert_equal(np.array(val), expected) - assert result.dtype == 'f8' - del result - if HAS_REFCOUNT: - post_cnt = sys.getrefcount(np.dtype('f8')) - assert_equal(pre_cnt, post_cnt) - -def test_interface_no_shape(): - class ArrayLike(object): - array = np.array(1) - __array_interface__ = array.__array_interface__ - assert_equal(np.array(ArrayLike()), 1) - - -def test_array_interface_itemsize(): - # See gh-6361 - my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], - 'offsets': [0, 8], 'itemsize': 16}) - a = np.ones(10, dtype=my_dtype) - descr_t = np.dtype(a.__array_interface__['descr']) - typestr_t = np.dtype(a.__array_interface__['typestr']) - assert_equal(descr_t.itemsize, typestr_t.itemsize) - - -def test_array_interface_empty_shape(): - # See gh-7994 - arr = np.array([1, 2, 3]) - interface1 = dict(arr.__array_interface__) - interface1['shape'] = () - - class DummyArray1(object): - __array_interface__ = interface1 - - # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting - # the 
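# The scalar path through __array_interface__, mirroring the Foo fixture
# above: with just a typestr and a __float__, np.array coerces the object
# to a 0-d float64 (a sketch of the tested behaviour, not new API).
import numpy as np

class Foo(object):
    __array_interface__ = {'typestr': 'f8'}
    def __float__(self):
        return 0.5

result = np.array(Foo())
assert result == 0.5 and result.dtype == np.dtype('f8')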
interface data to bytes would invoke the bug this tests for, that - # __array_interface__ with shape=() is not allowed if the data is an object - # exposing the buffer interface - interface2 = dict(interface1) - interface2['data'] = arr[0].tobytes() - - class DummyArray2(object): - __array_interface__ = interface2 - - arr1 = np.asarray(DummyArray1()) - arr2 = np.asarray(DummyArray2()) - arr3 = arr[:1].reshape(()) - assert_equal(arr1, arr2) - assert_equal(arr1, arr3) - -def test_array_interface_offset(): - arr = np.array([1, 2, 3], dtype='int32') - interface = dict(arr.__array_interface__) - interface['data'] = memoryview(arr) - interface['shape'] = (2,) - interface['offset'] = 4 - - - class DummyArray(object): - __array_interface__ = interface - - arr1 = np.asarray(DummyArray()) - assert_equal(arr1, arr[1:]) - -def test_flat_element_deletion(): - it = np.ones(3).flat - try: - del it[1] - del it[1:2] - except TypeError: - pass - except Exception: - raise AssertionError - - -def test_scalar_element_deletion(): - a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) - assert_raises(ValueError, a[0].__delitem__, 'x') - - -class TestMemEventHook(object): - def test_mem_seteventhook(self): - # The actual tests are within the C code in - # multiarray/_multiarray_tests.c.src - _multiarray_tests.test_pydatamem_seteventhook_start() - # force an allocation and free of a numpy array - # needs to be larger then limit of small memory cacher in ctors.c - a = np.zeros(1000) - del a - break_cycles() - _multiarray_tests.test_pydatamem_seteventhook_end() - -class TestMapIter(object): - def test_mapiter(self): - # The actual tests are within the C code in - # multiarray/_multiarray_tests.c.src - - a = np.arange(12).reshape((3, 4)).astype(float) - index = ([1, 1, 2, 0], - [0, 0, 2, 3]) - vals = [50, 50, 30, 16] - - _multiarray_tests.test_inplace_increment(a, index, vals) - assert_equal(a, [[0.00, 1., 2.0, 19.], - [104., 5., 6.0, 7.0], - [8.00, 9., 40., 11.]]) - - b = np.arange(6).astype(float) - index = (np.array([1, 2, 0]),) - vals = [50, 4, 100.1] - _multiarray_tests.test_inplace_increment(b, index, vals) - assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) - - -class TestAsCArray(object): - def test_1darray(self): - array = np.arange(24, dtype=np.double) - from_c = _multiarray_tests.test_as_c_array(array, 3) - assert_equal(array[3], from_c) - - def test_2darray(self): - array = np.arange(24, dtype=np.double).reshape(3, 8) - from_c = _multiarray_tests.test_as_c_array(array, 2, 4) - assert_equal(array[2, 4], from_c) - - def test_3darray(self): - array = np.arange(24, dtype=np.double).reshape(2, 3, 4) - from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3) - assert_equal(array[1, 2, 3], from_c) - - -class TestConversion(object): - def test_array_scalar_relational_operation(self): - # All integer - for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - - for dt2 in np.typecodes['AllInteger']: - assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - # Unsigned integers - for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - - # Unsigned vs signed - for dt2 in 'bhilqp': - 
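# test_array_interface_offset in runnable form: the 'offset' key skips
# bytes at the start of the exporting buffer.
import numpy as np

arr = np.array([1, 2, 3], dtype='int32')
iface = dict(arr.__array_interface__)
iface['data'] = memoryview(arr)
iface['shape'] = (2,)
iface['offset'] = 4          # skip the first 4-byte element

class DummyArray(object):
    __array_interface__ = iface

assert np.asarray(DummyArray()).tolist() == [2, 3]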
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - # Signed integers and floats - for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - - for dt2 in 'bhlqp' + np.typecodes['Float']: - assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - def test_to_bool_scalar(self): - assert_equal(bool(np.array([False])), False) - assert_equal(bool(np.array([True])), True) - assert_equal(bool(np.array([[42]])), True) - assert_raises(ValueError, bool, np.array([1, 2])) - - class NotConvertible(object): - def __bool__(self): - raise NotImplementedError - __nonzero__ = __bool__ # python 2 - - assert_raises(NotImplementedError, bool, np.array(NotConvertible())) - assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) - - self_containing = np.array([None]) - self_containing[0] = self_containing - try: - Error = RecursionError - except NameError: - Error = RuntimeError # python < 3.5 - assert_raises(Error, bool, self_containing) # previously stack overflow - self_containing[0] = None # resolve circular reference - - def test_to_int_scalar(self): - # gh-9972 means that these aren't always the same - int_funcs = (int, lambda x: x.__int__()) - for int_func in int_funcs: - assert_equal(int_func(np.array([1])), 1) - assert_equal(int_func(np.array([0])), 0) - assert_equal(int_func(np.array([[42]])), 42) - assert_raises(TypeError, int_func, np.array([1, 2])) - - # gh-9972 - assert_equal(4, int_func(np.array('4'))) - assert_equal(5, int_func(np.bytes_(b'5'))) - assert_equal(6, int_func(np.unicode_(u'6'))) - - class HasTrunc: - def __trunc__(self): - return 3 - assert_equal(3, int_func(np.array(HasTrunc()))) - assert_equal(3, int_func(np.array([HasTrunc()]))) - - class NotConvertible(object): - def __int__(self): - raise NotImplementedError - assert_raises(NotImplementedError, - int_func, np.array(NotConvertible())) - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) - - -class TestWhere(object): - def test_basic(self): - dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, - np.longdouble, np.clongdouble] - for dt in dts: - c = np.ones(53, dtype=bool) - assert_equal(np.where( c, dt(0), dt(1)), dt(0)) - assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) - assert_equal(np.where(True, dt(0), dt(1)), dt(0)) - assert_equal(np.where(False, dt(0), dt(1)), dt(1)) - d = np.ones_like(c).astype(dt) - e = np.zeros_like(d) - r = d.astype(dt) - c[7] = False - r[7] = e[7] - assert_equal(np.where(c, e, e), e) - assert_equal(np.where(c, d, e), r) - assert_equal(np.where(c, d, e[0]), r) - assert_equal(np.where(c, d[0], e), r) - assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) - assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) - assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) - assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) - 
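# The truthiness rules test_to_bool_scalar checks, distilled: only arrays
# with exactly one element convert to bool; anything larger raises.
import numpy as np

assert bool(np.array([[42]])) is True
try:
    bool(np.array([1, 2]))
except ValueError:
    pass
else:
    raise AssertionError("expected ValueError for multi-element bool()")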
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) - assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) - assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) - - def test_exotic(self): - # object - assert_array_equal(np.where(True, None, None), np.array(None)) - # zero sized - m = np.array([], dtype=bool).reshape(0, 3) - b = np.array([], dtype=np.float64).reshape(0, 3) - assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) - - # object cast - d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, - 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, - 1.267, 0.229, -1.39, 0.487]) - nan = float('NaN') - e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, - 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], - dtype=object) - m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, - 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) - - r = e[:] - r[np.where(m)] = d[np.where(m)] - assert_array_equal(np.where(m, d, e), r) - - r = e[:] - r[np.where(~m)] = d[np.where(~m)] - assert_array_equal(np.where(m, e, d), r) - - assert_array_equal(np.where(m, e, e), e) - - # minimal dtype result with NaN scalar (e.g required by pandas) - d = np.array([1., 2.], dtype=np.float32) - e = float('NaN') - assert_equal(np.where(True, d, e).dtype, np.float32) - e = float('Infinity') - assert_equal(np.where(True, d, e).dtype, np.float32) - e = float('-Infinity') - assert_equal(np.where(True, d, e).dtype, np.float32) - # also check upcast - e = float(1e150) - assert_equal(np.where(True, d, e).dtype, np.float64) - - def test_ndim(self): - c = [True, False] - a = np.zeros((2, 25)) - b = np.ones((2, 25)) - r = np.where(np.array(c)[:,np.newaxis], a, b) - assert_array_equal(r[0], a[0]) - assert_array_equal(r[1], b[0]) - - a = a.T - b = b.T - r = np.where(c, a, b) - assert_array_equal(r[:,0], a[:,0]) - assert_array_equal(r[:,1], b[:,0]) - - def test_dtype_mix(self): - c = np.array([False, True, False, False, False, False, True, False, - False, False, True, False]) - a = np.uint32(1) - b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], - dtype=np.float64) - r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], - dtype=np.float64) - assert_equal(np.where(c, a, b), r) - - a = a.astype(np.float32) - b = b.astype(np.int64) - assert_equal(np.where(c, a, b), r) - - # non bool mask - c = c.astype(int) - c[c != 0] = 34242324 - assert_equal(np.where(c, a, b), r) - # invert - tmpmask = c != 0 - c[c == 0] = 41247212 - c[tmpmask] = 0 - assert_equal(np.where(c, b, a), r) - - def test_foreign(self): - c = np.array([False, True, False, False, False, False, True, False, - False, False, True, False]) - r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], - dtype=np.float64) - a = np.ones(1, dtype='>i4') - b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], - dtype=np.float64) - assert_equal(np.where(c, a, b), r) - - b = b.astype('>f8') - assert_equal(np.where(c, a, b), r) - - a = a.astype('i4') - assert_equal(np.where(c, a, b), r) - - def test_error(self): - c = [True, True] - a = np.ones((4, 5)) - b = np.ones((5, 5)) - assert_raises(ValueError, np.where, c, a, a) - assert_raises(ValueError, np.where, c[0], a, b) - - def test_string(self): - # gh-4778 check strings are properly filled with nulls - a = np.array("abc") - b = np.array("x" * 753) - assert_equal(np.where(True, a, b), "abc") - assert_equal(np.where(False, b, a), "abc") - - # check native datatype sized strings - a = np.array("abcd") - b = np.array("x" * 8) - 
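# The dtype-preservation rule tested above, standalone: a scalar NaN
# second operand keeps float32, while a value outside float32 range
# upcasts the result to float64.
import numpy as np

d = np.array([1., 2.], dtype=np.float32)
assert np.where(True, d, float('nan')).dtype == np.float32
assert np.where(True, d, 1e150).dtype == np.float64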
assert_equal(np.where(True, a, b), "abcd") - assert_equal(np.where(False, b, a), "abcd") - - def test_empty_result(self): - # pass empty where result through an assignment which reads the data of - # empty arrays, error detectable with valgrind, see gh-8922 - x = np.zeros((1, 1)) - ibad = np.vstack(np.where(x == 99.)) - assert_array_equal(ibad, - np.atleast_2d(np.array([[],[]], dtype=np.intp))) - - def test_largedim(self): - # invalid read regression gh-9304 - shape = [10, 2, 3, 4, 5, 6] - np.random.seed(2) - array = np.random.rand(*shape) - - for i in range(10): - benchmark = array.nonzero() - result = array.nonzero() - assert_array_equal(benchmark, result) - - -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf(object): - - def test_empty_array(self): - x = np.array([]) - assert_(sys.getsizeof(x) > 0) - - def check_array(self, dtype): - elem_size = dtype(0).itemsize - - for length in [10, 50, 100, 500]: - x = np.arange(length, dtype=dtype) - assert_(sys.getsizeof(x) > length * elem_size) - - def test_array_int32(self): - self.check_array(np.int32) - - def test_array_int64(self): - self.check_array(np.int64) - - def test_array_float32(self): - self.check_array(np.float32) - - def test_array_float64(self): - self.check_array(np.float64) - - def test_view(self): - d = np.ones(100) - assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) - - def test_reshape(self): - d = np.ones(100) - assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) - - @_no_tracing - def test_resize(self): - d = np.ones(100) - old = sys.getsizeof(d) - d.resize(50) - assert_(old > sys.getsizeof(d)) - d.resize(150) - assert_(old < sys.getsizeof(d)) - - def test_error(self): - d = np.ones(100) - assert_raises(TypeError, d.__sizeof__, "a") - - -class TestHashing(object): - - def test_arrays_not_hashable(self): - x = np.ones(3) - assert_raises(TypeError, hash, x) - - def test_collections_hashable(self): - x = np.array([]) - assert_(not isinstance(x, collections_abc.Hashable)) - - -class TestArrayPriority(object): - # This will go away when __array_priority__ is settled, meanwhile - # it serves to check unintended changes. - op = operator - binary_ops = [ - op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, - op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, - op.ge, op.lt, op.le, op.ne, op.eq - ] - - # See #7949. Don't use "/" operator With -3 switch, since python reports it - # as a DeprecationWarning - if sys.version_info[0] < 3 and not sys.py3kwarning: - binary_ops.append(op.div) - - class Foo(np.ndarray): - __array_priority__ = 100. - - def __new__(cls, *args, **kwargs): - return np.array(*args, **kwargs).view(cls) - - class Bar(np.ndarray): - __array_priority__ = 101. - - def __new__(cls, *args, **kwargs): - return np.array(*args, **kwargs).view(cls) - - class Other(object): - __array_priority__ = 1000. 
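# The sys.getsizeof relationships checked by TestSizeOf, restated (valid
# on CPython only, as the tests note): an owning array accounts for its
# data buffer, a view is just a header over someone else's memory.
import sys
import numpy as np

d = np.ones(100)
assert sys.getsizeof(d) > 100 * d.itemsize
assert sys.getsizeof(d[...]) < sys.getsizeof(d)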
- - def _all(self, other): - return self.__class__() - - __add__ = __radd__ = _all - __sub__ = __rsub__ = _all - __mul__ = __rmul__ = _all - __pow__ = __rpow__ = _all - __div__ = __rdiv__ = _all - __mod__ = __rmod__ = _all - __truediv__ = __rtruediv__ = _all - __floordiv__ = __rfloordiv__ = _all - __and__ = __rand__ = _all - __xor__ = __rxor__ = _all - __or__ = __ror__ = _all - __lshift__ = __rlshift__ = _all - __rshift__ = __rrshift__ = _all - __eq__ = _all - __ne__ = _all - __gt__ = _all - __ge__ = _all - __lt__ = _all - __le__ = _all - - def test_ndarray_subclass(self): - a = np.array([1, 2]) - b = self.Bar([1, 2]) - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Bar), msg) - assert_(isinstance(f(b, a), self.Bar), msg) - - def test_ndarray_other(self): - a = np.array([1, 2]) - b = self.Other() - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Other), msg) - assert_(isinstance(f(b, a), self.Other), msg) - - def test_subclass_subclass(self): - a = self.Foo([1, 2]) - b = self.Bar([1, 2]) - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Bar), msg) - assert_(isinstance(f(b, a), self.Bar), msg) - - def test_subclass_other(self): - a = self.Foo([1, 2]) - b = self.Other() - for f in self.binary_ops: - msg = repr(f) - assert_(isinstance(f(a, b), self.Other), msg) - assert_(isinstance(f(b, a), self.Other), msg) - - -class TestBytestringArrayNonzero(object): - - def test_empty_bstring_array_is_falsey(self): - assert_(not np.array([''], dtype=str)) - - def test_whitespace_bstring_array_is_falsey(self): - a = np.array(['spam'], dtype=str) - a[0] = ' \0\0' - assert_(not a) - - def test_all_null_bstring_array_is_falsey(self): - a = np.array(['spam'], dtype=str) - a[0] = '\0\0\0\0' - assert_(not a) - - def test_null_inside_bstring_array_is_truthy(self): - a = np.array(['spam'], dtype=str) - a[0] = ' \0 \0' - assert_(a) - - -class TestUnicodeArrayNonzero(object): - - def test_empty_ustring_array_is_falsey(self): - assert_(not np.array([''], dtype=np.unicode_)) - - def test_whitespace_ustring_array_is_falsey(self): - a = np.array(['eggs'], dtype=np.unicode_) - a[0] = ' \0\0' - assert_(not a) - - def test_all_null_ustring_array_is_falsey(self): - a = np.array(['eggs'], dtype=np.unicode_) - a[0] = '\0\0\0\0' - assert_(not a) - - def test_null_inside_ustring_array_is_truthy(self): - a = np.array(['eggs'], dtype=np.unicode_) - a[0] = ' \0 \0' - assert_(a) - - -class TestFormat(object): - - def test_0d(self): - a = np.array(np.pi) - assert_equal('{:0.3g}'.format(a), '3.14') - assert_equal('{:0.3g}'.format(a[()]), '3.14') - - def test_1d_no_format(self): - a = np.array([np.pi]) - assert_equal('{}'.format(a), str(a)) - - def test_1d_format(self): - # until gh-5543, ensure that the behaviour matches what it used to be - a = np.array([np.pi]) - if sys.version_info[:2] >= (3, 4): - assert_raises(TypeError, '{:30}'.format, a) - else: - with suppress_warnings() as sup: - sup.filter(PendingDeprecationWarning) - res = '{:30}'.format(a) - dst = object.__format__(a, '30') - assert_equal(res, dst) - -from numpy.testing import IS_PYPY - -class TestCTypes(object): - - def test_ctypes_is_available(self): - test_arr = np.array([[1, 2, 3], [4, 5, 6]]) - - assert_equal(ctypes, test_arr.ctypes._ctypes) - assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) - - def test_ctypes_is_not_available(self): - from numpy.core import _internal - _internal.ctypes = None - try: - test_arr = np.array([[1, 2, 3], [4, 5, 6]]) - - 
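# __array_priority__ in brief: the operand with the higher priority
# decides the return type of a binary op, which is what these tests
# lock in.
import numpy as np

class Bar(np.ndarray):
    __array_priority__ = 101.
    def __new__(cls, *args, **kwargs):
        return np.array(*args, **kwargs).view(cls)

a = np.array([1, 2])
b = Bar([1, 2])
assert isinstance(a + b, Bar) and isinstance(b + a, Bar)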
assert_(isinstance(test_arr.ctypes._ctypes, - _internal._missing_ctypes)) - assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) - finally: - _internal.ctypes = ctypes - - def _make_readonly(x): - x.flags.writeable = False - return x - - @pytest.mark.parametrize('arr', [ - np.array([1, 2, 3]), - np.array([['one', 'two'], ['three', 'four']]), - np.array((1, 2), dtype='i4,i4'), - np.zeros((2,), dtype= - np.dtype(dict( - formats=['2, [44, 55]) - assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) - # hit one of the failing paths - assert_raises(ValueError, np.place, a, a>20, []) - - def test_put_noncontiguous(self): - a = np.arange(6).reshape(2,3).T # force non-c-contiguous - np.put(a, [0, 2], [44, 55]) - assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) - - def test_putmask_noncontiguous(self): - a = np.arange(6).reshape(2,3).T # force non-c-contiguous - # uses arr_putmask - np.putmask(a, a>2, a**2) - assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) - - def test_take_mode_raise(self): - a = np.arange(6, dtype='int') - out = np.empty(2, dtype='int') - np.take(a, [0, 2], out=out, mode='raise') - assert_equal(out, np.array([0, 2])) - - def test_choose_mod_raise(self): - a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) - out = np.empty((3,3), dtype='int') - choices = [-10, 10] - np.choose(a, choices, out=out, mode='raise') - assert_equal(out, np.array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]])) - - def test_flatiter__array__(self): - a = np.arange(9).reshape(3,3) - b = a.T.flat - c = b.__array__() - # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics - del c - - def test_dot_out(self): - # if HAVE_CBLAS, will use WRITEBACKIFCOPY - a = np.arange(9, dtype=float).reshape(3,3) - b = np.dot(a, a, out=a) - assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) - - def test_view_assign(self): - from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve - - arr = np.arange(9).reshape(3, 3).T - arr_wb = npy_create_writebackifcopy(arr) - assert_(arr_wb.flags.writebackifcopy) - assert_(arr_wb.base is arr) - arr_wb[...] = -100 - npy_resolve(arr_wb) - # arr changes after resolve, even though we assigned to arr_wb - assert_equal(arr, -100) - # after resolve, the two arrays no longer reference each other - assert_(arr_wb.ctypes.data != 0) - assert_equal(arr_wb.base, None) - # assigning to arr_wb does not get transferred to arr - arr_wb[...] = 100 - assert_equal(arr, -100) - - @pytest.mark.leaks_references( - reason="increments self in dealloc; ignore since deprecated path.") - def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T - _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 - - def test_view_discard_refcount(self): - from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard - - arr = np.arange(9).reshape(3, 3).T - orig = arr.copy() - if HAS_REFCOUNT: - arr_cnt = sys.getrefcount(arr) - arr_wb = npy_create_writebackifcopy(arr) - assert_(arr_wb.flags.writebackifcopy) - assert_(arr_wb.base is arr) - arr_wb[...] = -100 - npy_discard(arr_wb) - # arr remains unchanged after discard - assert_equal(arr, orig) - # after discard, the two arrays no longer reference each other - assert_(arr_wb.ctypes.data != 0) - assert_equal(arr_wb.base, None) - if HAS_REFCOUNT: - assert_equal(arr_cnt, sys.getrefcount(arr)) - # assigning to arr_wb does not get transferred to arr - arr_wb[...] 
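# test_putmask_noncontiguous distilled: the transposed view is not
# C-contiguous, which routes numpy through its WRITEBACKIFCOPY machinery.
import numpy as np

a = np.arange(6).reshape(2, 3).T
np.putmask(a, a > 2, a**2)
assert a.tolist() == [[0, 9], [1, 16], [2, 25]]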
= 100 - assert_equal(arr, orig) - - -class TestArange(object): - def test_infinite(self): - assert_raises_regex( - ValueError, "size exceeded", - np.arange, 0, np.inf - ) - - def test_nan_step(self): - assert_raises_regex( - ValueError, "cannot compute length", - np.arange, 0, 1, np.nan - ) - - def test_zero_step(self): - assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) - assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) - - # empty range - assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) - assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) - - -class TestArrayFinalize(object): - """ Tests __array_finalize__ """ - - def test_receives_base(self): - # gh-11237 - class SavesBase(np.ndarray): - def __array_finalize__(self, obj): - self.saved_base = self.base - - a = np.array(1).view(SavesBase) - assert_(a.saved_base is a.base) - - def test_lifetime_on_error(self): - # gh-11237 - class RaisesInFinalize(np.ndarray): - def __array_finalize__(self, obj): - # crash, but keep this object alive - raise Exception(self) - - # a plain object can't be weakref'd - class Dummy(object): pass - - # get a weak reference to an object within an array - obj_arr = np.array(Dummy()) - obj_ref = weakref.ref(obj_arr[()]) - - # get an array that crashed in __array_finalize__ - with assert_raises(Exception) as e: - obj_arr.view(RaisesInFinalize) - if sys.version_info.major == 2: - # prevent an extra reference being kept - sys.exc_clear() - - obj_subarray = e.exception.args[0] - del e - assert_(isinstance(obj_subarray, RaisesInFinalize)) - - # reference should still be held by obj_arr - break_cycles() - assert_(obj_ref() is not None, "object should not already be dead") - - del obj_arr - break_cycles() - assert_(obj_ref() is not None, "obj_arr should not hold the last reference") - - del obj_subarray - break_cycles() - assert_(obj_ref() is None, "no references should remain") - - -def test_orderconverter_with_nonASCII_unicode_ordering(): - # gh-7475 - a = np.arange(5) - assert_raises(ValueError, a.flatten, order=u'\xe2') - - -def test_equal_override(): - # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which - # did not respect overrides with __array_priority__ or __array_ufunc__. - # The PR fixed this for __array_priority__ and __array_ufunc__ = None. 
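# test_receives_base distilled: __array_finalize__ runs for every new
# instance, including views, and sees the fully initialised array.
import numpy as np

class SavesBase(np.ndarray):
    def __array_finalize__(self, obj):
        self.saved_base = self.base

a = np.array(1).view(SavesBase)
assert a.saved_base is a.base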
- class MyAlwaysEqual(object): - def __eq__(self, other): - return "eq" - - def __ne__(self, other): - return "ne" - - class MyAlwaysEqualOld(MyAlwaysEqual): - __array_priority__ = 10000 - - class MyAlwaysEqualNew(MyAlwaysEqual): - __array_ufunc__ = None - - array = np.array([(0, 1), (2, 3)], dtype='i4,i4') - for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew: - my_always_equal = my_always_equal_cls() - assert_equal(my_always_equal == array, 'eq') - assert_equal(array == my_always_equal, 'eq') - assert_equal(my_always_equal != array, 'ne') - assert_equal(array != my_always_equal, 'ne') - - -def test_npymath_complex(): - # Smoketest npymath functions - from numpy.core._multiarray_tests import ( - npy_cabs, npy_carg) - - funcs = {npy_cabs: np.absolute, - npy_carg: np.angle} - vals = (1, np.inf, -np.inf, np.nan) - types = (np.complex64, np.complex128, np.clongdouble) - - for fun, npfun in funcs.items(): - for x, y in itertools.product(vals, vals): - for t in types: - z = t(complex(x, y)) - got = fun(z) - expected = npfun(z) - assert_allclose(got, expected) - - -def test_npymath_real(): - # Smoketest npymath functions - from numpy.core._multiarray_tests import ( - npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) - - funcs = {npy_log10: np.log10, - npy_cosh: np.cosh, - npy_sinh: np.sinh, - npy_tan: np.tan, - npy_tanh: np.tanh} - vals = (1, np.inf, -np.inf, np.nan) - types = (np.float32, np.float64, np.longdouble) - - with np.errstate(all='ignore'): - for fun, npfun in funcs.items(): - for x, t in itertools.product(vals, types): - z = t(x) - got = fun(z) - expected = npfun(z) - assert_allclose(got, expected) - -def test_uintalignment_and_alignment(): - # alignment code needs to satisfy these requrements: - # 1. numpy structs match C struct layout - # 2. ufuncs/casting is safe wrt to aligned access - # 3. copy code is safe wrt to "uint alidned" access - # - # Complex types are the main problem, whose alignment may not be the same - # as their "uint alignment". - # - # This test might only fail on certain platforms, where uint64 alignment is - # not equal to complex64 alignment. The second 2 tests will only fail - # for DEBUG=1. - - d1 = np.dtype('u1,c8', align=True) - d2 = np.dtype('u4,c8', align=True) - d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) - - assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) - assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) - assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) - - # check that C struct matches numpy struct size - s = _multiarray_tests.get_struct_alignments() - for d, (alignment, size) in zip([d1,d2,d3], s): - assert_equal(d.alignment, alignment) - assert_equal(d.itemsize, size) - - # check that ufuncs don't complain in debug mode - # (this is probably OK if the aligned flag is true above) - src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often - np.exp(src) # assert fails? - - # check that copy code doesn't complain in debug mode - dst = np.zeros((2,2), dtype='c8') - dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? - -class TestAlignment(object): - # adapted from scipy._lib.tests.test__util.test__aligned_zeros - # Checks that unusual memory alignments don't trip up numpy. 
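# test_equal_override distilled: __array_ufunc__ = None makes ndarray
# return NotImplemented from comparisons, so Python falls back to the
# other operand's __eq__, even for structured arrays.
import numpy as np

class MyAlwaysEqualNew(object):
    __array_ufunc__ = None
    def __eq__(self, other):
        return "eq"

arr = np.array([(0, 1), (2, 3)], dtype='i4,i4')
assert (arr == MyAlwaysEqualNew()) == "eq"
assert (MyAlwaysEqualNew() == arr) == "eq"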
- # In particular, check RELAXED_STRIDES don't trip alignment assertions in - # NDEBUG mode for size-0 arrays (gh-12503) - - def check(self, shape, dtype, order, align): - err_msg = repr((shape, dtype, order, align)) - x = _aligned_zeros(shape, dtype, order, align=align) - if align is None: - align = np.dtype(dtype).alignment - assert_equal(x.__array_interface__['data'][0] % align, 0) - if hasattr(shape, '__len__'): - assert_equal(x.shape, shape, err_msg) - else: - assert_equal(x.shape, (shape,), err_msg) - assert_equal(x.dtype, dtype) - if order == "C": - assert_(x.flags.c_contiguous, err_msg) - elif order == "F": - if x.size > 0: - assert_(x.flags.f_contiguous, err_msg) - elif order is None: - assert_(x.flags.c_contiguous, err_msg) - else: - raise ValueError() - - def test_various_alignments(self): - for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: - for n in [0, 1, 3, 11]: - for order in ["C", "F", None]: - for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: - if dtype == 'O': - # object dtype can't be misaligned - continue - for shape in [n, (1, 2, 3, n)]: - self.check(shape, np.dtype(dtype), order, align) - - def test_strided_loop_alignments(self): - # particularly test that complex64 and float128 use right alignment - # code-paths, since these are particularly problematic. It is useful to - # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. - for align in [1, 2, 4, 8, 12, 16, None]: - xf64 = _aligned_zeros(3, np.float64) - - xc64 = _aligned_zeros(3, np.complex64, align=align) - xf128 = _aligned_zeros(3, np.longdouble, align=align) - - # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning, "Casting complex values") - xc64.astype('f8') - xf64.astype(np.complex64) - test = xc64 + xf64 - - xf128.astype('f8') - xf64.astype(np.longdouble) - test = xf128 + xf64 - - test = xf128 + xc64 - - # test copy, both to and from misaligned - # contig copy - xf64[:] = xf64.copy() - xc64[:] = xc64.copy() - xf128[:] = xf128.copy() - # strided copy - xf64[::2] = xf64[::2].copy() - xc64[::2] = xc64[::2].copy() - xf128[::2] = xf128[::2].copy() - -def test_getfield(): - a = np.arange(32, dtype='uint16') - if sys.byteorder == 'little': - i = 0 - j = 1 - else: - i = 1 - j = 0 - b = a.getfield('int8', i) - assert_equal(b, a) - b = a.getfield('int8', j) - assert_equal(b, 0) - pytest.raises(ValueError, a.getfield, 'uint8', -1) - pytest.raises(ValueError, a.getfield, 'uint8', 16) - pytest.raises(ValueError, a.getfield, 'uint64', 0) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_nditer.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_nditer.py deleted file mode 100644 index daec9ce..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_nditer.py +++ /dev/null @@ -1,2861 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import pytest - -import numpy as np -import numpy.core._multiarray_tests as _multiarray_tests -from numpy import array, arange, nditer, all -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - HAS_REFCOUNT, suppress_warnings - ) - - -def iter_multi_index(i): - ret = [] - while not i.finished: - ret.append(i.multi_index) - i.iternext() - return ret - -def iter_indices(i): - ret = [] - while not i.finished: - ret.append(i.index) - i.iternext() - return ret - -def iter_iterindices(i): - ret = [] - while not i.finished: - ret.append(i.iterindex) - i.iternext() - return ret - -@pytest.mark.skipif(not 
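# test_getfield distilled: getfield(dtype, offset) reinterprets bytes at
# a byte offset within each item, so for small uint16 values the low
# byte holds the value and the high byte is zero.
import sys
import numpy as np

a = np.arange(32, dtype='uint16')
low = 0 if sys.byteorder == 'little' else 1
assert np.array_equal(a.getfield('int8', low), a)
assert not a.getfield('int8', 1 - low).any()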
HAS_REFCOUNT, reason="Python lacks refcounts") -def test_iter_refcount(): - # Make sure the iterator doesn't leak - - # Basic - a = arange(6) - dt = np.dtype('f4').newbyteorder() - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - with nditer(a, [], - [['readwrite', 'updateifcopy']], - casting='unsafe', - op_dtypes=[dt]) as it: - assert_(not it.iterationneedsapi) - assert_(sys.getrefcount(a) > rc_a) - assert_(sys.getrefcount(dt) > rc_dt) - # del 'it' - it = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - - # With a copy - a = arange(6, dtype='f4') - dt = np.dtype('f4') - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - it = nditer(a, [], - [['readwrite']], - op_dtypes=[dt]) - rc2_a = sys.getrefcount(a) - rc2_dt = sys.getrefcount(dt) - it2 = it.copy() - assert_(sys.getrefcount(a) > rc2_a) - assert_(sys.getrefcount(dt) > rc2_dt) - it = None - assert_equal(sys.getrefcount(a), rc2_a) - assert_equal(sys.getrefcount(dt), rc2_dt) - it2 = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - - del it2 # avoid pyflakes unused variable warning - -def test_iter_best_order(): - # The iterator should always find the iteration order - # with increasing memory addresses - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, [], [['readonly']]) - assert_equal([x for x in i], a) - # Fortran-order - i = nditer(aview.T, [], [['readonly']]) - assert_equal([x for x in i], a) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) - assert_equal([x for x in i], a) - -def test_iter_c_order(): - # Test forcing C order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='C') - assert_equal([x for x in i], aview.ravel(order='C')) - # Fortran-order - i = nditer(aview.T, order='C') - assert_equal([x for x in i], aview.T.ravel(order='C')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='C') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='C')) - -def test_iter_f_order(): - # Test forcing F order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='F') - assert_equal([x for x in i], aview.ravel(order='F')) - # Fortran-order - i = nditer(aview.T, 
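# The default-order behaviour test_iter_best_order checks, in one
# snippet: nditer visits elements in memory order by default, while
# order='C' forces C-index order regardless of the strides.
import numpy as np

a = np.arange(6).reshape(2, 3)
assert [int(v) for v in np.nditer(a.T)] == [0, 1, 2, 3, 4, 5]
assert [int(v) for v in np.nditer(a.T, order='C')] == [0, 3, 1, 4, 2, 5]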
order='F') - assert_equal([x for x in i], aview.T.ravel(order='F')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='F') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='F')) - -def test_iter_c_or_f_order(): - # Test forcing any contiguous (C or F) order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='A') - assert_equal([x for x in i], aview.ravel(order='A')) - # Fortran-order - i = nditer(aview.T, order='A') - assert_equal([x for x in i], aview.T.ravel(order='A')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='A') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='A')) - -def test_iter_best_order_multi_index_1d(): - # The multi-indices should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) - # 1D reversed order - i = nditer(a[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) - -def test_iter_best_order_multi_index_2d(): - # The multi-indices should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) - -def test_iter_best_order_multi_index_3d(): - # The multi-indices should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), - (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - 
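# The 'multi_index' flag in isolation: it reports coordinates in the
# original array even as the iterator walks in its chosen order.
import numpy as np

it = np.nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
seen = []
while not it.finished:
    seen.append(it.multi_index)
    it.iternext()
assert seen == [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]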
[(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), - (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), - (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), - (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), - (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), - (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), - (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), - (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) - -def test_iter_best_order_c_index_1d(): - # The C index should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3]) - # 1D reversed order - i = nditer(a[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 2, 1, 0]) - -def test_iter_best_order_c_index_2d(): - # The C index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) - -def test_iter_best_order_c_index_3d(): - # The C index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) - -def test_iter_best_order_f_index_1d(): - # The Fortran index should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3]) - # 1D reversed order - i = nditer(a[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 2, 1, 0]) - -def test_iter_best_order_f_index_2d(): - # The Fortran index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) - -def test_iter_best_order_f_index_3d(): - # The Fortran index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) 
- assert_equal(iter_indices(i), - [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) - -def test_iter_no_inner_full_coalesce(): - # Check no_inner iterators which coalesce into a single inner loop - - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - size = np.prod(shape) - a = arange(size) - # Test each combination of forward and backwards indexing - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit) & dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Fortran-order - i = nditer(aview.T, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), - ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - -def test_iter_no_inner_dim_coalescing(): - # Check no_inner iterators whose dimensions may not coalesce completely - - # Skipping the last element in a dimension prevents coalescing - # with the next-bigger dimension - a = arange(24).reshape(2, 3, 4)[:,:, :-1] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (3,)) - a = arange(24).reshape(2, 3, 4)[:, :-1,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (8,)) - a = arange(24).reshape(2, 3, 4)[:-1,:,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (12,)) - - # Even with lots of 1-sized dimensions, should still coalesce - a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (24,)) - -def test_iter_dim_coalescing(): - # Check that the correct number of dimensions are coalesced - - # Tracking a multi-index disables coalescing - a = arange(24).reshape(2, 3, 4) - i = nditer(a, ['multi_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # A tracked index can allow coalescing if it's compatible with the array - a3d = arange(24).reshape(2, 3, 4) - i = nditer(a3d, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 1) - i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['f_index'], [['readonly']]) - assert_equal(i.ndim, 1) - i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # When C or F order is forced, coalescing may still occur - a3d = arange(24).reshape(2, 3, 4) - i = nditer(a3d, order='C') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='C') - assert_equal(i.ndim, 3) - i = nditer(a3d, order='F') - assert_equal(i.ndim, 3) - i = nditer(a3d.T, order='F') - 
assert_equal(i.ndim, 1) - i = nditer(a3d, order='A') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='A') - assert_equal(i.ndim, 1) - -def test_iter_broadcasting(): - # Standard NumPy broadcasting rules - - # 1D with scalar - i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (6,)) - - # 2D with scalar - i = nditer([arange(6).reshape(2, 3), np.int32(2)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - # 2D with 1D - i = nditer([arange(6).reshape(2, 3), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - i = nditer([arange(2).reshape(2, 1), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - # 2D with 2D - i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - - # 3D with scalar - i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 1D - i = nditer([arange(3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 2D - i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 3D - i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), - arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*3) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - -def test_iter_itershape(): - # Check that allocated outputs work with a specified shape - a = np.arange(6, dtype='i2').reshape(2, 3) - i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (2, 3, 4)) - assert_equal(i.operands[1].strides, (24, 8, 2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (3, 2, 4)) - assert_equal(i.operands[1].strides, (8, 24, 2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], - order='F', - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (3, 2, 4)) - assert_equal(i.operands[1].strides, (2, 6, 12)) - - # If we specify 1 in the itershape, it shouldn't allow broadcasting - # of that 
dimension to a bigger value - assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, 1, 4)) - # Test bug that for no op_axes but itershape, they are NULLed correctly - i = np.nditer([np.ones(2), None, None], itershape=(2,)) - -def test_iter_broadcasting_errors(): - # Check that errors are thrown for bad broadcasting shapes - - # 1D with 1D - assert_raises(ValueError, nditer, [arange(2), arange(3)], - [], [['readonly']]*2) - # 2D with 1D - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(2)], - [], [['readonly']]*2) - # 2D with 2D - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], - [], [['readonly']]*2) - # 3D with 3D - assert_raises(ValueError, nditer, - [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) - - # Verify that the error message mentions the right shapes - try: - nditer([arange(2).reshape(1, 2, 1), - arange(3).reshape(1, 3), - arange(6).reshape(2, 3)], - [], - [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) - raise AssertionError('Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain the shape of the 3rd operand - assert_(msg.find('(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) - # The message should contain the broadcast shape - assert_(msg.find('(1,2,3)') >= 0, - 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) - - try: - nditer([arange(6).reshape(2, 3), arange(2)], - [], - [['readonly'], ['readonly']], - op_axes=[[0, 1], [0, np.newaxis]], - itershape=(4, 3)) - raise AssertionError('Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain "shape->remappedshape" for each operand - assert_(msg.find('(2,3)->(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) - assert_(msg.find('(2,)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' + - '(2,)->(2,newaxis)') % msg) - # The message should contain the itershape parameter - assert_(msg.find('(4,3)') >= 0, - 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) - - try: - nditer([np.zeros((2, 1, 1)), np.zeros((2,))], - [], - [['writeonly', 'no_broadcast'], ['readonly']]) - raise AssertionError('Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain the shape of the bad operand - assert_(msg.find('(2,1,1)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) - # The message should contain the broadcast shape - assert_(msg.find('(2,1,2)') >= 0, - 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) - -def test_iter_flags_errors(): - # Check that bad combinations of flags produce errors - - a = arange(6) - - # Not enough operands - assert_raises(ValueError, nditer, [], [], []) - # Too many operands - assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) - # Bad global flag - assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) - # Bad op flag - assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) - # Bad order parameter - assert_raises(ValueError, nditer, [a], [], 
[['readonly']], order='G') - # Bad casting parameter - assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') - # op_flags must match ops - assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) - # Cannot track both a C and an F index - assert_raises(ValueError, nditer, a, - ['c_index', 'f_index'], [['readonly']]) - # Inner iteration and multi-indices/indices are incompatible - assert_raises(ValueError, nditer, a, - ['external_loop', 'multi_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop', 'c_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop', 'f_index'], [['readonly']]) - # Must specify exactly one of readwrite/readonly/writeonly per operand - assert_raises(ValueError, nditer, a, [], [[]]) - assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) - assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) - assert_raises(ValueError, nditer, a, - [], [['readonly', 'writeonly', 'readwrite']]) - # Python scalars are always readonly - assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) - assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) - # Array scalars are always readonly - assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) - assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) - # Check readonly array - a.flags.writeable = False - assert_raises(ValueError, nditer, a, [], [['writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readwrite']]) - a.flags.writeable = True - # Multi-indices available only with the multi_index flag - i = nditer(arange(6), [], [['readonly']]) - assert_raises(ValueError, lambda i:i.multi_index, i) - # Index available only with an index flag - assert_raises(ValueError, lambda i:i.index, i) - # GotoCoords and GotoIndex incompatible with buffering or no_inner - - def assign_multi_index(i): - i.multi_index = (0,) - - def assign_index(i): - i.index = 0 - - def assign_iterindex(i): - i.iterindex = 0 - - def assign_iterrange(i): - i.iterrange = (0, 1) - i = nditer(arange(6), ['external_loop']) - assert_raises(ValueError, assign_multi_index, i) - assert_raises(ValueError, assign_index, i) - assert_raises(ValueError, assign_iterindex, i) - assert_raises(ValueError, assign_iterrange, i) - i = nditer(arange(6), ['buffered']) - assert_raises(ValueError, assign_multi_index, i) - assert_raises(ValueError, assign_index, i) - assert_raises(ValueError, assign_iterrange, i) - # Can't iterate if size is zero - assert_raises(ValueError, nditer, np.array([])) - -def test_iter_slice(): - a, b, c = np.arange(3), np.arange(3), np.arange(3.) - i = nditer([a, b, c], [], ['readwrite']) - with i: - i[0:2] = (3, 3) - assert_equal(a, [3, 1, 2]) - assert_equal(b, [3, 1, 2]) - assert_equal(c, [0, 1, 2]) - i[1] = 12 - assert_equal(i[0:2], [3, 12]) - -def test_iter_assign_mapping(): - a = np.arange(24, dtype='f8').reshape(2, 3, 4).T - it = np.nditer(a, [], [['readwrite', 'updateifcopy']], - casting='same_kind', op_dtypes=[np.dtype('f4')]) - with it: - it.operands[0][...] = 3 - it.operands[0][...] = 14 - assert_equal(a, 14) - it = np.nditer(a, [], [['readwrite', 'updateifcopy']], - casting='same_kind', op_dtypes=[np.dtype('f4')]) - with it: - x = it.operands[0][-1:1] - x[...] = 14 - it.operands[0][...] 
= -1234 - assert_equal(a, -1234) - # check for no warnings on dealloc - x = None - it = None - -def test_iter_nbo_align_contig(): - # Check that byte order, alignment, and contig changes work - - # Byte order change by requesting a specific dtype - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - i = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', - op_dtypes=[np.dtype('f4')]) - with i: - # context manager triggers UPDATEIFCOPY on i at exit - assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) - assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) - assert_equal(i.operands[0], a) - i.operands[0][:] = 2 - assert_equal(au, [2]*6) - del i # should not raise a warning - # Byte order change by requesting NBO - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], - casting='equiv') as i: - # context manager triggers UPDATEIFCOPY on i at exit - assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) - assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) - assert_equal(i.operands[0], a) - i.operands[0][:] = 12345 - i.operands[0][:] = 2 - assert_equal(au, [2]*6) - - # Unaligned input - a = np.zeros((6*4+1,), dtype='i1')[1:] - a.dtype = 'f4' - a[:] = np.arange(6, dtype='f4') - assert_(not a.flags.aligned) - # Without 'aligned', shouldn't copy - i = nditer(a, [], [['readonly']]) - assert_(not i.operands[0].flags.aligned) - assert_equal(i.operands[0], a) - # With 'aligned', should make a copy - with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: - assert_(i.operands[0].flags.aligned) - # context manager triggers UPDATEIFCOPY on i at exit - assert_equal(i.operands[0], a) - i.operands[0][:] = 3 - assert_equal(a, [3]*6) - - # Discontiguous input - a = arange(12) - # If it is contiguous, shouldn't copy - i = nditer(a[:6], [], [['readonly']]) - assert_(i.operands[0].flags.contiguous) - assert_equal(i.operands[0], a[:6]) - # If it isn't contiguous, should buffer - i = nditer(a[::2], ['buffered', 'external_loop'], - [['readonly', 'contig']], - buffersize=10) - assert_(i[0].flags.contiguous) - assert_equal(i[0], a[::2]) - -def test_iter_array_cast(): - # Check that arrays are cast as requested - - # No cast 'f4' -> 'f4' - a = np.arange(6, dtype='f4').reshape(2, 3) - i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) - with i: - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f4')) - - # Byte-order cast '<f4' -> '>f4' - a = np.arange(6, dtype='<f4').reshape(2, 3) - with nditer(a, [], [['readwrite', 'updateifcopy']], - casting='equiv', - op_dtypes=[np.dtype('>f4')]) as i: - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('>f4')) - - # Safe case 'f4' -> 'f8' - a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) - i = nditer(a, [], [['readonly', 'copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f8')) - # The memory layout of the temporary should match a (a is (48,4,16)) - # except negative strides get flipped to positive strides. 
- assert_equal(i.operands[0].strides, (96, 8, 32)) - a = a[::-1,:, ::-1] - i = nditer(a, [], [['readonly', 'copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f8')) - assert_equal(i.operands[0].strides, (96, 8, 32)) - - # Same-kind cast 'f8' -> 'f4' -> 'f8' - a = np.arange(24, dtype='f8').reshape(2, 3, 4).T - with nditer(a, [], - [['readwrite', 'updateifcopy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) as i: - assert_equal(i.operands[0], a) - assert_equal(i.operands[0].dtype, np.dtype('f4')) - assert_equal(i.operands[0].strides, (4, 16, 48)) - # Check that WRITEBACKIFCOPY is activated at exit - i.operands[0][2, 1, 1] = -12.5 - assert_(a[2, 1, 1] != -12.5) - assert_equal(a[2, 1, 1], -12.5) - - a = np.arange(6, dtype='i4')[::-2] - with nditer(a, [], - [['writeonly', 'updateifcopy']], - casting='unsafe', - op_dtypes=[np.dtype('f4')]) as i: - assert_equal(i.operands[0].dtype, np.dtype('f4')) - # Even though the stride was negative in 'a', it - # becomes positive in the temporary - assert_equal(i.operands[0].strides, (4,)) - i.operands[0][:] = [1, 2, 3] - assert_equal(a, [1, 2, 3]) - -def test_iter_array_cast_errors(): - # Check that invalid casts are caught - - # Need to enable copying for casts to occur - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly']], op_dtypes=[np.dtype('f8')]) - # Also need to allow casting for casts to occur - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly', 'copy']], casting='no', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly', 'copy']], casting='equiv', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], - [['writeonly', 'updateifcopy']], - casting='no', - op_dtypes=[np.dtype('f4')]) - assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], - [['writeonly', 'updateifcopy']], - casting='equiv', - op_dtypes=[np.dtype('f4')]) - # '<f4' -> '>f4' should not work with casting='no' - assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], - [['readonly', 'copy']], casting='no', - op_dtypes=[np.dtype('>f4')]) - # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readwrite', 'updateifcopy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], - [['readwrite', 'updateifcopy']], - casting='safe', - op_dtypes=[np.dtype('f4')]) - # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], - [['readonly', 'copy']], - casting='same_kind', - op_dtypes=[np.dtype('i4')]) - assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], - [['writeonly', 'updateifcopy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - -def test_iter_scalar_cast(): - # Check that scalars are cast as requested - - # No cast 'f4' -> 'f4' - i = nditer(np.float32(2.5), [], [['readonly']], - op_dtypes=[np.dtype('f4')]) - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.value.dtype, np.dtype('f4')) - assert_equal(i.value, 2.5) - # Safe cast 'f4' -> 'f8' - i = nditer(np.float32(2.5), [], - [['readonly', 'copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.value.dtype, np.dtype('f8')) - assert_equal(i.value, 2.5) - # Same-kind cast 'f8' -> 'f4' - i = nditer(np.float64(2.5), [], - [['readonly', 'copy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - 
assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.value.dtype, np.dtype('f4')) - assert_equal(i.value, 2.5) - # Unsafe cast 'f8' -> 'i4' - i = nditer(np.float64(3.0), [], - [['readonly', 'copy']], - casting='unsafe', - op_dtypes=[np.dtype('i4')]) - assert_equal(i.dtypes[0], np.dtype('i4')) - assert_equal(i.value.dtype, np.dtype('i4')) - assert_equal(i.value, 3) - # Readonly scalars may be cast even without setting COPY or BUFFERED - i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) - assert_equal(i[0].dtype, np.dtype('f8')) - assert_equal(i[0], 3.) - -def test_iter_scalar_cast_errors(): - # Check that invalid casts are caught - - # Need to allow copying/buffering for write casts of scalars to occur - assert_raises(TypeError, nditer, np.float32(2), [], - [['readwrite']], op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, 2.5, [], - [['readwrite']], op_dtypes=[np.dtype('f4')]) - # 'f8' -> 'f4' isn't a safe cast if the value would overflow - assert_raises(TypeError, nditer, np.float64(1e60), [], - [['readonly']], - casting='safe', - op_dtypes=[np.dtype('f4')]) - # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, np.float32(2), [], - [['readonly']], - casting='same_kind', - op_dtypes=[np.dtype('i4')]) - -def test_iter_object_arrays_basic(): - # Check that object arrays work - - obj = {'a':3,'b':'d'} - a = np.array([[1, 2, 3], None, obj, None], dtype='O') - if HAS_REFCOUNT: - rc = sys.getrefcount(obj) - - # Need to allow references for object arrays - assert_raises(TypeError, nditer, a) - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a, ['refs_ok'], ['readonly']) - vals = [x_[()] for x_ in i] - assert_equal(np.array(vals, dtype='O'), a) - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], - ['readonly'], order='C') - assert_(i.iterationneedsapi) - vals = [x_[()] for x_ in i] - assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], - ['readwrite'], order='C') - with i: - for x in i: - x[...] = None - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_(sys.getrefcount(obj) == rc-1) - assert_equal(a, np.array([None]*4, dtype='O')) - -def test_iter_object_arrays_conversions(): - # Conversions to/from objects - a = np.arange(6, dtype='O') - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - with i: - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - a = np.arange(6, dtype='i4') - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - with i: - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - # Non-contiguous object array - a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) - a = a['a'] - a[:] = np.arange(6) - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - with i: - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - #Non-contiguous value array - a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) - a = a['a'] - a[:] = np.arange(6) + 98172488 - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - with i: - ob = i[0][()] - if HAS_REFCOUNT: - rc = sys.getrefcount(ob) - for x in i: - x[...] 
+= 1 - if HAS_REFCOUNT: - assert_(sys.getrefcount(ob) == rc-1) - assert_equal(a, np.arange(6)+98172489) - -def test_iter_common_dtype(): - # Check that the iterator finds a common data type correctly - - i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.dtypes[1], np.dtype('f8')) - i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.dtypes[1], np.dtype('f8')) - i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='same_kind') - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.dtypes[1], np.dtype('f4')) - i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('u4')) - assert_equal(i.dtypes[1], np.dtype('u4')) - i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i8')) - assert_equal(i.dtypes[1], np.dtype('i8')) - i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), - array([2j], dtype='c8'), array([9], dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*4, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')) - assert_equal(i.dtypes[1], np.dtype('c16')) - assert_equal(i.dtypes[2], np.dtype('c16')) - assert_equal(i.dtypes[3], np.dtype('c16')) - assert_equal(i.value, (3, -12, 2j, 9)) - - # When allocating outputs, other outputs aren't factored in - i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], - [['readonly', 'copy'], - ['writeonly', 'allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i4')) - assert_equal(i.dtypes[1], np.dtype('i4')) - assert_equal(i.dtypes[2], np.dtype('c16')) - # But, if common data types are requested, they are - i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], - ['common_dtype'], - [['readonly', 'copy'], - ['writeonly', 'allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')) - assert_equal(i.dtypes[1], np.dtype('c16')) - assert_equal(i.dtypes[2], np.dtype('c16')) - -def test_iter_copy_if_overlap(): - # Ensure the iterator makes copies on read/write overlap, if requested - - # Copy not needed, 1 op - for flag in ['readonly', 'writeonly', 'readwrite']: - a = arange(10) - i = nditer([a], ['copy_if_overlap'], [[flag]]) - with i: - assert_(i.operands[0] is a) - - # Copy needed, 2 ops, read-write overlap - x = arange(10) - a = x[1:] - b = x[:-1] - with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: - assert_(not np.shares_memory(*i.operands)) - - # Copy not needed with elementwise, 2 ops, exactly same arrays - x = arange(10) - a = x - b = x - i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], - ['readwrite', 'overlap_assume_elementwise']]) - with i: - assert_(i.operands[0] is a and i.operands[1] is b) - with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: - assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) - - # Copy not needed, 2 ops, no overlap - x = arange(10) - a = x[::2] - b = x[1::2] - i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) - 
assert_(i.operands[0] is a and i.operands[1] is b) - - # Copy needed, 2 ops, read-write overlap - x = arange(4, dtype=np.int8) - a = x[3:] - b = x.view(np.int32)[:1] - with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: - assert_(not np.shares_memory(*i.operands)) - - # Copy needed, 3 ops, read-write overlap - for flag in ['writeonly', 'readwrite']: - x = np.ones([10, 10]) - a = x - b = x.T - c = x - with nditer([a, b, c], ['copy_if_overlap'], - [['readonly'], ['readonly'], [flag]]) as i: - a2, b2, c2 = i.operands - assert_(not np.shares_memory(a2, c2)) - assert_(not np.shares_memory(b2, c2)) - - # Copy not needed, 3 ops, read-only overlap - x = np.ones([10, 10]) - a = x - b = x.T - c = x - i = nditer([a, b, c], ['copy_if_overlap'], - [['readonly'], ['readonly'], ['readonly']]) - a2, b2, c2 = i.operands - assert_(a is a2) - assert_(b is b2) - assert_(c is c2) - - # Copy not needed, 3 ops, read-only overlap - x = np.ones([10, 10]) - a = x - b = np.ones([10, 10]) - c = x.T - i = nditer([a, b, c], ['copy_if_overlap'], - [['readonly'], ['writeonly'], ['readonly']]) - a2, b2, c2 = i.operands - assert_(a is a2) - assert_(b is b2) - assert_(c is c2) - - # Copy not needed, 3 ops, write-only overlap - x = np.arange(7) - a = x[:3] - b = x[3:6] - c = x[4:7] - i = nditer([a, b, c], ['copy_if_overlap'], - [['readonly'], ['writeonly'], ['writeonly']]) - a2, b2, c2 = i.operands - assert_(a is a2) - assert_(b is b2) - assert_(c is c2) - -def test_iter_op_axes(): - # Check that custom axes work - - # Reverse the axes - a = arange(6).reshape(2, 3) - i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) - assert_(all([x == y for (x, y) in i])) - a = arange(24).reshape(2, 3, 4) - i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) - assert_(all([x == y for (x, y) in i])) - - # Broadcast 1D to any dimension - a = arange(1, 31).reshape(2, 3, 5) - b = arange(1, 3) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) - b = arange(1, 4) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) - b = arange(1, 6) - i = nditer([a, b], [], [['readonly']]*2, - op_axes=[None, [np.newaxis, np.newaxis, 0]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) - - # Inner product-style broadcasting - a = arange(24).reshape(2, 3, 4) - b = arange(40).reshape(5, 2, 4) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, - op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) - assert_equal(i.shape, (2, 3, 5, 2)) - - # Matrix product-style broadcasting - a = arange(12).reshape(3, 4) - b = arange(20).reshape(4, 5) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, - op_axes=[[0, -1], [-1, 1]]) - assert_equal(i.shape, (3, 5)) - -def test_iter_op_axes_errors(): - # Check that custom axes throws errors for bad inputs - - # Wrong number of items in op_axes - a = arange(6).reshape(2, 3) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0], [1], [0]]) - # Out of bounds items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[2, 1], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 1], [2, -1]]) - # Duplicate items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 0], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - 
op_axes=[[0, 1], [1, 1]]) - - # Different sized arrays in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 1], [0, 1, 0]]) - - # Non-broadcastable dimensions in the result - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, - op_axes=[[0, 1], [1, 0]]) - -def test_iter_copy(): - # Check that copying the iterator works correctly - a = arange(24).reshape(2, 3, 4) - - # Simple iterator - i = nditer(a) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterindex = 3 - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - # Buffered iterator - i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterindex = 3 - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterrange = (3, 9) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - i.iterrange = (2, 18) - next(i) - next(i) - j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) - - # Casting iterator - with nditer(a, ['buffered'], order='F', casting='unsafe', - op_dtypes='f8', buffersize=5) as i: - j = i.copy() - assert_equal([x[()] for x in j], a.ravel(order='F')) - - a = arange(24, dtype='<f4').reshape(2, 3, 4).T - with nditer(a, ['buffered'], order='F', casting='unsafe', - op_dtypes='>f8', buffersize=5) as i: - j = i.copy() - assert_equal([x[()] for x in j], a.ravel(order='F')) - -def test_iter_buffered_cast_byteswapped(): - # Test that buffering can handle a cast which requires swap->cast->swap - - a = np.arange(10, dtype='f4').newbyteorder().byteswap() - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('f8').newbyteorder()], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - - assert_equal(a, 2*np.arange(10, dtype='f4')) - - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning) - - a = np.arange(10, dtype='f8').newbyteorder().byteswap() - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='unsafe', - op_dtypes=[np.dtype('c8').newbyteorder()], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - - assert_equal(a, 2*np.arange(10, dtype='f8')) - -def test_iter_buffered_cast_byteswapped_complex(): - # Test that buffering can handle a cast which requires swap->cast->copy - - a = np.arange(10, dtype='c8').newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype='c8') - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16').newbyteorder()], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - with i: - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) - - a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('f4')], - buffersize=7) - with i: - for v in i: - v[...] 
*= 2 - assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) - -def test_iter_buffered_cast_structured_type(): - # Tests buffering of structured types - - # simple -> struct type (duplicates the value) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.arange(3, dtype='f4') + 0.5 - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [np.array(x) for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - - # object -> struct type - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.zeros((3,), dtype='O') - a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) - a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) - a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) - if HAS_REFCOUNT: - rc = sys.getrefcount(a[0]) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [x.copy() for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - vals, i, x = [None]*3 - if HAS_REFCOUNT: - assert_equal(sys.getrefcount(a[0]), rc) - - # single-field struct type -> simple - sdt = [('a', 'f4')] - a = np.array([(5.5,), (8,)], dtype=sdt) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes='i4') - assert_equal([x_[()] for x_ in i], [5, 8]) - - # make sure multi-field struct type -> simple doesn't work - sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) - assert_raises(TypeError, lambda: ( - nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes='i4'))) - - # struct type -> struct type (field-wise copy) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] - a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - assert_equal([np.array(x_) for x_ in i], - [np.array((1, 2, 3), dtype=sdt2), - np.array((4, 5, 6), dtype=sdt2)]) - - # make sure struct type -> struct type with different - # number of fields fails - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - - assert_raises(ValueError, lambda : ( - nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2))) - - -def test_iter_buffered_cast_subarray(): - # Tests buffering of subarrays - - # one element -> many (copies it to all) - sdt1 = [('a', 'f4')] - sdt2 = [('a', 'f8', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - for x, count in zip(i, list(range(6))): - assert_(np.all(x['a'] == count)) - - # one element -> many -> back (copies it to all) - sdt1 = [('a', 'O', (1, 1))] - sdt2 = 
[('a', 'O', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - with i: - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_(np.all(x['a'] == count)) - x['a'][0] += 2 - count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - with i: - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - x['a'] += 2 - count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'f8', (3, 2, 2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> one element (copies just element 0) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'f4', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> matching shape (straightforward copy) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'f4', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a']) - count += 1 - - # vector -> smaller vector (truncates) - sdt1 = [('a', 'f8', (6,))] - sdt2 = [('a', 'f4', (2,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*6).reshape(6, 6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a'][:2]) - count += 1 - - # vector -> bigger vector (pads with zeros) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (6,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2], a[count]['a']) - assert_equal(x['a'][2:], [0, 0, 0, 0]) - count += 1 - - # vector -> matrix (broadcasts) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][0], a[count]['a']) - assert_equal(x['a'][1], a[count]['a']) - count += 1 - - # vector -> matrix (broadcasts and zero-pads) - sdt1 = [('a', 'f8', (2, 1))] - sdt2 = [('a', 'f4', (3, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2, 1) - i = nditer(a, ['buffered', 
'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) - assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) - assert_equal(x['a'][2,:], [0, 0]) - count += 1 - - # matrix -> matrix (truncates and zero-pads) - sdt1 = [('a', 'f8', (2, 3))] - sdt2 = [('a', 'f4', (3, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2*3).reshape(6, 2, 3) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) - assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) - assert_equal(x['a'][2,:], [0, 0]) - count += 1 - -def test_iter_buffering_badwriteback(): - # Writing back from a buffer cannot combine elements - - # a needs write buffering, but had a broadcast dimension - a = np.arange(6).reshape(2, 3, 1) - b = np.arange(12).reshape(2, 3, 2) - assert_raises(ValueError, nditer, [a, b], - ['buffered', 'external_loop'], - [['readwrite'], ['writeonly']], - order='C') - - # But if a is readonly, it's fine - nditer([a, b], ['buffered', 'external_loop'], - [['readonly'], ['writeonly']], - order='C') - - # If a has just one element, it's fine too (constant 0 stride, a reduction) - a = np.arange(1).reshape(1, 1, 1) - nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], - [['readwrite'], ['writeonly']], - order='C') - - # check that it fails on other dimensions too - a = np.arange(6).reshape(1, 3, 2) - assert_raises(ValueError, nditer, [a, b], - ['buffered', 'external_loop'], - [['readwrite'], ['writeonly']], - order='C') - a = np.arange(4).reshape(2, 1, 2) - assert_raises(ValueError, nditer, [a, b], - ['buffered', 'external_loop'], - [['readwrite'], ['writeonly']], - order='C') - -def test_iter_buffering_string(): - # Safe casting disallows shrinking strings - a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) - assert_equal(a.dtype, np.dtype('S4')) - assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], - op_dtypes='S2') - i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') - assert_equal(i[0], b'abc') - assert_equal(i[0].dtype, np.dtype('S6')) - - a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_) - assert_equal(a.dtype, np.dtype('U4')) - assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], - op_dtypes='U2') - i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') - assert_equal(i[0], u'abc') - assert_equal(i[0].dtype, np.dtype('U6')) - -def test_iter_buffering_growinner(): - # Test that the inner loop grows when no buffering is needed - a = np.arange(30) - i = nditer(a, ['buffered', 'growinner', 'external_loop'], - buffersize=5) - # Should end up with just one inner loop here - assert_equal(i[0].size, a.size) - - -@pytest.mark.slow -def test_iter_buffered_reduce_reuse(): - # large enough array for all views, including negative strides. - a = np.arange(2*3**5)[3**5:3**5+1] - flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] - op_flags = [('readonly',), ('readwrite', 'allocate')] - op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] - # wrong dtype to force buffering - op_dtypes = [float, a.dtype] - - def get_params(): - for xs in range(-3**2, 3**2 + 1): - for ys in range(xs, 3**2 + 1): - for op_axes in op_axes_list: - # last stride is reduced and because of that not - # important for this test, as it is the inner stride. 
- strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) - arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) - - for skip in [0, 1]: - yield arr, op_axes, skip - - for arr, op_axes, skip in get_params(): - nditer2 = np.nditer([arr.copy(), None], - op_axes=op_axes, flags=flags, op_flags=op_flags, - op_dtypes=op_dtypes) - with nditer2: - nditer2.operands[-1][...] = 0 - nditer2.reset() - nditer2.iterindex = skip - - for (a2_in, b2_in) in nditer2: - b2_in += a2_in.astype(np.int_) - - comp_res = nditer2.operands[-1] - - for bufsize in range(0, 3**3): - nditer1 = np.nditer([arr, None], - op_axes=op_axes, flags=flags, op_flags=op_flags, - buffersize=bufsize, op_dtypes=op_dtypes) - with nditer1: - nditer1.operands[-1][...] = 0 - nditer1.reset() - nditer1.iterindex = skip - - for (a1_in, b1_in) in nditer1: - b1_in += a1_in.astype(np.int_) - - res = nditer1.operands[-1] - assert_array_equal(res, comp_res) - - -def test_iter_no_broadcast(): - # Test that the no_broadcast flag works - a = np.arange(24).reshape(2, 3, 4) - b = np.arange(6).reshape(2, 3, 1) - c = np.arange(12).reshape(3, 4) - - nditer([a, b, c], [], - [['readonly', 'no_broadcast'], - ['readonly'], ['readonly']]) - assert_raises(ValueError, nditer, [a, b, c], [], - [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) - assert_raises(ValueError, nditer, [a, b, c], [], - [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) - - -class TestIterNested(object): - - def test_basic(self): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - def test_reorder(self): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - # In 'K' order (default), it gets reordered - i, j = np.nested_iters(a, [[0], [2, 1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0], [2, 1]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) - - def test_flip_axes(self): - # Test nested iteration with negative axes - a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] - - # In 'K' order (default), the axes all get flipped - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = 
np.nested_iters(a, [[0, 2], [1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, [[0], [1, 2]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) - - i, j = np.nested_iters(a, [[0, 1], [2]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) - - i, j = np.nested_iters(a, [[0, 2], [1]], order='C') - vals = [list(j) for _ in i] - assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) - - def test_broadcast(self): - # Test nested iteration with broadcasting - a = arange(2).reshape(2, 1) - b = arange(3).reshape(1, 3) - - i, j = np.nested_iters([a, b], [[0], [1]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) - - i, j = np.nested_iters([a, b], [[1], [0]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) - - def test_dtype_copy(self): - # Test nested iteration with a copy to change dtype - - # copy - a = arange(6, dtype='i4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readonly', 'copy'], - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) - vals = None - - # writebackifcopy - using context manager - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - with i, j: - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - - # writebackifcopy - using close() - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - i.close() - j.close() - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - - def test_dtype_buffered(self): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - - def test_0d(self): - a = np.arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[], [1, 0, 2]]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0, 2], []]) - vals = [list(j) for _ in i] - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) - - i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) - vals = [] - for x in i: - for y in j: - vals.append([z for z in k]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - def test_iter_nested_iters_dtype_buffered(self): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - with i, j: - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] 
+= 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - -def test_iter_reduction_error(): - - a = np.arange(6) - assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0], [-1]]) - - a = np.arange(6).reshape(2, 3) - assert_raises(ValueError, nditer, [a, None], ['external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0, 1], [-1, -1]]) - -def test_iter_reduction(): - # Test doing reductions with the iterator - - a = np.arange(6) - i = nditer([a, None], ['reduce_ok'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0], [-1]]) - # Need to initialize the output operand to the addition unit - with i: - i.operands[1][...] = 0 - # Do the reduction - for x, y in i: - y[...] += x - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - a = np.arange(6).reshape(2, 3) - i = nditer([a, None], ['reduce_ok', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0, 1], [-1, -1]]) - # Need to initialize the output operand to the addition unit - with i: - i.operands[1][...] = 0 - # Reduction shape/strides for the output - assert_equal(i[1].shape, (6,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - # Use a for loop instead of ``y[...] += x`` - # (equivalent to ``y[...] = y[...].copy() + x``), - # because y has zero strides we use for the reduction - for j in range(len(y)): - y[j] += x[j] - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - # This is a tricky reduction case for the buffering double loop - # to handle - a = np.ones((2, 3, 5)) - it1 = nditer([a, None], ['reduce_ok', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0, -1, 1]]) - it2 = nditer([a, None], ['reduce_ok', 'external_loop', - 'buffered', 'delay_bufalloc'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0, -1, 1]], buffersize=10) - with it1, it2: - it1.operands[1].fill(0) - it2.operands[1].fill(0) - it2.reset() - for x in it1: - x[1][...] += x[0] - for x in it2: - x[1][...] += x[0] - assert_equal(it1.operands[1], it2.operands[1]) - assert_equal(it2.operands[1].sum(), a.size) - -def test_iter_buffering_reduction(): - # Test doing buffered reductions with the iterator - - a = np.arange(6) - b = np.array(0., dtype='f8').byteswap().newbyteorder() - i = nditer([a, b], ['reduce_ok', 'buffered'], - [['readonly'], ['readwrite', 'nbo']], - op_axes=[[0], [-1]]) - with i: - assert_equal(i[1].dtype, np.dtype('f8')) - assert_(i[1].dtype != b.dtype) - # Do the reduction - for x, y in i: - y[...] += x - # Since no axes were specified, should have allocated a scalar - assert_equal(b, np.sum(a)) - - a = np.arange(6).reshape(2, 3) - b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() - i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], - [['readonly'], ['readwrite', 'nbo']], - op_axes=[[0, 1], [0, -1]]) - # Reduction shape/strides for the output - with i: - assert_equal(i[1].shape, (3,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - # Use a for loop instead of ``y[...] += x`` - # (equivalent to ``y[...] 
= y[...].copy() + x``), - # because y has zero strides we use for the reduction - for j in range(len(y)): - y[j] += x[j] - assert_equal(b, np.sum(a, axis=1)) - - # Iterator inner double loop was wrong on this one - p = np.arange(2) + 1 - it = np.nditer([p, None], - ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[-1, 0], [-1, -1]], - itershape=(2, 2)) - with it: - it.operands[1].fill(0) - it.reset() - assert_equal(it[0], [1, 2, 1, 2]) - - # Iterator inner loop should take argument contiguity into account - x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) - x[...] = np.arange(x.size).reshape(x.shape) - y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) - y_base_copy = y_base.copy() - y = y_base[::2,:,None] - - it = np.nditer([y, x], - ['buffered', 'external_loop', 'reduce_ok'], - [['readwrite'], ['readonly']]) - with it: - for a, b in it: - a.fill(2) - - assert_equal(y_base[1::2], y_base_copy[1::2]) - assert_equal(y_base[::2], 2) - -def test_iter_buffering_reduction_reuse_reduce_loops(): - # There was a bug triggering reuse of the reduce loop inappropriately, - # which caused processing to happen in unnecessarily small chunks - # and overran the buffer. - - a = np.zeros((2, 7)) - b = np.zeros((1, 7)) - it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], - op_flags=[['readonly'], ['readwrite']], - buffersize=5) - - with it: - bufsizes = [x.shape[0] for x, y in it] - assert_equal(bufsizes, [5, 2, 5, 2]) - assert_equal(sum(bufsizes), a.size) - -def test_iter_writemasked_badinput(): - a = np.zeros((2, 3)) - b = np.zeros((3,)) - m = np.array([[True, True, False], [False, True, False]]) - m2 = np.array([True, True, False]) - m3 = np.array([0, 1, 1], dtype='u1') - mbad1 = np.array([0, 1, 1], dtype='i1') - mbad2 = np.array([0, 1, 1], dtype='f4') - - # Need an 'arraymask' if any operand is 'writemasked' - assert_raises(ValueError, nditer, [a, m], [], - [['readwrite', 'writemasked'], ['readonly']]) - - # A 'writemasked' operand must not be readonly - assert_raises(ValueError, nditer, [a, m], [], - [['readonly', 'writemasked'], ['readonly', 'arraymask']]) - - # 'writemasked' and 'arraymask' may not be used together - assert_raises(ValueError, nditer, [a, m], [], - [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) - - # 'arraymask' may only be specified once - assert_raises(ValueError, nditer, [a, m, m2], [], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask'], - ['readonly', 'arraymask']]) - - # An 'arraymask' with nothing 'writemasked' also doesn't make sense - assert_raises(ValueError, nditer, [a, m], [], - [['readwrite'], ['readonly', 'arraymask']]) - - # A writemasked reduction requires a similarly smaller mask - assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - # But this should work with a smaller/equal mask to the reduction operand - np.nditer([a, b, m2], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - # The arraymask itself cannot be a reduction - assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readwrite', 'arraymask']]) - - # A uint8 mask is ok too - np.nditer([a, m3], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - # An int8 mask isn't ok - assert_raises(TypeError, np.nditer, [a, 
mbad1], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - # A float32 mask isn't ok - assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - -def test_iter_writemasked(): - a = np.zeros((3,), dtype='f8') - msk = np.array([True, True, False]) - - # When buffering is unused, 'writemasked' effectively does nothing. - # It's up to the user of the iterator to obey the requested semantics. - it = np.nditer([a, msk], [], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - with it: - for x, m in it: - x[...] = 1 - # Because we violated the semantics, all the values became 1 - assert_equal(a, [1, 1, 1]) - - # Even if buffering is enabled, we still may be accessing the array - # directly. - it = np.nditer([a, msk], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - with it: - for x, m in it: - x[...] = 2.5 - # Because we violated the semantics, all the values became 2.5 - assert_equal(a, [2.5, 2.5, 2.5]) - - # If buffering will definitely happen, for instance because of - # a cast, only the items selected by the mask will be copied back from - # the buffer. - it = np.nditer([a, msk], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['i8', None], - casting='unsafe') - with it: - for x, m in it: - x[...] = 3 - # Even though we violated the semantics, only the selected values - # were copied back - assert_equal(a, [3, 3, 2.5]) - -def test_iter_non_writable_attribute_deletion(): - it = np.nditer(np.ones(2)) - attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc", - "iterationneedsapi", "has_multi_index", "has_index", "dtypes", - "ndim", "nop", "itersize", "finished"] - - for s in attr: - assert_raises(AttributeError, delattr, it, s) - - -def test_iter_writable_attribute_deletion(): - it = np.nditer(np.ones(2)) - attr = ["multi_index", "index", "iterrange", "iterindex"] - for s in attr: - assert_raises(AttributeError, delattr, it, s) - - -def test_iter_element_deletion(): - it = np.nditer(np.ones(3)) - try: - del it[1] - del it[1:2] - except TypeError: - pass - except Exception: - raise AssertionError - -def test_iter_allocated_array_dtypes(): - # If the dtype of an allocated output has a shape, the shape gets - # tacked onto the end of the result.
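For reference, a minimal standalone sketch of that allocated-output behaviour (a sketch using only the public numpy API exercised by the deleted test that follows; the variable names are illustrative):

    import numpy as np

    # nditer allocates the second operand with a subarray dtype ('i4', (2,)),
    # so the subarray shape (2,) is appended to the iteration shape (3,)
    # and the allocated output ends up with shape (3, 2).
    it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
    for a, b in it:
        b[0] = a - 1
        b[1] = a + 1
    print(it.operands[1].shape)  # -> (3, 2)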
- it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))]) - for a, b in it: - b[0] = a - 1 - b[1] = a + 1 - assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) - - # Make sure this works for scalars too - it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) - for a, b, c in it: - c[0, 0] = a - b - c[0, 1] = a + b - c[1, 0] = a * b - c[1, 1] = a / b - assert_equal(it.operands[2], [[8, 12], [20, 5]]) - - -def test_0d_iter(): - # Basic test for iteration of 0-d arrays: - i = nditer([2, 3], ['multi_index'], [['readonly']]*2) - assert_equal(i.ndim, 0) - assert_equal(next(i), (2, 3)) - assert_equal(i.multi_index, ()) - assert_equal(i.iterindex, 0) - assert_raises(StopIteration, next, i) - # test reset: - i.reset() - assert_equal(next(i), (2, 3)) - assert_raises(StopIteration, next, i) - - # test forcing to 0-d - i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()]) - assert_equal(i.ndim, 0) - assert_equal(len(i), 1) - # note that itershape=() still behaves like None due to the conversions - - # Test a more complex buffered casting case (same as another test above) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.array(0.5, dtype='f4') - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', op_dtypes=sdt) - vals = next(i) - assert_equal(vals['a'], 0.5) - assert_equal(vals['b'], 0) - assert_equal(vals['c'], [[(0.5)]*3]*2) - assert_equal(vals['d'], 0.5) - - -def test_iter_too_large(): - # The total size of the iterator must not exceed the maximum intp due - # to broadcasting. Dividing by 1024 will keep it small enough to - # give a legal array. - size = np.iinfo(np.intp).max // 1024 - arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,)) - assert_raises(ValueError, nditer, (arr, arr[:, None])) - # test the same for multiindex. That may get more interesting when - # removing a 0-dimensional axis is allowed (since an iterator can grow then) - assert_raises(ValueError, nditer, - (arr, arr[:, None]), flags=['multi_index']) - - -def test_iter_too_large_with_multiindex(): - # When a multi index is being tracked, the error is delayed; this - # checks the delayed error messages and checks getting below the limit - # by removing an axis. - base_size = 2**10 - num = 1 - while base_size**num < np.iinfo(np.intp).max: - num += 1 - - shape_template = [1, 1] * num - arrays = [] - for i in range(num): - shape = shape_template[:] - shape[i * 2] = 2**10 - arrays.append(np.empty(shape)) - arrays = tuple(arrays) - - # arrays are now too large to be broadcast. The different modes test - # different nditer functionality with or without GIL.
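As a minimal standalone sketch of the size rule these tests enforce (assuming only public numpy calls; the zero-stride view makes the huge array cheap to create):

    import numpy as np

    # Broadcasting these two views would need roughly size**2 elements,
    # which overflows the maximum intp, so nditer must refuse cleanly
    # with a ValueError instead of overflowing.
    size = np.iinfo(np.intp).max // 1024
    arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
    try:
        np.nditer((arr, arr[:, None]))
    except ValueError as exc:
        print('rejected:', exc)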
- for mode in range(6): - with assert_raises(ValueError): - _multiarray_tests.test_nditer_too_large(arrays, -1, mode) - # but if we do nothing with the nditer, it can be constructed: - _multiarray_tests.test_nditer_too_large(arrays, -1, 7) - - # When an axis is removed, things should work again (half the time): - for i in range(num): - for mode in range(6): - # an axis with size 1024 is removed: - _multiarray_tests.test_nditer_too_large(arrays, i*2, mode) - # an axis with size 1 is removed: - with assert_raises(ValueError): - _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode) - -def test_writebacks(): - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - assert_(a.dtype.byteorder != au.dtype.byteorder) - it = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - with it: - it.operands[0][:] = 100 - assert_equal(au, 100) - # do it again, this time raise an error, - it = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - try: - with it: - assert_equal(au.flags.writeable, False) - it.operands[0][:] = 0 - raise ValueError('exit context manager on exception') - except: - pass - assert_equal(au, 0) - assert_equal(au.flags.writeable, True) - # cannot reuse i outside context manager - assert_raises(ValueError, getattr, it, 'operands') - - it = nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - with it: - x = it.operands[0] - x[:] = 6 - assert_(x.flags.writebackifcopy) - assert_equal(au, 6) - assert_(not x.flags.writebackifcopy) - x[:] = 123 # x.data still valid - assert_equal(au, 6) # but not connected to au - - it = nditer(au, [], - [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - # reentering works - with it: - with it: - for x in it: - x[...] = 123 - - it = nditer(au, [], - [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - # make sure exiting the inner context manager closes the iterator - with it: - with it: - for x in it: - x[...] = 123 - assert_raises(ValueError, getattr, it, 'operands') - # do not crash if original data array is decrefed - it = nditer(au, [], - [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - del au - with it: - for x in it: - x[...] 
= 123 - # make sure we cannot reenter the closed iterator - enter = it.__enter__ - assert_raises(RuntimeError, enter) - -def test_close_equivalent(): - ''' using a context manager and using nditer.close are equivalent - ''' - def add_close(x, y, out=None): - addop = np.add - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - for (a, b, c) in it: - addop(a, b, out=c) - ret = it.operands[2] - it.close() - return ret - - def add_context(x, y, out=None): - addop = np.add - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - with it: - for (a, b, c) in it: - addop(a, b, out=c) - return it.operands[2] - z = add_close(range(5), range(5)) - assert_equal(z, range(0, 10, 2)) - z = add_context(range(5), range(5)) - assert_equal(z, range(0, 10, 2)) - -def test_close_raises(): - it = np.nditer(np.arange(3)) - assert_equal(next(it), 0) - it.close() - assert_raises(StopIteration, next, it) - assert_raises(ValueError, getattr, it, 'operands') - -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -def test_warn_noclose(): - a = np.arange(6, dtype='f4') - au = a.byteswap().newbyteorder() - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) - del it - assert len(sup.log) == 1 diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numeric.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_numeric.py deleted file mode 100644 index ffebdf6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numeric.py +++ /dev/null @@ -1,3117 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -import itertools -import platform -import pytest -from decimal import Decimal - -import numpy as np -from numpy.core import umath -from numpy.random import rand, randint, randn -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_warns, HAS_REFCOUNT - ) - - -class TestResize(object): - def test_copies(self): - A = np.array([[1, 2], [3, 4]]) - Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) - assert_equal(np.resize(A, (2, 4)), Ar1) - - Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) - assert_equal(np.resize(A, (4, 2)), Ar2) - - Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) - assert_equal(np.resize(A, (4, 3)), Ar3) - - def test_zeroresize(self): - A = np.array([[1, 2], [3, 4]]) - Ar = np.resize(A, (0,)) - assert_array_equal(Ar, np.array([])) - assert_equal(A.dtype, Ar.dtype) - - Ar = np.resize(A, (0, 2)) - assert_equal(Ar.shape, (0, 2)) - - Ar = np.resize(A, (2, 0)) - assert_equal(Ar.shape, (2, 0)) - - def test_reshape_from_zero(self): - # See also gh-6740 - A = np.zeros(0, dtype=[('a', np.float32)]) - Ar = np.resize(A, (2, 1)) - assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) - assert_equal(A.dtype, Ar.dtype) - - -class TestNonarrayArgs(object): - # check that non-array arguments to functions wrap them in arrays - def test_choose(self): - choices = [[0, 1, 2], - [3, 4, 5], - [5, 6, 7]] - tgt = [5, 1, 5] - a = [2, 0, 1] - - out = np.choose(a, choices) - assert_equal(out, tgt) - - def test_clip(self): - arr = [-1, 5, 2, 3, 10, -4, -9] - out = np.clip(arr, 2, 7) - tgt = [2, 5, 2, 3, 7, 2, 2] - assert_equal(out, tgt) - - def test_compress(self): - arr = [[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]] - tgt = [[5, 6, 7,
8, 9]] - out = np.compress([0, 1], arr, axis=0) - assert_equal(out, tgt) - - def test_count_nonzero(self): - arr = [[0, 1, 7, 0, 0], - [3, 0, 0, 2, 19]] - tgt = np.array([2, 3]) - out = np.count_nonzero(arr, axis=1) - assert_equal(out, tgt) - - def test_cumproduct(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720]))) - - def test_diagonal(self): - a = [[0, 1, 2, 3], - [4, 5, 6, 7], - [8, 9, 10, 11]] - out = np.diagonal(a) - tgt = [0, 5, 10] - - assert_equal(out, tgt) - - def test_mean(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(np.mean(A) == 3.5) - assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) - assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.mean([]))) - assert_(w[0].category is RuntimeWarning) - - def test_ptp(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(np.ptp(a, axis=0), 15.0) - - def test_prod(self): - arr = [[1, 2, 3, 4], - [5, 6, 7, 9], - [10, 3, 4, 5]] - tgt = [24, 1890, 600] - - assert_equal(np.prod(arr, axis=-1), tgt) - - def test_ravel(self): - a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] - tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] - assert_equal(np.ravel(a), tgt) - - def test_repeat(self): - a = [1, 2, 3] - tgt = [1, 1, 2, 2, 3, 3] - - out = np.repeat(a, 2) - assert_equal(out, tgt) - - def test_reshape(self): - arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] - tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] - assert_equal(np.reshape(arr, (2, 6)), tgt) - - def test_round(self): - arr = [1.56, 72.54, 6.35, 3.25] - tgt = [1.6, 72.5, 6.4, 3.2] - assert_equal(np.around(arr, decimals=1), tgt) - - def test_searchsorted(self): - arr = [-8, -5, -1, 3, 6, 10] - out = np.searchsorted(arr, 0) - assert_equal(out, 3) - - def test_size(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(np.size(A) == 6) - assert_(np.size(A, 0) == 2) - assert_(np.size(A, 1) == 3) - - def test_squeeze(self): - A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] - assert_equal(np.squeeze(A).shape, (3, 3)) - assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,)) - assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1)) - assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3)) - assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3)) - assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,)) - assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1)) - assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3)) - assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3)) - - def test_std(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_almost_equal(np.std(A), 1.707825127659933) - assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) - assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.std([]))) - assert_(w[0].category is RuntimeWarning) - - def test_swapaxes(self): - tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] - a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] - out = np.swapaxes(a, 0, 2) - assert_equal(out, tgt) - - def test_sum(self): - m = [[1, 2, 3], - [4, 5, 6], - [7, 8, 9]] - tgt = [[6], [15], [24]] - out = np.sum(m, axis=1, keepdims=True) - - assert_equal(tgt, out) - - def test_take(self): - tgt = [2, 3, 5] - indices = [1, 2, 4] - a = [1, 2, 3, 4, 5] - - out = np.take(a, indices) - 
assert_equal(out, tgt) - - def test_trace(self): - c = [[1, 2], [3, 4], [5, 6]] - assert_equal(np.trace(c), 5) - - def test_transpose(self): - arr = [[1, 2], [3, 4], [5, 6]] - tgt = [[1, 3, 5], [2, 4, 6]] - assert_equal(np.transpose(arr, (1, 0)), tgt) - - def test_var(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_almost_equal(np.var(A), 2.9166666666666665) - assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) - assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.var([]))) - assert_(w[0].category is RuntimeWarning) - - B = np.array([None, 0]) - B[0] = 1j - assert_almost_equal(np.var(B), 0.25) - -class TestIsscalar(object): - def test_isscalar(self): - assert_(np.isscalar(3.1)) - assert_(np.isscalar(np.int16(12345))) - assert_(np.isscalar(False)) - assert_(np.isscalar('numpy')) - assert_(not np.isscalar([3.1])) - assert_(not np.isscalar(None)) - - # PEP 3141 - from fractions import Fraction - assert_(np.isscalar(Fraction(5, 17))) - from numbers import Number - assert_(np.isscalar(Number())) - - -class TestBoolScalar(object): - def test_logical(self): - f = np.False_ - t = np.True_ - s = "xyz" - assert_((t and s) is s) - assert_((f and s) is f) - - def test_bitwise_or(self): - f = np.False_ - t = np.True_ - assert_((t | t) is t) - assert_((f | t) is t) - assert_((t | f) is t) - assert_((f | f) is f) - - def test_bitwise_and(self): - f = np.False_ - t = np.True_ - assert_((t & t) is t) - assert_((f & t) is f) - assert_((t & f) is f) - assert_((f & f) is f) - - def test_bitwise_xor(self): - f = np.False_ - t = np.True_ - assert_((t ^ t) is f) - assert_((f ^ t) is t) - assert_((t ^ f) is t) - assert_((f ^ f) is f) - - -class TestBoolArray(object): - def setup(self): - # offset for simd tests - self.t = np.array([True] * 41, dtype=bool)[1::] - self.f = np.array([False] * 41, dtype=bool)[1::] - self.o = np.array([False] * 42, dtype=bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False - - def test_all_any(self): - assert_(self.t.all()) - assert_(self.t.any()) - assert_(not self.f.all()) - assert_(not self.f.any()) - assert_(self.nm.any()) - assert_(self.im.any()) - assert_(not self.nm.all()) - assert_(not self.im.all()) - # check bad element in all positions - for i in range(256 - 7): - d = np.array([False] * 256, dtype=bool)[7::] - d[i] = True - assert_(np.any(d)) - e = np.array([True] * 256, dtype=bool)[7::] - e[i] = False - assert_(not np.all(e)) - assert_array_equal(e, ~d) - # big array test for blocked libc loops - for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: - d = np.array([False] * 100043, dtype=bool) - d[i] = True - assert_(np.any(d), msg="%r" % i) - e = np.array([True] * 100043, dtype=bool) - e[i] = False - assert_(not np.all(e), msg="%r" % i) - - def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) - - def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - 
assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) - - -class TestBoolCmp(object): - def setup(self): - self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=bool) - self.d = np.ones(128, dtype=np.float64) - self.ed = np.ones(self.d.size, dtype=bool) - # generate values for all permutation of 256bit simd vectors - s = 0 - for i in range(32): - self.f[s:s+8] = [i & 2**x for x in range(8)] - self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s+4] = [i & 2**x for x in range(4)] - self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - self.inff = self.f.copy() - self.infd = self.d.copy() - self.inff[::3][self.ef[::3]] = np.inf - self.infd[::3][self.ed[::3]] = np.inf - self.inff[1::3][self.ef[1::3]] = -np.inf - self.infd[1::3][self.ed[1::3]] = -np.inf - self.inff[2::3][self.ef[2::3]] = np.nan - self.infd[2::3][self.ed[2::3]] = np.nan - self.efnonan = self.ef.copy() - self.efnonan[2::3] = False - self.ednonan = self.ed.copy() - self.ednonan[2::3] = False - - self.signf = self.f.copy() - self.signd = self.d.copy() - self.signf[self.ef] *= -1. - self.signd[self.ed] *= -1. - self.signf[1::6][self.ef[1::6]] = -np.inf - self.signd[1::6][self.ed[1::6]] = -np.inf - self.signf[3::6][self.ef[3::6]] = -np.nan - self.signd[3::6][self.ed[3::6]] = -np.nan - self.signf[4::6][self.ef[4::6]] = -0. - self.signd[4::6][self.ed[4::6]] = -0. 
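Before the per-offset assertions that follow, a compact standalone illustration of the invariants TestBoolCmp pins down (a sketch, not part of the deleted file): comparisons and the inspection ufuncs must agree with an explicit boolean mask, and the resulting booleans must be exactly the bytes 0x0/0x1 regardless of SIMD path or alignment offset.

    import numpy as np

    f = np.array([0.0, 1.0, np.nan, -np.inf])
    print(f != 0)                  # [False  True  True  True]
    print(np.isnan(f))             # [False False  True False]
    print(np.isfinite(f))          # [ True  True False False]
    print(np.signbit(f))           # [False False False  True]
    print((f != 0).view(np.int8))  # [0 1 1 1] -- bool stored as 0x0/0x1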
- - def test_float(self): - # offset for alignment test - for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] - assert_array_equal(r, r2) - assert_array_equal(r, r3) - # check bool == 0x1 - assert_array_equal(r.view(np.int8), r.astype(np.int8)) - assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) - assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - - # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) - assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) - assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) - assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) - - def test_double(self): - # offset for alignment test - for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] - assert_array_equal(r, r2) - assert_array_equal(r, r3) - # check bool == 0x1 - assert_array_equal(r.view(np.int8), r.astype(np.int8)) - assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) - assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - - # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) - assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) - assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) - assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) - - -class TestSeterr(object): - def test_default(self): - err = np.geterr() - assert_equal(err, - dict(divide='warn', - invalid='warn', - over='warn', - under='ignore') - ) - - def test_set(self): - with np.errstate(): - err = np.seterr() - old = np.seterr(divide='print') - assert_(err == old) - new = np.seterr() - assert_(new['divide'] == 'print') - np.seterr(over='raise') - assert_(np.geterr()['over'] == 'raise') - assert_(new['divide'] == 'print') - np.seterr(**old) - assert_(np.geterr() == old) - - @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") - def test_divide_err(self): - with np.errstate(divide='raise'): - with assert_raises(FloatingPointError): - np.array([1.]) / np.array([0.]) - - np.seterr(divide='ignore') - np.array([1.]) / np.array([0.]) - - def test_errobj(self): - olderrobj = np.geterrobj() - self.called = 0 - try: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - with np.errstate(divide='warn'): - np.seterrobj([20000, 1, None]) - np.array([1.]) / np.array([0.]) - assert_equal(len(w), 1) - - def log_err(*args): - self.called += 1 - extobj_err = args - assert_(len(extobj_err) == 2) - assert_("divide" in extobj_err[0]) - - with np.errstate(divide='ignore'): - np.seterrobj([20000, 3, log_err]) - np.array([1.]) / np.array([0.]) - assert_equal(self.called, 1) - - np.seterrobj(olderrobj) - with 
np.errstate(divide='ignore'): - np.divide(1., 0., extobj=[20000, 3, log_err]) - assert_equal(self.called, 2) - finally: - np.seterrobj(olderrobj) - del self.called - - def test_errobj_noerrmask(self): - # errmask = 0 has a special code path for the default - olderrobj = np.geterrobj() - try: - # set errobj to something non default - np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, - umath.ERR_DEFAULT + 1, None]) - # call a ufunc - np.isnan(np.array([6])) - # same with the default, lots of times to get rid of possible - # pre-existing stack in the code - for i in range(10000): - np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, - None]) - np.isnan(np.array([6])) - finally: - np.seterrobj(olderrobj) - - -class TestFloatExceptions(object): - def assert_raises_fpe(self, fpeerr, flop, x, y): - ftype = type(x) - try: - flop(x, y) - assert_(False, - "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) - except FloatingPointError as exc: - assert_(str(exc).find(fpeerr) >= 0, - "Type %s raised wrong fpe error '%s'." % (ftype, exc)) - - def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): - # Check that fpe exception is raised. - # - # Given a floating operation `flop` and two scalar values, check that - # the operation raises the floating point exception specified by - # `fpeerr`. Tests all variants with 0-d array scalars as well. - - self.assert_raises_fpe(fpeerr, flop, sc1, sc2) - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) - self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) - - def test_floating_exceptions(self): - # Test basic arithmetic function errors - with np.errstate(all='raise'): - # Test for all real and complex float types - for typecode in np.typecodes['AllFloat']: - ftype = np.obj2sctype(typecode) - if np.dtype(ftype).kind == 'f': - # Get some extreme values for the type - fi = np.finfo(ftype) - ft_tiny = fi.tiny - ft_max = fi.max - ft_eps = fi.eps - underflow = 'underflow' - divbyzero = 'divide by zero' - else: - # 'c', complex, corresponding real dtype - rtype = type(ftype(0).real) - fi = np.finfo(rtype) - ft_tiny = ftype(fi.tiny) - ft_max = ftype(fi.max) - ft_eps = ftype(fi.eps) - # The complex types raise different exceptions - underflow = '' - divbyzero = '' - overflow = 'overflow' - invalid = 'invalid' - - self.assert_raises_fpe(underflow, - lambda a, b: a/b, ft_tiny, ft_max) - self.assert_raises_fpe(underflow, - lambda a, b: a*b, ft_tiny, ft_tiny) - self.assert_raises_fpe(overflow, - lambda a, b: a*b, ft_max, ftype(2)) - self.assert_raises_fpe(overflow, - lambda a, b: a/b, ft_max, ftype(0.5)) - self.assert_raises_fpe(overflow, - lambda a, b: a+b, ft_max, ft_max*ft_eps) - self.assert_raises_fpe(overflow, - lambda a, b: a-b, -ft_max, ft_max*ft_eps) - self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) - self.assert_raises_fpe(divbyzero, - lambda a, b: a/b, ftype(1), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(0), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a, b: a-b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a*b, ftype(0), ftype(np.inf)) - - def test_warnings(self): - # test warning code path - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - with np.errstate(all="warn"): - np.divide(1, 0.) 
- assert_equal(len(w), 1) - assert_("divide by zero" in str(w[0].message)) - np.array(1e300) * np.array(1e300) - assert_equal(len(w), 2) - assert_("overflow" in str(w[-1].message)) - np.array(np.inf) - np.array(np.inf) - assert_equal(len(w), 3) - assert_("invalid value" in str(w[-1].message)) - np.array(1e-300) * np.array(1e-300) - assert_equal(len(w), 4) - assert_("underflow" in str(w[-1].message)) - - -class TestTypes(object): - def check_promotion_cases(self, promote_func): - # tests that the scalars get coerced correctly. - b = np.bool_(0) - i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) - u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) - f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) - c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) - - # coercion within the same kind - assert_equal(promote_func(i8, i16), np.dtype(np.int16)) - assert_equal(promote_func(i32, i8), np.dtype(np.int32)) - assert_equal(promote_func(i16, i64), np.dtype(np.int64)) - assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) - assert_equal(promote_func(f32, f64), np.dtype(np.float64)) - assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) - assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) - assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) - assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) - assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) - - # coercion between kinds - assert_equal(promote_func(b, i32), np.dtype(np.int32)) - assert_equal(promote_func(b, u8), np.dtype(np.uint8)) - assert_equal(promote_func(i8, u8), np.dtype(np.int16)) - assert_equal(promote_func(u8, i32), np.dtype(np.int32)) - assert_equal(promote_func(i64, u32), np.dtype(np.int64)) - assert_equal(promote_func(u64, i32), np.dtype(np.float64)) - assert_equal(promote_func(i32, f32), np.dtype(np.float64)) - assert_equal(promote_func(i64, f32), np.dtype(np.float64)) - assert_equal(promote_func(f32, i16), np.dtype(np.float32)) - assert_equal(promote_func(f32, u32), np.dtype(np.float64)) - assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) - assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) - assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) - - # coercion between scalars and 1-D arrays - assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) - assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) - assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) - assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) - assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8)) - assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32)) - assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32)) - assert_equal(promote_func(np.int32(-1), np.array([u64])), - np.dtype(np.float64)) - assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32)) - assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32)) - assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64)) - assert_equal(promote_func(fld, np.array([c64])), - np.dtype(np.complex64)) - assert_equal(promote_func(c64, np.array([f64])), - np.dtype(np.complex128)) - assert_equal(promote_func(np.complex64(3j), np.array([f64])), - np.dtype(np.complex128)) - - # coercion between scalars and 1-D arrays, where - # the scalar has greater kind than the array - assert_equal(promote_func(np.array([b]), f64), 
np.dtype(np.float64)) - assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) - assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) - assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) - assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) - - # uint and int are treated as the same "kind" for - # the purposes of array-scalar promotion. - assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16)) - - # float and complex are treated as the same "kind" for - # the purposes of array-scalar promotion, so that you can do - # (0j + float32array) to get a complex64 array instead of - # a complex128 array. - assert_equal(promote_func(np.array([f32]), c128), - np.dtype(np.complex64)) - - def test_coercion(self): - def res_type(a, b): - return np.add(a, b).dtype - - self.check_promotion_cases(res_type) - - # Use-case: float/complex scalar * bool/int8 array - # shouldn't narrow the float/complex type - for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: - b = 1.234 * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.longdouble(1.234) * a - assert_equal(b.dtype, np.dtype(np.longdouble), - "array type %s" % a.dtype) - b = np.float64(1.234) * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.float32(1.234) * a - assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) - b = np.float16(1.234) * a - assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) - - b = 1.234j * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.clongdouble(1.234j) * a - assert_equal(b.dtype, np.dtype(np.clongdouble), - "array type %s" % a.dtype) - b = np.complex128(1.234j) * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.complex64(1.234j) * a - assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) - - # The following use-case is problematic, and to resolve its - # tricky side-effects requires more changes. 
- # - # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is - # a float32, shouldn't promote to float64 - # - # a = np.array([1.0, 1.5], dtype=np.float32) - # t = np.array([True, False]) - # b = t*a - # assert_equal(b, [1.0, 0.0]) - # assert_equal(b.dtype, np.dtype('f4')) - # b = (1-t)*a - # assert_equal(b, [0.0, 1.5]) - # assert_equal(b.dtype, np.dtype('f4')) - # - # Probably ~t (bitwise negation) is more proper to use here, - # but this is arguably less intuitive to understand at a glance, and - # would fail if 't' is actually an integer array instead of boolean: - # - # b = (~t)*a - # assert_equal(b, [0.0, 1.5]) - # assert_equal(b.dtype, np.dtype('f4')) - - def test_result_type(self): - self.check_promotion_cases(np.result_type) - assert_(np.result_type(None) == np.dtype(None)) - - def test_promote_types_endian(self): - # promote_types should always return native-endian types - assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8')) - assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8')) - - assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) - assert_equal(np.promote_types('<U16', '>i8'), np.dtype('U21')) - assert_equal(np.promote_types('<S5', '>U8'), np.dtype('U8')) - assert_equal(np.promote_types('<U8', '>S5'), np.dtype('U8')) - assert_equal(np.promote_types('<U5', '>U8'), np.dtype('U8')) - assert_equal(np.promote_types('>U8', '<U5'), np.dtype('U8')) - - assert_equal(np.promote_types('<M8', '>M8'), np.dtype('M8')) - assert_equal(np.promote_types('<m8', '>m8'), np.dtype('m8')) - - def test_promote_types_strings(self): - assert_equal(np.promote_types('bool', 'S'), np.dtype('S5')) - assert_equal(np.promote_types('b', 'S'), np.dtype('S4')) - assert_equal(np.promote_types('u1', 'S'), np.dtype('S3')) - assert_equal(np.promote_types('u2', 'S'), np.dtype('S5')) - assert_equal(np.promote_types('u4', 'S'), np.dtype('S10')) - assert_equal(np.promote_types('u8', 'S'), np.dtype('S20')) - assert_equal(np.promote_types('i1', 'S'), np.dtype('S4')) - assert_equal(np.promote_types('i2', 'S'), np.dtype('S6')) - assert_equal(np.promote_types('i4', 'S'), np.dtype('S11')) - assert_equal(np.promote_types('i8', 'S'), np.dtype('S21')) - assert_equal(np.promote_types('bool', 'U'), np.dtype('U5')) - assert_equal(np.promote_types('b', 'U'), np.dtype('U4')) - assert_equal(np.promote_types('u1', 'U'), np.dtype('U3')) - assert_equal(np.promote_types('u2', 'U'), np.dtype('U5')) - assert_equal(np.promote_types('u4', 'U'), np.dtype('U10')) - assert_equal(np.promote_types('u8', 'U'), np.dtype('U20')) - assert_equal(np.promote_types('i1', 'U'), np.dtype('U4')) - assert_equal(np.promote_types('i2', 'U'), np.dtype('U6')) - assert_equal(np.promote_types('i4', 'U'), np.dtype('U11')) - assert_equal(np.promote_types('i8', 'U'), np.dtype('U21')) - assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5')) - assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('b', 'S1'), np.dtype('S4')) - assert_equal(np.promote_types('b', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3')) - assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5')) - assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10')) - assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20')) - assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30')) - - def test_can_cast(self): - assert_(np.can_cast(np.int32, np.int64)) - assert_(np.can_cast(np.float64, complex)) - assert_(not np.can_cast(complex,
float)) - - assert_(np.can_cast('i8', 'f8')) - assert_(not np.can_cast('i8', 'f4')) - assert_(np.can_cast('i4', 'S11')) - - assert_(np.can_cast('i8', 'i8', 'no')) - assert_(not np.can_cast('<i8', '>i8', 'no')) - - assert_(np.can_cast('<i8', '>i8', 'equiv')) - assert_(not np.can_cast('<i4', '>i8', 'equiv')) - - assert_(np.can_cast('<i4', '>i8', 'safe')) - assert_(not np.can_cast('<i8', '>i4', 'safe')) - - assert_(np.can_cast('<i8', '>i4', 'same_kind')) - assert_(not np.can_cast('<i8', '>u4', 'same_kind')) - - assert_(np.can_cast('<i8', '>u4', 'unsafe')) - - assert_(np.can_cast('bool', 'S5')) - assert_(not np.can_cast('bool', 'S4')) - - assert_(np.can_cast('b', 'S4')) - assert_(not np.can_cast('b', 'S3')) - - assert_(np.can_cast('u1', 'S3')) - assert_(not np.can_cast('u1', 'S2')) - assert_(np.can_cast('u2', 'S5')) - assert_(not np.can_cast('u2', 'S4')) - assert_(np.can_cast('u4', 'S10')) - assert_(not np.can_cast('u4', 'S9')) - assert_(np.can_cast('u8', 'S20')) - assert_(not np.can_cast('u8', 'S19')) - - assert_(np.can_cast('i1', 'S4')) - assert_(not np.can_cast('i1', 'S3')) - assert_(np.can_cast('i2', 'S6')) - assert_(not np.can_cast('i2', 'S5')) - assert_(np.can_cast('i4', 'S11')) - assert_(not np.can_cast('i4', 'S10')) - assert_(np.can_cast('i8', 'S21')) - assert_(not np.can_cast('i8', 'S20')) - - assert_(np.can_cast('bool', 'S5')) - assert_(not np.can_cast('bool', 'S4')) - - assert_(np.can_cast('b', 'U4')) - assert_(not np.can_cast('b', 'U3')) - - assert_(np.can_cast('u1', 'U3')) - assert_(not np.can_cast('u1', 'U2')) - assert_(np.can_cast('u2', 'U5')) - assert_(not np.can_cast('u2', 'U4')) - assert_(np.can_cast('u4', 'U10')) - assert_(not np.can_cast('u4', 'U9')) - assert_(np.can_cast('u8', 'U20')) - assert_(not np.can_cast('u8', 'U19')) - - assert_(np.can_cast('i1', 'U4')) - assert_(not np.can_cast('i1', 'U3')) - assert_(np.can_cast('i2', 'U6')) - assert_(not np.can_cast('i2', 'U5')) - assert_(np.can_cast('i4', 'U11')) - assert_(not np.can_cast('i4', 'U10')) - assert_(np.can_cast('i8', 'U21')) - assert_(not np.can_cast('i8', 'U20')) - - assert_raises(TypeError, np.can_cast, 'i4', None) - assert_raises(TypeError, np.can_cast, None, 'i4') - - # Also test keyword arguments - assert_(np.can_cast(from_=np.int32, to=np.int64)) - - def test_can_cast_simple_to_structured(self): - # Non-structured can only be cast to structured in 'unsafe' mode. - assert_(not np.can_cast('i4', 'i4,i4')) - assert_(not np.can_cast('i4', 'i4,i2')) - assert_(np.can_cast('i4', 'i4,i4', casting='unsafe')) - assert_(np.can_cast('i4', 'i4,i2', casting='unsafe')) - # Even if there is just a single field which is OK. - assert_(not np.can_cast('i2', [('f1', 'i4')])) - assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind')) - assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe')) - # It should be the same for recursive structured or subarrays. - assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) - assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) - assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) - assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) - - def test_can_cast_structured_to_simple(self): - # Need unsafe casting for structured to simple. - assert_(not np.can_cast([('f1', 'i4')], 'i4')) - assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) - assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) - # Since it is unclear what is being cast, multiple fields to - # single should not work even for unsafe casting.
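A quick standalone reference for the casting ladder these assertions walk through ('no' < 'equiv' < 'safe' < 'same_kind' < 'unsafe'); a sketch for orientation, not part of the deleted file:

    import numpy as np

    print(np.can_cast('<i8', '>i8', 'no'))       # False: byte order differs
    print(np.can_cast('<i8', '>i8', 'equiv'))    # True: same layout, any order
    print(np.can_cast('i4', 'i8', 'safe'))       # True: no precision loss
    print(np.can_cast('i8', 'i4', 'same_kind'))  # True: both integer kind
    print(np.can_cast('f8', 'i4', 'same_kind'))  # False: float -> int
    print(np.can_cast('f8', 'i4', 'unsafe'))     # True: anything goes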
- assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) - # But a single field inside a single field is OK. - assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) - assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) - # And a subarray is fine too - it will just take the first element - # (arguably not very consistently; might also take the first field). - assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) - assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) - # But a structured subarray with multiple fields should fail. - assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', - casting='unsafe')) - - def test_can_cast_values(self): - # gh-5917 - for dt in np.sctypes['int'] + np.sctypes['uint']: - ii = np.iinfo(dt) - assert_(np.can_cast(ii.min, dt)) - assert_(np.can_cast(ii.max, dt)) - assert_(not np.can_cast(ii.min - 1, dt)) - assert_(not np.can_cast(ii.max + 1, dt)) - - for dt in np.sctypes['float']: - fi = np.finfo(dt) - assert_(np.can_cast(fi.min, dt)) - assert_(np.can_cast(fi.max, dt)) - - -# Custom exception class to test exception propagation in fromiter -class NIterError(Exception): - pass - - -class TestFromiter(object): - def makegen(self): - for x in range(24): - yield x**2 - - def test_types(self): - ai32 = np.fromiter(self.makegen(), np.int32) - ai64 = np.fromiter(self.makegen(), np.int64) - af = np.fromiter(self.makegen(), float) - assert_(ai32.dtype == np.dtype(np.int32)) - assert_(ai64.dtype == np.dtype(np.int64)) - assert_(af.dtype == np.dtype(float)) - - def test_lengths(self): - expected = np.array(list(self.makegen())) - a = np.fromiter(self.makegen(), int) - a20 = np.fromiter(self.makegen(), int, 20) - assert_(len(a) == len(expected)) - assert_(len(a20) == 20) - assert_raises(ValueError, np.fromiter, - self.makegen(), int, len(expected) + 10) - - def test_values(self): - expected = np.array(list(self.makegen())) - a = np.fromiter(self.makegen(), int) - a20 = np.fromiter(self.makegen(), int, 20) - assert_(np.alltrue(a == expected, axis=0)) - assert_(np.alltrue(a20 == expected[:20], axis=0)) - - def load_data(self, n, eindex): - # Utility method for the issue 2592 tests. - # Raise an exception at the desired index in the iterator. - for e in range(n): - if e == eindex: - raise NIterError('error at index %s' % eindex) - yield e - - def test_2592(self): - # Test iteration exceptions are correctly raised. - count, eindex = 10, 5 - assert_raises(NIterError, np.fromiter, - self.load_data(count, eindex), dtype=int, count=count) - - def test_2592_edge(self): - # Test iter. exceptions, edge case (exception at end of iterator). 
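For context on the gh-2592 tests below: an exception raised inside the source iterator must propagate out of np.fromiter instead of being swallowed. A minimal standalone sketch (RuntimeError stands in for the custom NIterError used by the deleted tests):

    import numpy as np

    def gen(n, bad):
        # Yield n values, but fail partway through like the tests' load_data.
        for i in range(n):
            if i == bad:
                raise RuntimeError('error at index %d' % bad)
            yield i

    try:
        np.fromiter(gen(10, 5), dtype=int, count=10)
    except RuntimeError as exc:
        print('propagated:', exc)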
- count = 10 - eindex = count-1 - assert_raises(NIterError, np.fromiter, - self.load_data(count, eindex), dtype=int, count=count) - - -class TestNonzero(object): - def test_nonzero_trivial(self): - assert_equal(np.count_nonzero(np.array([])), 0) - assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) - assert_equal(np.nonzero(np.array([])), ([],)) - - assert_equal(np.count_nonzero(np.array([0])), 0) - assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0) - assert_equal(np.nonzero(np.array([0])), ([],)) - - assert_equal(np.count_nonzero(np.array([1])), 1) - assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) - assert_equal(np.nonzero(np.array([1])), ([0],)) - - def test_nonzero_zerod(self): - assert_equal(np.count_nonzero(np.array(0)), 0) - assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0) - with assert_warns(DeprecationWarning): - assert_equal(np.nonzero(np.array(0)), ([],)) - - assert_equal(np.count_nonzero(np.array(1)), 1) - assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1) - with assert_warns(DeprecationWarning): - assert_equal(np.nonzero(np.array(1)), ([0],)) - - def test_nonzero_onedim(self): - x = np.array([1, 0, 2, -1, 0, 0, 8]) - assert_equal(np.count_nonzero(x), 4) - assert_equal(np.count_nonzero(x), 4) - assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) - - x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], - dtype=[('a', 'i4'), ('b', 'i2')]) - assert_equal(np.count_nonzero(x['a']), 3) - assert_equal(np.count_nonzero(x['b']), 4) - assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) - assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) - - def test_nonzero_twodim(self): - x = np.array([[0, 1, 0], [2, 0, 3]]) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) - - x = np.eye(3) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) - - x = np.array([[(0, 1), (0, 0), (1, 11)], - [(1, 1), (1, 0), (0, 0)], - [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) - assert_equal(np.count_nonzero(x['a']), 4) - assert_equal(np.count_nonzero(x['b']), 5) - assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) - assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) - - assert_(not x['a'].T.flags.aligned) - assert_equal(np.count_nonzero(x['a'].T), 4) - assert_equal(np.count_nonzero(x['b'].T), 5) - assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) - assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) - - def test_sparse(self): - # test special sparse condition boolean code path - for i in range(20): - c = np.zeros(200, dtype=bool) - c[i::20] = True - assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) - - c = np.zeros(400, dtype=bool) - c[10 + i:20 + i] = True - c[20 + i*2] = True - assert_equal(np.nonzero(c)[0], - np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) - - def test_return_type(self): - class C(np.ndarray): - pass - - for view in (C, np.ndarray): - for nd in range(1, 4): - shape = tuple(range(2, 2+nd)) - x = np.arange(np.prod(shape)).reshape(shape).view(view) - for nzx in (np.nonzero(x), x.nonzero()): - for nzx_i in nzx: - assert_(type(nzx_i) is np.ndarray) - assert_(nzx_i.flags.writeable) - - def test_count_nonzero_axis(self): - # Basic check of functionality - m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) - - expected = np.array([1, 1, 1, 1, 1]) - assert_equal(np.count_nonzero(m, axis=0), expected) - - expected = np.array([2, 3]) - assert_equal(np.count_nonzero(m, axis=1), expected) - - 
assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) - assert_raises(TypeError, np.count_nonzero, m, axis='foo') - assert_raises(np.AxisError, np.count_nonzero, m, axis=3) - assert_raises(TypeError, np.count_nonzero, - m, axis=np.array([[1], [2]])) - - def test_count_nonzero_axis_all_dtypes(self): - # More thorough test that the axis argument is respected - # for all dtypes and responds correctly when presented with - # either integer or tuple arguments for axis - msg = "Mismatch for dtype: %s" - - def assert_equal_w_dt(a, b, err_msg): - assert_equal(a.dtype, b.dtype, err_msg=err_msg) - assert_equal(a, b, err_msg=err_msg) - - for dt in np.typecodes['All']: - err_msg = msg % (np.dtype(dt).name,) - - if dt != 'V': - if dt != 'M': - m = np.zeros((3, 3), dtype=dt) - n = np.ones(1, dtype=dt) - - m[0, 0] = n[0] - m[1, 0] = n[0] - - else: # np.zeros doesn't work for np.datetime64 - m = np.array(['1970-01-01'] * 9) - m = m.reshape((3, 3)) - - m[0, 0] = '1970-01-12' - m[1, 0] = '1970-01-12' - m = m.astype(dt) - - expected = np.array([2, 0, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=0), - expected, err_msg=err_msg) - - expected = np.array([1, 1, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=1), - expected, err_msg=err_msg) - - expected = np.array(2) - assert_equal(np.count_nonzero(m, axis=(0, 1)), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m, axis=None), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m), - expected, err_msg=err_msg) - - if dt == 'V': - # There are no 'nonzero' objects for np.void, so the testing - # setup is slightly different for this dtype - m = np.array([np.void(1)] * 6).reshape((2, 3)) - - expected = np.array([0, 0, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=0), - expected, err_msg=err_msg) - - expected = np.array([0, 0], dtype=np.intp) - assert_equal_w_dt(np.count_nonzero(m, axis=1), - expected, err_msg=err_msg) - - expected = np.array(0) - assert_equal(np.count_nonzero(m, axis=(0, 1)), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m, axis=None), - expected, err_msg=err_msg) - assert_equal(np.count_nonzero(m), - expected, err_msg=err_msg) - - def test_count_nonzero_axis_consistent(self): - # Check that the axis behaviour for valid axes in - # non-special cases is consistent (and therefore - # correct) by checking it against an integer array - # that is then cast to the generic object dtype - from itertools import combinations, permutations - - axis = (0, 1, 2, 3) - size = (5, 5, 5, 5) - msg = "Mismatch for axis: %s" - - rng = np.random.RandomState(1234) - m = rng.randint(-100, 100, size=size) - n = m.astype(object) - - for length in range(len(axis)): - for combo in combinations(axis, length): - for perm in permutations(combo): - assert_equal( - np.count_nonzero(m, axis=perm), - np.count_nonzero(n, axis=perm), - err_msg=msg % (perm,)) - - def test_countnonzero_axis_empty(self): - a = np.array([[0, 0, 1], [1, 0, 1]]) - assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) - - def test_array_method(self): - # Tests that the array method - # call to nonzero works - m = np.array([[1, 0, 0], [4, 0, 6]]) - tgt = [[0, 1, 1], [0, 0, 2]] - - assert_equal(m.nonzero(), tgt) - - def test_nonzero_invalid_object(self): - # gh-9295 - a = np.array([np.array([1, 2]), 3]) - assert_raises(ValueError, np.nonzero, a) - - class BoolErrors: - def __bool__(self): - raise ValueError("Not allowed") - def __nonzero__(self): - raise ValueError("Not allowed") - - assert_raises(ValueError,
np.nonzero, np.array([BoolErrors()])) - - def test_nonzero_sideeffect_safety(self): - # gh-13631 - class FalseThenTrue: - _val = False - def __bool__(self): - try: - return self._val - finally: - self._val = True - - class TrueThenFalse: - _val = True - def __bool__(self): - try: - return self._val - finally: - self._val = False - - # result grows on the second pass - a = np.array([True, FalseThenTrue()]) - assert_raises(RuntimeError, np.nonzero, a) - - a = np.array([[True], [FalseThenTrue()]]) - assert_raises(RuntimeError, np.nonzero, a) - - # result shrinks on the second pass - a = np.array([False, TrueThenFalse()]) - assert_raises(RuntimeError, np.nonzero, a) - - a = np.array([[False], [TrueThenFalse()]]) - assert_raises(RuntimeError, np.nonzero, a) - - def test_nonzero_exception_safe(self): - # gh-13930 - - class ThrowsAfter: - def __init__(self, iters): - self.iters_left = iters - - def __bool__(self): - if self.iters_left == 0: - raise ValueError("called `iters` times") - - self.iters_left -= 1 - return True - - """ - Test that a ValueError is raised instead of a SystemError - - If the __bool__ function is called after the error state is set, - Python (cpython) will raise a SystemError. - """ - - # assert that an exception in first pass is handled correctly - a = np.array([ThrowsAfter(5)]*10) - assert_raises(ValueError, np.nonzero, a) - - # raise exception in second pass for 1-dimensional loop - a = np.array([ThrowsAfter(15)]*10) - assert_raises(ValueError, np.nonzero, a) - - # raise exception in second pass for n-dimensional loop - a = np.array([[ThrowsAfter(15)]]*10) - assert_raises(ValueError, np.nonzero, a) - - -class TestIndex(object): - def test_boolean(self): - a = rand(3, 5, 8) - V = rand(5, 8) - g1 = randint(0, 5, size=15) - g2 = randint(0, 8, size=15) - V[g1, g2] = -V[g1, g2] - assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) - - def test_boolean_edgecase(self): - a = np.array([], dtype='int32') - b = np.array([], dtype='bool') - c = a[b] - assert_equal(c, []) - assert_equal(c.dtype, np.dtype('int32')) - - -class TestBinaryRepr(object): - def test_zero(self): - assert_equal(np.binary_repr(0), '0') - - def test_positive(self): - assert_equal(np.binary_repr(10), '1010') - assert_equal(np.binary_repr(12522), - '11000011101010') - assert_equal(np.binary_repr(10736848), - '101000111101010011010000') - - def test_negative(self): - assert_equal(np.binary_repr(-1), '-1') - assert_equal(np.binary_repr(-10), '-1010') - assert_equal(np.binary_repr(-12522), - '-11000011101010') - assert_equal(np.binary_repr(-10736848), - '-101000111101010011010000') - - def test_sufficient_width(self): - assert_equal(np.binary_repr(0, width=5), '00000') - assert_equal(np.binary_repr(10, width=7), '0001010') - assert_equal(np.binary_repr(-5, width=7), '1111011') - - def test_neg_width_boundaries(self): - # see gh-8670 - - # Ensure that the example in the issue does not - # break before proceeding to a more thorough test. - assert_equal(np.binary_repr(-128, width=8), '10000000') - - for width in range(1, 11): - num = -2**(width - 1) - exp = '1' + (width - 1) * '0' - assert_equal(np.binary_repr(num, width=width), exp) - - def test_large_neg_int64(self): - # See gh-14289. 
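# ---- editor's note (illustration only; not from the deleted test file) ----
# A minimal standalone sketch of the two's-complement convention the
# width= assertions here rely on: with width=n, np.binary_repr returns
# the n-bit two's-complement pattern, so the most negative n-bit integer
# is '1' followed by n-1 zeros, and -1 is all ones. Assumes a NumPy
# recent enough to include the gh-14289 fix for large negative int64.
import numpy as np

for width in (8, 16, 64):
    lowest = -2 ** (width - 1)
    assert np.binary_repr(lowest, width=width) == '1' + '0' * (width - 1)
    assert np.binary_repr(-1, width=width) == '1' * width
# ---------------------------------------------------------------------------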
- assert_equal(np.binary_repr(np.int64(-2**62), width=64), - '11' + '0'*62) - - -class TestBaseRepr(object): - def test_base3(self): - assert_equal(np.base_repr(3**5, 3), '100000') - - def test_positive(self): - assert_equal(np.base_repr(12, 10), '12') - assert_equal(np.base_repr(12, 10, 4), '000012') - assert_equal(np.base_repr(12, 4), '30') - assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') - - def test_negative(self): - assert_equal(np.base_repr(-12, 10), '-12') - assert_equal(np.base_repr(-12, 10, 4), '-000012') - assert_equal(np.base_repr(-12, 4), '-30') - - def test_base_range(self): - with assert_raises(ValueError): - np.base_repr(1, 1) - with assert_raises(ValueError): - np.base_repr(1, 37) - - -class TestArrayComparisons(object): - def test_array_equal(self): - res = np.array_equal(np.array([1, 2]), np.array([1, 2])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equal(np.array([1, 2]), np.array([3, 4])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equal(np.array([1, 2]), np.array([1, 3])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1')) - assert_(res) - assert_(type(res) is bool) - res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'), - np.array([('a', 1)], dtype='S1,u4')) - assert_(res) - assert_(type(res) is bool) - - def test_none_compares_elementwise(self): - a = np.array([None, 1, None], dtype=object) - assert_equal(a == None, [True, False, True]) - assert_equal(a != None, [False, True, False]) - - a = np.ones(3) - assert_equal(a == None, [False, False, False]) - assert_equal(a != None, [True, True, True]) - - def test_array_equiv(self): - res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) - assert_(not res) - assert_(type(res) is bool) - - res = np.array_equiv(np.array([1, 1]), np.array([1])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) - assert_(res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([2])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) - assert_(not res) - assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) - assert_(not res) - assert_(type(res) is bool) - - -def assert_array_strict_equal(x, y): - assert_array_equal(x, y) - # Check flags, 32 bit arches typically don't provide 16 byte alignment - if ((x.dtype.alignment <= 8 or - np.intp().dtype.itemsize != 4) and - sys.platform != 'win32'): - assert_(x.flags == y.flags) - else: - assert_(x.flags.owndata == y.flags.owndata) - assert_(x.flags.writeable == y.flags.writeable) - assert_(x.flags.c_contiguous == y.flags.c_contiguous) - assert_(x.flags.f_contiguous == y.flags.f_contiguous) - assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) - # check endianness - assert_(x.dtype.isnative == y.dtype.isnative) - - -class TestClip(object): - def setup(self): - self.nr = 5 - self.nc = 3 - - def fastclip(self, a, m, M, out=None, 
casting=None): - if out is None: - if casting is None: - return a.clip(m, M) - else: - return a.clip(m, M, casting=casting) - else: - if casting is None: - return a.clip(m, M, out) - else: - return a.clip(m, M, out, casting=casting) - - def clip(self, a, m, M, out=None): - # use slow-clip - selector = np.less(a, m) + 2*np.greater(a, M) - return selector.choose((a, m, M), out=out) - - # Handy functions - def _generate_data(self, n, m): - return randn(n, m) - - def _generate_data_complex(self, n, m): - return randn(n, m) + 1.j * rand(n, m) - - def _generate_flt_data(self, n, m): - return (randn(n, m)).astype(np.float32) - - def _neg_byteorder(self, a): - a = np.asarray(a) - if sys.byteorder == 'little': - a = a.astype(a.dtype.newbyteorder('>')) - else: - a = a.astype(a.dtype.newbyteorder('<')) - return a - - def _generate_non_native_data(self, n, m): - data = randn(n, m) - data = self._neg_byteorder(data) - assert_(not data.dtype.isnative) - return data - - def _generate_int_data(self, n, m): - return (10 * rand(n, m)).astype(np.int64) - - def _generate_int32_data(self, n, m): - return (10 * rand(n, m)).astype(np.int32) - - # Now the real test cases - - @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO') - def test_ones_pathological(self, dtype): - # for preservation of behavior described in - # gh-12519; amin > amax behavior may still change - # in the future - arr = np.ones(10, dtype=dtype) - expected = np.zeros(10, dtype=dtype) - actual = np.clip(arr, 1, 0) - if dtype == 'O': - assert actual.tolist() == expected.tolist() - else: - assert_equal(actual, expected) - - def test_simple_double(self): - # Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = 0.1 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_int(self): - # Test native int input with scalar min/max. - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_array_double(self): - # Test native double input with array min/max. - a = self._generate_data(self.nr, self.nc) - m = np.zeros(a.shape) - M = m + 0.5 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_nonnative(self): - # Test non native double input with scalar min/max. - # Test native double input with non native double scalar min/max. - a = self._generate_non_native_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - # Test native double input with non native double scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = self._neg_byteorder(0.6) - assert_(not M.dtype.isnative) - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - def test_simple_complex(self): - # Test native complex input with native double scalar min/max. - # Test native input with complex double scalar min/max. - a = 3 * self._generate_data_complex(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - # Test native input with complex double scalar min/max. - a = 3 * self._generate_data(self.nr, self.nc) - m = -0.5 + 1.j - M = 1. 
+ 2.j - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_clip_complex(self): - # Address Issue gh-5354 for clipping complex arrays - # Test native complex input without explicit min/max - # ie, either min=None or max=None - a = np.ones(10, dtype=complex) - m = a.min() - M = a.max() - am = self.fastclip(a, m, None) - aM = self.fastclip(a, None, M) - assert_array_strict_equal(am, a) - assert_array_strict_equal(aM, a) - - def test_clip_non_contig(self): - # Test clip for non contiguous native input and native scalar min/max. - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = self.fastclip(a, -1.6, 1.7) - act = self.clip(a, -1.6, 1.7) - assert_array_strict_equal(ac, act) - - def test_simple_out(self): - # Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = np.zeros(a.shape) - act = np.zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - @pytest.mark.parametrize("casting", [None, "unsafe"]) - def test_simple_int32_inout(self, casting): - # Test native int32 input with double min/max and int32 out. - a = self._generate_int32_data(self.nr, self.nc) - m = np.float64(0) - M = np.float64(2) - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - if casting is None: - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac, casting=casting) - else: - # explicitly passing "unsafe" will silence warning - self.fastclip(a, m, M, ac, casting=casting) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_out(self): - # Test native int32 input with int32 scalar min/max and int64 out. - a = self._generate_int32_data(self.nr, self.nc) - m = np.int32(-1) - M = np.int32(1) - ac = np.zeros(a.shape, dtype=np.int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_inout(self): - # Test native int32 input with double array min/max and int32 out. - a = self._generate_int32_data(self.nr, self.nc) - m = np.zeros(a.shape, np.float64) - M = np.float64(1) - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_out(self): - # Test native double input with scalar min/max and int out. - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = np.zeros(a.shape, dtype=np.int32) - act = ac.copy() - with assert_warns(DeprecationWarning): - # NumPy 1.17.0, 2018-02-24 - casting is unsafe - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_inplace_01(self): - # Test native double input with array min/max in-place. - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = np.zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_simple_inplace_02(self): - # Test native double input with scalar min/max in-place. 
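# ---- editor's note (illustration only; not from the deleted test file) ----
# How the slow reference `clip` defined at the top of TestClip works:
# each element gets selector 0 (keep a), 1 (below min) or 2 (above max),
# and ndarray.choose() then picks from the triple (a, m, M). A minimal
# standalone sketch of that equivalence:
import numpy as np

a = np.array([-1.0, 0.2, 0.5, 2.0])
m, M = 0.0, 1.0
selector = np.less(a, m) + 2 * np.greater(a, M)   # [1, 0, 0, 2]
slow = selector.choose((a, m, M))                 # [0.0, 0.2, 0.5, 1.0]
assert np.array_equal(slow, np.clip(a, m, M))
# ---------------------------------------------------------------------------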
- a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(ac, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_noncontig_inplace(self): - # Test non contiguous double input with double scalar min/max in-place. - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(ac, m, M, ac) - assert_array_equal(a, ac) - - def test_type_cast_01(self): - # Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_02(self): - # Test native int32 input with int32 scalar min/max. - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(np.int32) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_03(self): - # Test native int32 input with float64 scalar min/max. - a = self._generate_int32_data(self.nr, self.nc) - m = -2 - M = 4 - ac = self.fastclip(a, np.float64(m), np.float64(M)) - act = self.clip(a, np.float64(m), np.float64(M)) - assert_array_strict_equal(ac, act) - - def test_type_cast_04(self): - # Test native int32 input with float32 scalar min/max. - a = self._generate_int32_data(self.nr, self.nc) - m = np.float32(-2) - M = np.float32(4) - act = self.fastclip(a, m, M) - ac = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_05(self): - # Test native int32 with double arrays min/max. - a = self._generate_int_data(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m * np.zeros(a.shape), M) - act = self.clip(a, m * np.zeros(a.shape), M) - assert_array_strict_equal(ac, act) - - def test_type_cast_06(self): - # Test native with NON native scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = 0.5 - m_s = self._neg_byteorder(m) - M = 1. - act = self.clip(a, m_s, M) - ac = self.fastclip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_07(self): - # Test NON native with native array min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 * np.ones(a.shape) - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - act = a_s.clip(m, M) - ac = self.fastclip(a_s, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_08(self): - # Test NON native with native scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - ac = self.fastclip(a_s, m, M) - act = a_s.clip(m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_09(self): - # Test native with NON native array min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 * np.ones(a.shape) - M = 1. - m_s = self._neg_byteorder(m) - assert_(not m_s.dtype.isnative) - ac = self.fastclip(a, m_s, M) - act = self.clip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_10(self): - # Test native int32 with float min/max and float out for output argument. 
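# ---- editor's note (illustration only; not from the deleted test file) ----
# The assert_warns(DeprecationWarning) blocks in the surrounding tests
# exercise clip() writing float results into an integer out= array,
# which is an unsafe cast. A minimal standalone sketch, assuming
# NumPy >= 1.17 (where clip() is a ufunc and accepts casting=):
import numpy as np

a = np.arange(6, dtype=np.int32).reshape(2, 3)
out = np.zeros_like(a)
np.clip(a, np.float64(1), np.float64(4), out=out, casting="unsafe")
assert out.tolist() == [[1, 1, 2], [3, 4, 4]]   # result cast back to int32
# ---------------------------------------------------------------------------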
-        a = self._generate_int_data(self.nr, self.nc)
-        b = np.zeros(a.shape, dtype=np.float32)
-        m = np.float32(-0.5)
-        M = np.float32(1)
-        act = self.clip(a, m, M, out=b)
-        ac = self.fastclip(a, m, M, out=b)
-        assert_array_strict_equal(ac, act)
-
-    def test_type_cast_11(self):
-        # Test non-native input with native scalar min/max and non-native out.
-        a = self._generate_non_native_data(self.nr, self.nc)
-        b = a.copy()
-        b = b.astype(b.dtype.newbyteorder('>'))
-        bt = b.copy()
-        m = -0.5
-        M = 1.
-        self.fastclip(a, m, M, out=b)
-        self.clip(a, m, M, out=bt)
-        assert_array_strict_equal(b, bt)
-
-    def test_type_cast_12(self):
-        # Test native int32 input with int32 min/max and float out.
-        a = self._generate_int_data(self.nr, self.nc)
-        b = np.zeros(a.shape, dtype=np.float32)
-        m = np.int32(0)
-        M = np.int32(1)
-        act = self.clip(a, m, M, out=b)
-        ac = self.fastclip(a, m, M, out=b)
-        assert_array_strict_equal(ac, act)
-
-    def test_clip_with_out_simple(self):
-        # Test native double input with scalar min/max
-        a = self._generate_data(self.nr, self.nc)
-        m = -0.5
-        M = 0.6
-        ac = np.zeros(a.shape)
-        act = np.zeros(a.shape)
-        self.fastclip(a, m, M, ac)
-        self.clip(a, m, M, act)
-        assert_array_strict_equal(ac, act)
-
-    def test_clip_with_out_simple2(self):
-        # Test native int32 input with double min/max and int32 out
-        a = self._generate_int32_data(self.nr, self.nc)
-        m = np.float64(0)
-        M = np.float64(2)
-        ac = np.zeros(a.shape, dtype=np.int32)
-        act = ac.copy()
-        with assert_warns(DeprecationWarning):
-            # NumPy 1.17.0, 2018-02-24 - casting is unsafe
-            self.fastclip(a, m, M, ac)
-        self.clip(a, m, M, act)
-        assert_array_strict_equal(ac, act)
-
-    def test_clip_with_out_simple_int32(self):
-        # Test native int32 input with int32 scalar min/max and int64 out
-        a = self._generate_int32_data(self.nr, self.nc)
-        m = np.int32(-1)
-        M = np.int32(1)
-        ac = np.zeros(a.shape, dtype=np.int64)
-        act = ac.copy()
-        self.fastclip(a, m, M, ac)
-        self.clip(a, m, M, act)
-        assert_array_strict_equal(ac, act)
-
-    def test_clip_with_out_array_int32(self):
-        # Test native int32 input with double array min/max and int32 out
-        a = self._generate_int32_data(self.nr, self.nc)
-        m = np.zeros(a.shape, np.float64)
-        M = np.float64(1)
-        ac = np.zeros(a.shape, dtype=np.int32)
-        act = ac.copy()
-        with assert_warns(DeprecationWarning):
-            # NumPy 1.17.0, 2018-02-24 - casting is unsafe
-            self.fastclip(a, m, M, ac)
-        self.clip(a, m, M, act)
-        assert_array_strict_equal(ac, act)
-
-    def test_clip_with_out_array_outint32(self):
-        # Test native double input with scalar min/max and int out
-        a = self._generate_data(self.nr, self.nc)
-        m = -1.0
-        M = 2.0
-        ac = np.zeros(a.shape, dtype=np.int32)
-        act = ac.copy()
-        with assert_warns(DeprecationWarning):
-            # NumPy 1.17.0, 2018-02-24 - casting is unsafe
-            self.fastclip(a, m, M, ac)
-        self.clip(a, m, M, act)
-        assert_array_strict_equal(ac, act)
-
-    def test_clip_with_out_transposed(self):
-        # Test that the out argument works when transposed
-        a = np.arange(16).reshape(4, 4)
-        out = np.empty_like(a).T
-        a.clip(4, 10, out=out)
-        expected = self.clip(a, 4, 10)
-        assert_array_equal(out, expected)
-
-    def test_clip_with_out_memory_overlap(self):
-        # Test that the out argument works when it has memory overlap
-        a = np.arange(16).reshape(4, 4)
-        ac = a.copy()
-        a[:-1].clip(4, 10, out=a[1:])
-        expected = self.clip(ac[:-1], 4, 10)
-        assert_array_equal(a[1:], expected)
-
-    def test_clip_inplace_array(self):
-        # Test native double input with array min/max
-        a = self._generate_data(self.nr, self.nc)
-        ac = a.copy()
-        m = np.zeros(a.shape)
-        M = 1.0
-        self.fastclip(a, m, M, a)
-        self.clip(a, m, M, ac)
-        assert_array_strict_equal(a, ac)
-
-    def test_clip_inplace_simple(self):
-        # Test native double input with scalar min/max
-        a = self._generate_data(self.nr, self.nc)
-        ac = a.copy()
-        m = -0.5
-        M = 0.6
-        self.fastclip(a, m, M, a)
-        self.clip(a, m, M, ac)
-        assert_array_strict_equal(a, ac)
-
-    def test_clip_func_takes_out(self):
-        # Ensure that the clip() function takes an out= argument.
-        a = self._generate_data(self.nr, self.nc)
-        ac = a.copy()
-        m = -0.5
-        M = 0.6
-        a2 = np.clip(a, m, M, out=a)
-        self.clip(a, m, M, ac)
-        assert_array_strict_equal(a2, ac)
-        assert_(a2 is a)
-
-    def test_clip_nan(self):
-        d = np.arange(7.)
-        with assert_warns(DeprecationWarning):
-            assert_equal(d.clip(min=np.nan), d)
-        with assert_warns(DeprecationWarning):
-            assert_equal(d.clip(max=np.nan), d)
-        with assert_warns(DeprecationWarning):
-            assert_equal(d.clip(min=np.nan, max=np.nan), d)
-        with assert_warns(DeprecationWarning):
-            assert_equal(d.clip(min=-2, max=np.nan), d)
-        with assert_warns(DeprecationWarning):
-            assert_equal(d.clip(min=np.nan, max=10), d)
-
-    def test_object_clip(self):
-        a = np.arange(10, dtype=object)
-        actual = np.clip(a, 1, 5)
-        expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
-        assert actual.tolist() == expected.tolist()
-
-    def test_clip_all_none(self):
-        a = np.arange(10, dtype=object)
-        with assert_raises_regex(ValueError, 'max or min'):
-            np.clip(a, None, None)
-
-    def test_clip_invalid_casting(self):
-        a = np.arange(10, dtype=object)
-        with assert_raises_regex(ValueError,
-                                 'casting must be one of'):
-            self.fastclip(a, 1, 8, casting="garbage")
-
-    @pytest.mark.parametrize("amin, amax", [
-        # two scalars
-        (1, 0),
-        # mix scalar and array
-        (1, np.zeros(10)),
-        # two arrays
-        (np.ones(10), np.zeros(10)),
-    ])
-    def test_clip_value_min_max_flip(self, amin, amax):
-        a = np.arange(10, dtype=np.int64)
-        # requirement from ufunc_docstrings.py
-        expected = np.minimum(np.maximum(a, amin), amax)
-        actual = np.clip(a, amin, amax)
-        assert_equal(actual, expected)
-
-    @pytest.mark.parametrize("arr, amin, amax, exp", [
-        # for a bug in npy_ObjectClip, based on a
-        # case produced by hypothesis
-        (np.zeros(10, dtype=np.int64),
-         0,
-         -2**64+1,
-         np.full(10, -2**64+1, dtype=object)),
-        # for bugs in NPY_TIMEDELTA_MAX, based on a case
-        # produced by hypothesis
-        (np.zeros(10, dtype='m8') - 1,
-         0,
-         0,
-         np.zeros(10, dtype='m8')),
-    ])
-    def test_clip_problem_cases(self, arr, amin, amax, exp):
-        actual = np.clip(arr, amin, amax)
-        assert_equal(actual, exp)
-
-    @pytest.mark.xfail(reason="no scalar nan propagation yet")
-    @pytest.mark.parametrize("arr, amin, amax", [
-        # problematic scalar nan case from hypothesis
-        (np.zeros(10, dtype=np.int64),
-         np.array(np.nan),
-         np.zeros(10, dtype=np.int32)),
-    ])
-    def test_clip_scalar_nan_propagation(self, arr, amin, amax):
-        # enforcement of scalar nan propagation for comparisons
-        # called through clip()
-        expected = np.minimum(np.maximum(arr, amin), amax)
-        with assert_warns(DeprecationWarning):
-            actual = np.clip(arr, amin, amax)
-        assert_equal(actual, expected)
-
-    @pytest.mark.xfail(reason="propagation doesn't match spec")
-    @pytest.mark.parametrize("arr, amin, amax", [
-        (np.array([1] * 10, dtype='m8'),
-         np.timedelta64('NaT'),
-         np.zeros(10, dtype=np.int32)),
-    ])
-    def test_NaT_propagation(self, arr, amin, amax):
-        # NOTE: the expected function spec doesn't
-        # propagate NaT, but clip() now does
-        expected = np.minimum(np.maximum(arr, amin), amax)
-        actual =
np.clip(arr, amin, amax) - assert_equal(actual, expected) - - -class TestAllclose(object): - rtol = 1e-5 - atol = 1e-8 - - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - def tst_allclose(self, x, y): - assert_(np.allclose(x, y), "%s and %s not close" % (x, y)) - - def tst_not_allclose(self, x, y): - assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y)) - - def test_ip_allclose(self): - # Parametric test factory. - arr = np.array([100, 1000]) - aran = np.arange(125).reshape((5, 5, 5)) - - atol = self.atol - rtol = self.rtol - - data = [([1, 0], [1, 0]), - ([atol], [0]), - ([1], [1+rtol+atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol*2), - (aran, aran + aran*rtol), - (np.inf, np.inf), - (np.inf, [np.inf])] - - for (x, y) in data: - self.tst_allclose(x, y) - - def test_ip_not_allclose(self): - # Parametric test factory. - aran = np.arange(125).reshape((5, 5, 5)) - - atol = self.atol - rtol = self.rtol - - data = [([np.inf, 0], [1, np.inf]), - ([np.inf, 0], [1, 0]), - ([np.inf, np.inf], [1, np.inf]), - ([np.inf, np.inf], [1, 0]), - ([-np.inf, 0], [np.inf, 0]), - ([np.nan, 0], [np.nan, 0]), - ([atol*2], [0]), - ([1], [1+rtol+atol*2]), - (aran, aran + aran*atol + atol*2), - (np.array([np.inf, 1]), np.array([0, np.inf]))] - - for (x, y) in data: - self.tst_not_allclose(x, y) - - def test_no_parameter_modification(self): - x = np.array([np.inf, 1]) - y = np.array([0, np.inf]) - np.allclose(x, y) - assert_array_equal(x, np.array([np.inf, 1])) - assert_array_equal(y, np.array([0, np.inf])) - - def test_min_int(self): - # Could make problems because of abs(min_int) == min_int - min_int = np.iinfo(np.int_).min - a = np.array([min_int], dtype=np.int_) - assert_(np.allclose(a, a)) - - def test_equalnan(self): - x = np.array([1.0, np.nan]) - assert_(np.allclose(x, x, equal_nan=True)) - - def test_return_class_is_ndarray(self): - # Issue gh-6475 - # Check that allclose does not preserve subtypes - class Foo(np.ndarray): - def __new__(cls, *args, **kwargs): - return np.array(*args, **kwargs).view(cls) - - a = Foo([1]) - assert_(type(np.allclose(a, a)) is bool) - - -class TestIsclose(object): - rtol = 1e-5 - atol = 1e-8 - - def setup(self): - atol = self.atol - rtol = self.rtol - arr = np.array([100, 1000]) - aran = np.arange(125).reshape((5, 5, 5)) - - self.all_close_tests = [ - ([1, 0], [1, 0]), - ([atol], [0]), - ([1], [1 + rtol + atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol), - (aran, aran + aran*rtol), - (np.inf, np.inf), - (np.inf, [np.inf]), - ([np.inf, -np.inf], [np.inf, -np.inf]), - ] - self.none_close_tests = [ - ([np.inf, 0], [1, np.inf]), - ([np.inf, -np.inf], [1, 0]), - ([np.inf, np.inf], [1, -np.inf]), - ([np.inf, np.inf], [1, 0]), - ([np.nan, 0], [np.nan, -np.inf]), - ([atol*2], [0]), - ([1], [1 + rtol + atol*2]), - (aran, aran + rtol*1.1*aran + atol*1.1), - (np.array([np.inf, 1]), np.array([0, np.inf])), - ] - self.some_close_tests = [ - ([np.inf, 0], [np.inf, atol*2]), - ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]), - (np.arange(3), [0, 1, 2.1]), - (np.nan, [np.nan, np.nan, np.nan]), - ([0], [atol, np.inf, -np.inf, np.nan]), - (0, [atol, np.inf, -np.inf, np.nan]), - ] - self.some_close_results = [ - [True, False], - [True, False, False], - [True, True, False], - [False, False, False], - [True, False, False, False], - [True, False, False, False], - ] - - def test_ip_isclose(self): - self.setup() - tests = self.some_close_tests - results = self.some_close_results - 
for (x, y), result in zip(tests, results): - assert_array_equal(np.isclose(x, y), result) - - def tst_all_isclose(self, x, y): - assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) - - def tst_none_isclose(self, x, y): - msg = "%s and %s shouldn't be close" - assert_(not np.any(np.isclose(x, y)), msg % (x, y)) - - def tst_isclose_allclose(self, x, y): - msg = "isclose.all() and allclose aren't same for %s and %s" - msg2 = "isclose and allclose aren't same for %s and %s" - if np.isscalar(x) and np.isscalar(y): - assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) - else: - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) - - def test_ip_all_isclose(self): - self.setup() - for (x, y) in self.all_close_tests: - self.tst_all_isclose(x, y) - - def test_ip_none_isclose(self): - self.setup() - for (x, y) in self.none_close_tests: - self.tst_none_isclose(x, y) - - def test_ip_isclose_allclose(self): - self.setup() - tests = (self.all_close_tests + self.none_close_tests + - self.some_close_tests) - for (x, y) in tests: - self.tst_isclose_allclose(x, y) - - def test_equal_nan(self): - assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) - arr = np.array([1.0, np.nan]) - assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) - - def test_masked_arrays(self): - # Make sure to test the output type when arguments are interchanged. - - x = np.ma.masked_where([True, True, False], np.arange(3)) - assert_(type(x) is type(np.isclose(2, x))) - assert_(type(x) is type(np.isclose(x, 2))) - - x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) - assert_(type(x) is type(np.isclose(np.inf, x))) - assert_(type(x) is type(np.isclose(x, np.inf))) - - x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) - y = np.isclose(np.nan, x, equal_nan=True) - assert_(type(x) is type(y)) - # Ensure that the mask isn't modified... - assert_array_equal([True, True, False], y.mask) - y = np.isclose(x, np.nan, equal_nan=True) - assert_(type(x) is type(y)) - # Ensure that the mask isn't modified... - assert_array_equal([True, True, False], y.mask) - - x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) - y = np.isclose(x, x, equal_nan=True) - assert_(type(x) is type(y)) - # Ensure that the mask isn't modified... 
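# ---- editor's note (illustration only; not from the deleted test file) ----
# The tolerance tables above are built around the test np.isclose
# actually performs, |a - b| <= atol + rtol * |b|, which is asymmetric:
# the relative term scales with the second operand. Minimal sketch:
import numpy as np

rtol, atol = 1e-5, 1e-8
a, b = 1.0, 1.0 + 5e-6
assert np.isclose(a, b, rtol=rtol, atol=atol)               # 5e-6 < atol + rtol*|b|
assert not np.isclose(2 * atol, 0.0, rtol=rtol, atol=atol)  # the ([atol*2], [0]) case
# ---------------------------------------------------------------------------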
- assert_array_equal([True, True, False], y.mask) - - def test_scalar_return(self): - assert_(np.isscalar(np.isclose(1, 1))) - - def test_no_parameter_modification(self): - x = np.array([np.inf, 1]) - y = np.array([0, np.inf]) - np.isclose(x, y) - assert_array_equal(x, np.array([np.inf, 1])) - assert_array_equal(y, np.array([0, np.inf])) - - def test_non_finite_scalar(self): - # GH7014, when two scalars are compared the output should also be a - # scalar - assert_(np.isclose(np.inf, -np.inf) is np.False_) - assert_(np.isclose(0, np.inf) is np.False_) - assert_(type(np.isclose(0, np.inf)) is np.bool_) - - -class TestStdVar(object): - def setup(self): - self.A = np.array([1, -1, 1, -1]) - self.real_var = 1 - - def test_basic(self): - assert_almost_equal(np.var(self.A), self.real_var) - assert_almost_equal(np.std(self.A)**2, self.real_var) - - def test_scalars(self): - assert_equal(np.var(1), 0) - assert_equal(np.std(1), 0) - - def test_ddof1(self): - assert_almost_equal(np.var(self.A, ddof=1), - self.real_var*len(self.A)/float(len(self.A)-1)) - assert_almost_equal(np.std(self.A, ddof=1)**2, - self.real_var*len(self.A)/float(len(self.A)-1)) - - def test_ddof2(self): - assert_almost_equal(np.var(self.A, ddof=2), - self.real_var*len(self.A)/float(len(self.A)-2)) - assert_almost_equal(np.std(self.A, ddof=2)**2, - self.real_var*len(self.A)/float(len(self.A)-2)) - - def test_out_scalar(self): - d = np.arange(10) - out = np.array(0.) - r = np.std(d, out=out) - assert_(r is out) - assert_array_equal(r, out) - r = np.var(d, out=out) - assert_(r is out) - assert_array_equal(r, out) - r = np.mean(d, out=out) - assert_(r is out) - assert_array_equal(r, out) - - -class TestStdVarComplex(object): - def test_basic(self): - A = np.array([1, 1.j, -1, -1.j]) - real_var = 1 - assert_almost_equal(np.var(A), real_var) - assert_almost_equal(np.std(A)**2, real_var) - - def test_scalars(self): - assert_equal(np.var(1j), 0) - assert_equal(np.std(1j), 0) - - -class TestCreationFuncs(object): - # Test ones, zeros, empty and full. 
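# ---- editor's note (illustration only; not from the deleted test file) ----
# The ddof tests in TestStdVar above rely on the divisor switching from
# N to N - ddof, i.e. var(A, ddof=k) == var(A) * N / (N - k). Minimal
# standalone sketch with the same A = [1, -1, 1, -1] (N = 4, var = 1):
import numpy as np

A = np.array([1, -1, 1, -1])
N = len(A)
assert np.var(A) == 1.0                                 # mean 0, squares all 1
assert np.isclose(np.var(A, ddof=1), N / (N - 1))       # 4/3
assert np.isclose(np.std(A, ddof=2) ** 2, N / (N - 2))  # 2
# ---------------------------------------------------------------------------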
-
-    def setup(self):
-        dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
-        # void, bytes, str
-        variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
-        self.dtypes = sorted(dtypes - variable_sized |
-                             {np.dtype(tp.str.replace("0", str(i)))
-                              for tp in variable_sized for i in range(1, 10)},
-                             key=lambda dtype: dtype.str)
-        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
-        self.ndims = 10
-
-    def check_function(self, func, fill_value=None):
-        par = ((0, 1, 2),
-               range(self.ndims),
-               self.orders,
-               self.dtypes)
-        fill_kwarg = {}
-        if fill_value is not None:
-            fill_kwarg = {'fill_value': fill_value}
-
-        for size, ndims, order, dtype in itertools.product(*par):
-            shape = ndims * [size]
-
-            # do not fill void type
-            if fill_kwarg and dtype.str.startswith('|V'):
-                continue
-
-            arr = func(shape, order=order, dtype=dtype,
-                       **fill_kwarg)
-
-            assert_equal(arr.dtype, dtype)
-            assert_(getattr(arr.flags, self.orders[order]))
-
-            if fill_value is not None:
-                if dtype.str.startswith('|S'):
-                    val = str(fill_value)
-                else:
-                    val = fill_value
-                assert_equal(arr, dtype.type(val))
-
-    def test_zeros(self):
-        self.check_function(np.zeros)
-
-    def test_ones(self):
-        self.check_function(np.ones)
-
-    def test_empty(self):
-        self.check_function(np.empty)
-
-    def test_full(self):
-        self.check_function(np.full, 0)
-        self.check_function(np.full, 1)
-
-    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
-    def test_for_reference_leak(self):
-        # Make sure we have an object for reference
-        dim = 1
-        beg = sys.getrefcount(dim)
-        np.zeros([dim]*10)
-        assert_(sys.getrefcount(dim) == beg)
-        np.ones([dim]*10)
-        assert_(sys.getrefcount(dim) == beg)
-        np.empty([dim]*10)
-        assert_(sys.getrefcount(dim) == beg)
-        np.full([dim]*10, 0)
-        assert_(sys.getrefcount(dim) == beg)
-
-
-class TestLikeFuncs(object):
-    '''Test ones_like, zeros_like, empty_like and full_like'''
-
-    def setup(self):
-        self.data = [
-                # Array scalars
-                (np.array(3.), None),
-                (np.array(3), 'f8'),
-                # 1D arrays
-                (np.arange(6, dtype='f4'), None),
-                (np.arange(6), 'c16'),
-                # 2D C-layout arrays
-                (np.arange(6).reshape(2, 3), None),
-                (np.arange(6).reshape(3, 2), 'i1'),
-                # 2D F-layout arrays
-                (np.arange(6).reshape((2, 3), order='F'), None),
-                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
-                # 3D C-layout arrays
-                (np.arange(24).reshape(2, 3, 4), None),
-                (np.arange(24).reshape(4, 3, 2), 'f4'),
-                # 3D F-layout arrays
-                (np.arange(24).reshape((2, 3, 4), order='F'), None),
-                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
-                # 3D non-C/F-layout arrays
-                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
-                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
-                ]
-        self.shapes = [(5,), (5,6,), (5,6,7,)]
-
-    def compare_array_value(self, dz, value, fill_value):
-        if value is not None:
-            if fill_value:
-                try:
-                    z = dz.dtype.type(value)
-                except OverflowError:
-                    pass
-                else:
-                    assert_(np.all(dz == z))
-            else:
-                assert_(np.all(dz == value))
-
-    def check_like_function(self, like_function, value, fill_value=False):
-        if fill_value:
-            fill_kwarg = {'fill_value': value}
-        else:
-            fill_kwarg = {}
-        for d, dtype in self.data:
-            # default (K) order, dtype
-            dz = like_function(d, dtype=dtype, **fill_kwarg)
-            assert_equal(dz.shape, d.shape)
-            assert_equal(np.array(dz.strides)*d.dtype.itemsize,
-                         np.array(d.strides)*dz.dtype.itemsize)
-            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
-            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
-            if dtype is None:
-                assert_equal(dz.dtype, d.dtype)
-
else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # C order, default dtype - dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # F order, default dtype - dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.f_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # A order - dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - if d.flags.f_contiguous: - assert_(dz.flags.f_contiguous) - else: - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # Test the 'shape' parameter - for s in self.shapes: - for o in 'CFA': - sz = like_function(d, dtype=dtype, shape=s, order=o, - **fill_kwarg) - assert_equal(sz.shape, s) - if dtype is None: - assert_equal(sz.dtype, d.dtype) - else: - assert_equal(sz.dtype, np.dtype(dtype)) - if o == 'C' or (o == 'A' and d.flags.c_contiguous): - assert_(sz.flags.c_contiguous) - elif o == 'F' or (o == 'A' and d.flags.f_contiguous): - assert_(sz.flags.f_contiguous) - self.compare_array_value(sz, value, fill_value) - - if (d.ndim != len(s)): - assert_equal(np.argsort(like_function(d, dtype=dtype, - shape=s, order='K', - **fill_kwarg).strides), - np.argsort(np.empty(s, dtype=dtype, - order='C').strides)) - else: - assert_equal(np.argsort(like_function(d, dtype=dtype, - shape=s, order='K', - **fill_kwarg).strides), - np.argsort(d.strides)) - - # Test the 'subok' parameter - class MyNDArray(np.ndarray): - pass - - a = np.array([[1, 2], [3, 4]]).view(MyNDArray) - - b = like_function(a, **fill_kwarg) - assert_(type(b) is MyNDArray) - - b = like_function(a, subok=False, **fill_kwarg) - assert_(type(b) is not MyNDArray) - - def test_ones_like(self): - self.check_like_function(np.ones_like, 1) - - def test_zeros_like(self): - self.check_like_function(np.zeros_like, 0) - - def test_empty_like(self): - self.check_like_function(np.empty_like, None) - - def test_filled_like(self): - self.check_like_function(np.full_like, 0, True) - self.check_like_function(np.full_like, 1, True) - self.check_like_function(np.full_like, 1000, True) - self.check_like_function(np.full_like, 123.456, True) - self.check_like_function(np.full_like, np.inf, True) - - -class TestCorrelate(object): - def _setup(self, dt): - self.x = np.array([1, 2, 3, 4, 5], dtype=dt) - self.xs = np.arange(1, 20)[::3] - self.y = np.array([-1, -2, -3], dtype=dt) - self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) - self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) - self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) - self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) - self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) - self.zs = np.array([-3., -14., -30., -48., -66., -84., - -102., -54., -19.], dtype=dt) - - def test_float(self): - self._setup(float) - z = np.correlate(self.x, self.y, 'full') - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.x, self.y[:-1], 'full') - 
assert_array_almost_equal(z, self.z1_4) - z = np.correlate(self.y, self.x, 'full') - assert_array_almost_equal(z, self.z2) - z = np.correlate(self.x[::-1], self.y, 'full') - assert_array_almost_equal(z, self.z1r) - z = np.correlate(self.y, self.x[::-1], 'full') - assert_array_almost_equal(z, self.z2r) - z = np.correlate(self.xs, self.y, 'full') - assert_array_almost_equal(z, self.zs) - - def test_object(self): - self._setup(Decimal) - z = np.correlate(self.x, self.y, 'full') - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full') - assert_array_almost_equal(z, self.z2) - - def test_no_overwrite(self): - d = np.ones(100) - k = np.ones(3) - np.correlate(d, k) - assert_array_equal(d, np.ones(100)) - assert_array_equal(k, np.ones(3)) - - def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=complex) - y = np.array([-1, -2j, 3+1j], dtype=complex) - r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) - r_z = r_z[::-1].conjugate() - z = np.correlate(y, x, mode='full') - assert_array_almost_equal(z, r_z) - - def test_zero_size(self): - with pytest.raises(ValueError): - np.correlate(np.array([]), np.ones(1000), mode='full') - with pytest.raises(ValueError): - np.correlate(np.ones(1000), np.array([]), mode='full') - -class TestConvolve(object): - def test_object(self): - d = [1.] * 100 - k = [1.] * 3 - assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) - - def test_no_overwrite(self): - d = np.ones(100) - k = np.ones(3) - np.convolve(d, k) - assert_array_equal(d, np.ones(100)) - assert_array_equal(k, np.ones(3)) - - -class TestArgwhere(object): - - @pytest.mark.parametrize('nd', [0, 1, 2]) - def test_nd(self, nd): - # get an nd array with multiple elements in every dimension - x = np.empty((2,)*nd, bool) - - # none - x[...] = False - assert_equal(np.argwhere(x).shape, (0, nd)) - - # only one - x[...] = False - x.flat[0] = True - assert_equal(np.argwhere(x).shape, (1, nd)) - - # all but one - x[...] = True - x.flat[0] = False - assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) - - # all - x[...] = True - assert_equal(np.argwhere(x).shape, (x.size, nd)) - - def test_2D(self): - x = np.arange(6).reshape((2, 3)) - assert_array_equal(np.argwhere(x > 1), - [[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - def test_list(self): - assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) - - -class TestStringFunction(object): - - def test_set_string_function(self): - a = np.array([1]) - np.set_string_function(lambda x: "FOO", repr=True) - assert_equal(repr(a), "FOO") - np.set_string_function(None, repr=True) - assert_equal(repr(a), "array([1])") - - np.set_string_function(lambda x: "FOO", repr=False) - assert_equal(str(a), "FOO") - np.set_string_function(None, repr=False) - assert_equal(str(a), "[1]") - - -class TestRoll(object): - def test_roll1d(self): - x = np.arange(10) - xr = np.roll(x, 2) - assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) - - def test_roll2d(self): - x2 = np.reshape(np.arange(10), (2, 5)) - x2r = np.roll(x2, 1) - assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) - - x2r = np.roll(x2, 1, axis=0) - assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) - - x2r = np.roll(x2, 1, axis=1) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - # Roll multiple axes at once. 
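# ---- editor's note (illustration only; not from the deleted test file) ----
# A 1-D np.roll is just a slice-and-concatenate with the shift taken
# modulo the length; the multi-axis cases below apply the same wrap per
# axis. Minimal standalone sketch:
import numpy as np

x = np.arange(10)
k = 2
assert np.array_equal(np.roll(x, k), np.concatenate([x[-k:], x[:-k]]))
assert np.array_equal(np.roll(x, k + len(x)), np.roll(x, k))  # wraps modulo n
# ---------------------------------------------------------------------------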
- x2r = np.roll(x2, 1, axis=(0, 1)) - assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) - - x2r = np.roll(x2, (1, 0), axis=(0, 1)) - assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) - - x2r = np.roll(x2, (-1, 0), axis=(0, 1)) - assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) - - x2r = np.roll(x2, (0, 1), axis=(0, 1)) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - x2r = np.roll(x2, (0, -1), axis=(0, 1)) - assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]])) - - x2r = np.roll(x2, (1, 1), axis=(0, 1)) - assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) - - x2r = np.roll(x2, (-1, -1), axis=(0, 1)) - assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]])) - - # Roll the same axis multiple times. - x2r = np.roll(x2, 1, axis=(0, 0)) - assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])) - - x2r = np.roll(x2, 1, axis=(1, 1)) - assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]])) - - # Roll more than one turn in either direction. - x2r = np.roll(x2, 6, axis=1) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - x2r = np.roll(x2, -4, axis=1) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - def test_roll_empty(self): - x = np.array([]) - assert_equal(np.roll(x, 1), np.array([])) - - -class TestRollaxis(object): - - # expected shape indexed by (axis, start) for array of - # shape (1, 2, 3, 4) - tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4), - (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4), - (0, 4): (2, 3, 4, 1), - (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4), - (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4), - (1, 4): (1, 3, 4, 2), - (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4), - (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4), - (2, 4): (1, 2, 4, 3), - (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3), - (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4), - (3, 4): (1, 2, 3, 4)} - - def test_exceptions(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4) - assert_raises(np.AxisError, np.rollaxis, a, -5, 0) - assert_raises(np.AxisError, np.rollaxis, a, 0, -5) - assert_raises(np.AxisError, np.rollaxis, a, 4, 0) - assert_raises(np.AxisError, np.rollaxis, a, 0, 5) - - def test_results(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() - aind = np.indices(a.shape) - assert_(a.flags['OWNDATA']) - for (i, j) in self.tgtshape: - # positive axis, positive start - res = np.rollaxis(a, axis=i, start=j) - i0, i1, i2, i3 = aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(i, j)], str((i,j))) - assert_(not res.flags['OWNDATA']) - - # negative axis, positive start - ip = i + 1 - res = np.rollaxis(a, axis=-ip, start=j) - i0, i1, i2, i3 = aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(4 - ip, j)]) - assert_(not res.flags['OWNDATA']) - - # positive axis, negative start - jp = j + 1 if j < 4 else j - res = np.rollaxis(a, axis=i, start=-jp) - i0, i1, i2, i3 = aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(i, 4 - jp)]) - assert_(not res.flags['OWNDATA']) - - # negative axis, negative start - ip = i + 1 - jp = j + 1 if j < 4 else j - res = np.rollaxis(a, axis=-ip, start=-jp) - i0, i1, i2, i3 = aind[np.array(res.shape) - 1] - assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)]) - assert_(not res.flags['OWNDATA']) - - -class TestMoveaxis(object): 
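# ---- editor's note (illustration only; not from the deleted test file) ----
# What the TestMoveaxis cases below verify: moveaxis is a transpose whose
# order is built by removing the source axis and reinserting it at the
# destination. `moveaxis_like` is a hypothetical single-axis helper
# written only for illustration:
import numpy as np

def moveaxis_like(x, source, destination):
    src = source % x.ndim
    dst = destination % x.ndim
    order = [ax for ax in range(x.ndim) if ax != src]  # drop the source axis
    order.insert(dst, src)                             # reinsert at destination
    return np.transpose(x, order)

x = np.zeros((5, 6, 7))
assert moveaxis_like(x, 0, -1).shape == np.moveaxis(x, 0, -1).shape == (6, 7, 5)
# ---------------------------------------------------------------------------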
-    def test_move_to_end(self):
-        x = np.random.randn(5, 6, 7)
-        for source, expected in [(0, (6, 7, 5)),
-                                 (1, (5, 7, 6)),
-                                 (2, (5, 6, 7)),
-                                 (-1, (5, 6, 7))]:
-            actual = np.moveaxis(x, source, -1).shape
-            assert_equal(actual, expected)
-
-    def test_move_new_position(self):
-        x = np.random.randn(1, 2, 3, 4)
-        for source, destination, expected in [
-                (0, 1, (2, 1, 3, 4)),
-                (1, 2, (1, 3, 2, 4)),
-                (1, -1, (1, 3, 4, 2)),
-                ]:
-            actual = np.moveaxis(x, source, destination).shape
-            assert_equal(actual, expected)
-
-    def test_preserve_order(self):
-        x = np.zeros((1, 2, 3, 4))
-        for source, destination in [
-                (0, 0),
-                (3, -1),
-                (-1, 3),
-                ([0, -1], [0, -1]),
-                ([2, 0], [2, 0]),
-                (range(4), range(4)),
-                ]:
-            actual = np.moveaxis(x, source, destination).shape
-            assert_equal(actual, (1, 2, 3, 4))
-
-    def test_move_multiples(self):
-        x = np.zeros((0, 1, 2, 3))
-        for source, destination, expected in [
-                ([0, 1], [2, 3], (2, 3, 0, 1)),
-                ([2, 3], [0, 1], (2, 3, 0, 1)),
-                ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
-                ([3, 0], [1, 0], (0, 3, 1, 2)),
-                ([0, 3], [0, 1], (0, 3, 1, 2)),
-                ]:
-            actual = np.moveaxis(x, source, destination).shape
-            assert_equal(actual, expected)
-
-    def test_errors(self):
-        x = np.random.randn(1, 2, 3)
-        assert_raises_regex(np.AxisError, 'source.*out of bounds',
-                            np.moveaxis, x, 3, 0)
-        assert_raises_regex(np.AxisError, 'source.*out of bounds',
-                            np.moveaxis, x, -4, 0)
-        assert_raises_regex(np.AxisError, 'destination.*out of bounds',
-                            np.moveaxis, x, 0, 5)
-        assert_raises_regex(ValueError, 'repeated axis in `source`',
-                            np.moveaxis, x, [0, 0], [0, 1])
-        assert_raises_regex(ValueError, 'repeated axis in `destination`',
-                            np.moveaxis, x, [0, 1], [1, 1])
-        assert_raises_regex(ValueError, 'must have the same number',
-                            np.moveaxis, x, 0, [0, 1])
-        assert_raises_regex(ValueError, 'must have the same number',
-                            np.moveaxis, x, [0, 1], [0])
-
-    def test_array_likes(self):
-        x = np.ma.zeros((1, 2, 3))
-        result = np.moveaxis(x, 0, 0)
-        assert_equal(x.shape, result.shape)
-        assert_(isinstance(result, np.ma.MaskedArray))
-
-        x = [1, 2, 3]
-        result = np.moveaxis(x, 0, 0)
-        assert_equal(x, list(result))
-        assert_(isinstance(result, np.ndarray))
-
-
-class TestCross(object):
-    def test_2x2(self):
-        u = [1, 2]
-        v = [3, 4]
-        z = -2
-        cp = np.cross(u, v)
-        assert_equal(cp, z)
-        cp = np.cross(v, u)
-        assert_equal(cp, -z)
-
-    def test_2x3(self):
-        u = [1, 2]
-        v = [3, 4, 5]
-        z = np.array([10, -5, -2])
-        cp = np.cross(u, v)
-        assert_equal(cp, z)
-        cp = np.cross(v, u)
-        assert_equal(cp, -z)
-
-    def test_3x3(self):
-        u = [1, 2, 3]
-        v = [4, 5, 6]
-        z = np.array([-3, 6, -3])
-        cp = np.cross(u, v)
-        assert_equal(cp, z)
-        cp = np.cross(v, u)
-        assert_equal(cp, -z)
-
-    def test_broadcasting(self):
-        # Ticket #2624 (Trac #2032)
-        u = np.tile([1, 2], (11, 1))
-        v = np.tile([3, 4], (11, 1))
-        z = -2
-        assert_equal(np.cross(u, v), z)
-        assert_equal(np.cross(v, u), -z)
-        assert_equal(np.cross(u, u), 0)
-
-        u = np.tile([1, 2], (11, 1)).T
-        v = np.tile([3, 4, 5], (11, 1))
-        z = np.tile([10, -5, -2], (11, 1))
-        assert_equal(np.cross(u, v, axisa=0), z)
-        assert_equal(np.cross(v, u.T), -z)
-        assert_equal(np.cross(v, v), 0)
-
-        u = np.tile([1, 2, 3], (11, 1)).T
-        v = np.tile([3, 4], (11, 1)).T
-        z = np.tile([-12, 9, -2], (11, 1))
-        assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
-        assert_equal(np.cross(v.T, u.T), -z)
-        assert_equal(np.cross(u.T, u.T), 0)
-
-        u = np.tile([1, 2, 3], (5, 1))
-        v = np.tile([4, 5, 6], (5, 1)).T
-        z = np.tile([-3, 6, -3], (5, 1))
-        assert_equal(np.cross(u, v, axisb=0), z)
-        assert_equal(np.cross(v.T, u), -z)
-
assert_equal(np.cross(u, u), 0) - - def test_broadcasting_shapes(self): - u = np.ones((2, 1, 3)) - v = np.ones((5, 3)) - assert_equal(np.cross(u, v).shape, (2, 5, 3)) - u = np.ones((10, 3, 5)) - v = np.ones((2, 5)) - assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) - assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2) - assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0) - u = np.ones((10, 3, 5, 7)) - v = np.ones((5, 7, 2)) - assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) - assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2) - assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4) - # gh-5885 - u = np.ones((3, 4, 2)) - for axisc in range(-2, 2): - assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) - - -def test_outer_out_param(): - arr1 = np.ones((5,)) - arr2 = np.ones((2,)) - arr3 = np.linspace(-2, 2, 5) - out1 = np.ndarray(shape=(5,5)) - out2 = np.ndarray(shape=(2, 5)) - res1 = np.outer(arr1, arr3, out1) - assert_equal(res1, out1) - assert_equal(np.outer(arr2, arr3, out2), out2) - - -class TestIndices(object): - - def test_simple(self): - [x, y] = np.indices((4, 3)) - assert_array_equal(x, np.array([[0, 0, 0], - [1, 1, 1], - [2, 2, 2], - [3, 3, 3]])) - assert_array_equal(y, np.array([[0, 1, 2], - [0, 1, 2], - [0, 1, 2], - [0, 1, 2]])) - - def test_single_input(self): - [x] = np.indices((4,)) - assert_array_equal(x, np.array([0, 1, 2, 3])) - - [x] = np.indices((4,), sparse=True) - assert_array_equal(x, np.array([0, 1, 2, 3])) - - def test_scalar_input(self): - assert_array_equal([], np.indices(())) - assert_array_equal([], np.indices((), sparse=True)) - assert_array_equal([[]], np.indices((0,))) - assert_array_equal([[]], np.indices((0,), sparse=True)) - - def test_sparse(self): - [x, y] = np.indices((4,3), sparse=True) - assert_array_equal(x, np.array([[0], [1], [2], [3]])) - assert_array_equal(y, np.array([[0, 1, 2]])) - - @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) - @pytest.mark.parametrize("dims", [(), (0,), (4, 3)]) - def test_return_type(self, dtype, dims): - inds = np.indices(dims, dtype=dtype) - assert_(inds.dtype == dtype) - - for arr in np.indices(dims, dtype=dtype, sparse=True): - assert_(arr.dtype == dtype) - - -class TestRequire(object): - flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', - 'F', 'F_CONTIGUOUS', 'FORTRAN', - 'A', 'ALIGNED', - 'W', 'WRITEABLE', - 'O', 'OWNDATA'] - - def generate_all_false(self, dtype): - arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) - arr.setflags(write=False) - a = arr['a'] - assert_(not a.flags['C']) - assert_(not a.flags['F']) - assert_(not a.flags['O']) - assert_(not a.flags['W']) - assert_(not a.flags['A']) - return a - - def set_and_check_flag(self, flag, dtype, arr): - if dtype is None: - dtype = arr.dtype - b = np.require(arr, dtype, [flag]) - assert_(b.flags[flag]) - assert_(b.dtype == dtype) - - # a further call to np.require ought to return the same array - # unless OWNDATA is specified. 
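# ---- editor's note (illustration only; not from the deleted test file) ----
# The np.require contract exercised here: return an array satisfying the
# requested flags (copying only when necessary), and return the input
# itself when it already qualifies. Minimal standalone sketch:
import numpy as np

a = np.arange(6).reshape(2, 3)[:, ::2]          # non-contiguous view
b = np.require(a, np.float64, ['C', 'A', 'W'])  # forces a contiguous copy
assert b.flags['C_CONTIGUOUS'] and b.flags['WRITEABLE']
assert np.require(b, None, ['C']) is b          # already satisfied: no copy
# ---------------------------------------------------------------------------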
- c = np.require(b, None, [flag]) - if flag[0] != 'O': - assert_(c is b) - else: - assert_(c.flags[flag]) - - def test_require_each(self): - - id = ['f8', 'i4'] - fd = [None, 'f8', 'c16'] - for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): - a = self.generate_all_false(idtype) - self.set_and_check_flag(flag, fdtype, a) - - def test_unknown_requirement(self): - a = self.generate_all_false('f8') - assert_raises(KeyError, np.require, a, None, 'Q') - - def test_non_array_input(self): - a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) - assert_(a.flags['O']) - assert_(a.flags['C']) - assert_(a.flags['A']) - assert_(a.dtype == 'i4') - assert_equal(a, [1, 2, 3, 4]) - - def test_C_and_F_simul(self): - a = self.generate_all_false('f8') - assert_raises(ValueError, np.require, a, None, ['C', 'F']) - - def test_ensure_array(self): - class ArraySubclass(np.ndarray): - pass - - a = ArraySubclass((2, 2)) - b = np.require(a, None, ['E']) - assert_(type(b) is np.ndarray) - - def test_preserve_subtype(self): - class ArraySubclass(np.ndarray): - pass - - for flag in self.flag_names: - a = ArraySubclass((2, 2)) - self.set_and_check_flag(flag, None, a) - - -class TestBroadcast(object): - def test_broadcast_in_args(self): - # gh-5881 - arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), - np.empty((5, 1, 7))] - mits = [np.broadcast(*arrs), - np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])), - np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])), - np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), - np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] - for mit in mits: - assert_equal(mit.shape, (5, 6, 7)) - assert_equal(mit.ndim, 3) - assert_equal(mit.nd, 3) - assert_equal(mit.numiter, 4) - for a, ia in zip(arrs, mit.iters): - assert_(a is ia.base) - - def test_broadcast_single_arg(self): - # gh-6899 - arrs = [np.empty((5, 6, 7))] - mit = np.broadcast(*arrs) - assert_equal(mit.shape, (5, 6, 7)) - assert_equal(mit.ndim, 3) - assert_equal(mit.nd, 3) - assert_equal(mit.numiter, 1) - assert_(arrs[0] is mit.iters[0].base) - - def test_number_of_arguments(self): - arr = np.empty((5,)) - for j in range(35): - arrs = [arr] * j - if j > 32: - assert_raises(ValueError, np.broadcast, *arrs) - else: - mit = np.broadcast(*arrs) - assert_equal(mit.numiter, j) - - def test_broadcast_error_kwargs(self): - #gh-13455 - arrs = [np.empty((5, 6, 7))] - mit = np.broadcast(*arrs) - mit2 = np.broadcast(*arrs, **{}) - assert_equal(mit.shape, mit2.shape) - assert_equal(mit.ndim, mit2.ndim) - assert_equal(mit.nd, mit2.nd) - assert_equal(mit.numiter, mit2.numiter) - assert_(mit.iters[0].base is mit2.iters[0].base) - - assert_raises(ValueError, np.broadcast, 1, **{'x': 1}) - -class TestKeepdims(object): - - class sub_array(np.ndarray): - def sum(self, axis=None, dtype=None, out=None): - return np.ndarray.sum(self, axis, dtype, out, keepdims=True) - - def test_raise(self): - sub_class = self.sub_array - x = np.arange(30).view(sub_class) - assert_raises(TypeError, np.sum, x, keepdims=True) - - -class TestTensordot(object): - - def test_zero_dimension(self): - # Test resolution to issue #5663 - a = np.ndarray((3,0)) - b = np.ndarray((0,4)) - td = np.tensordot(a, b, (1, 0)) - assert_array_equal(td, np.dot(a, b)) - assert_array_equal(td, np.einsum('ij,jk', a, b)) - - def test_zero_dimensional(self): - # gh-12130 - arr_0d = np.array(1) - ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined - assert_array_equal(ret, arr_0d) diff --git 
a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numerictypes.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_numerictypes.py deleted file mode 100644 index 387740e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_numerictypes.py +++ /dev/null @@ -1,529 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import itertools - -import pytest -import numpy as np -from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY - -# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3, 2], [[6., 4.], [6., 4.]], 8), - ([4, 3], [[7., 5.], [7., 5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | +-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8), - ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9), - ] - - -byteorder = {'little':'<', 'big':'>'}[sys.byteorder] - -def normalize_descr(descr): - "Normalize a description adding the platform byteorder." 
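# ---- editor's note (illustration only; not from the deleted test file) ----
# Why normalize_descr below prepends byte-order characters: dtype.descr
# always spells the order out, so plain codes like 'i4' come back as
# '<i4' (little-endian) or '>i4' (big-endian), and one-byte or void
# types use the order-free '|'. Minimal standalone sketch:
import numpy as np

dt = np.dtype([('x', 'i4'), ('z', 'u1')])
# e.g. [('x', '<i4'), ('z', '|u1')] on a little-endian platform
assert all(fmt[0] in '<>|' for _, fmt in dt.descr)
# ---------------------------------------------------------------------------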
- - out = [] - for item in descr: - dtype = item[1] - if isinstance(dtype, str): - if dtype[0] not in ['|', '<', '>']: - onebyte = dtype[1:] == "1" - if onebyte or dtype[0] in ['S', 'V', 'b']: - dtype = "|" + dtype - else: - dtype = byteorder + dtype - if len(item) > 2 and np.prod(item[2]) > 1: - nitem = (item[0], dtype, item[2]) - else: - nitem = (item[0], dtype) - out.append(nitem) - elif isinstance(dtype, list): - l = normalize_descr(dtype) - out.append((item[0], l)) - else: - raise ValueError("Expected a str or list and got %s" % - (type(item))) - return out - - -############################################################ -# Creation tests -############################################################ - -class CreateZeros(object): - """Check the creation of heterogeneous arrays zero-valued""" - - def test_zeros0D(self): - """Check creation of 0-dimensional objects""" - h = np.zeros((), dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - assert_(h.dtype.fields['x'][0].name[:4] == 'void') - assert_(h.dtype.fields['x'][0].char == 'V') - assert_(h.dtype.fields['x'][0].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((), dtype='u1')) - - def test_zerosSD(self): - """Check creation of single-dimensional objects""" - h = np.zeros((2,), dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - assert_(h.dtype['y'].name[:4] == 'void') - assert_(h.dtype['y'].char == 'V') - assert_(h.dtype['y'].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2,), dtype='u1')) - - def test_zerosMD(self): - """Check creation of multi-dimensional objects""" - h = np.zeros((2, 3), dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - assert_(h.dtype['z'].name == 'uint8') - assert_(h.dtype['z'].char == 'B') - assert_(h.dtype['z'].type == np.uint8) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) - - -class TestCreateZerosPlain(CreateZeros): - """Check the creation of heterogeneous arrays zero-valued (plain)""" - _descr = Pdescr - -class TestCreateZerosNested(CreateZeros): - """Check the creation of heterogeneous arrays zero-valued (nested)""" - _descr = Ndescr - - -class CreateValues(object): - """Check the creation of heterogeneous arrays with values""" - - def test_tuple(self): - """Check creation from tuples""" - h = np.array(self._buffer, dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - assert_(h.shape == (2,)) - else: - assert_(h.shape == ()) - - def test_list_of_tuple(self): - """Check creation from list of tuples""" - h = np.array([self._buffer], dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - assert_(h.shape == (1, 2)) - else: - assert_(h.shape == (1,)) - - def test_list_of_list_of_tuple(self): - """Check creation from list of list of tuples""" - h = np.array([[self._buffer]], dtype=self._descr) - assert_(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - assert_(h.shape == (1, 1, 2)) - else: - assert_(h.shape == (1, 1)) - - -class TestCreateValuesPlainSingle(CreateValues): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class TestCreateValuesPlainMultiple(CreateValues): - """Check the creation of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class 
TestCreateValuesNestedSingle(CreateValues): - """Check the creation of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = 0 - _buffer = NbufferT[0] - -class TestCreateValuesNestedMultiple(CreateValues): - """Check the creation of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = 1 - _buffer = NbufferT - - -############################################################ -# Reading tests -############################################################ - -class ReadValuesPlain(object): - """Check the reading of values in heterogeneous arrays (plain)""" - - def test_access_fields(self): - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) - else: - assert_(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][1], - self._buffer[1][1]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][2], - self._buffer[1][2]], dtype='u1')) - - -class TestReadValuesPlainSingle(ReadValuesPlain): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class TestReadValuesPlainMultiple(ReadValuesPlain): - """Check the values of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class ReadValuesNested(object): - """Check the reading of values in heterogeneous arrays (nested)""" - - def test_access_top_fields(self): - """Check reading the top fields of a nested array""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) - else: - assert_(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][4], - self._buffer[1][4]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][5], - self._buffer[1][5]], dtype='u1')) - - def test_nested1_acessors(self): - """Check reading the nested fields of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['value'], - np.array(self._buffer[1][0], dtype='c16')) - assert_equal(h['Info']['y2'], - np.array(self._buffer[1][1], dtype='f8')) - assert_equal(h['info']['Name'], - np.array(self._buffer[3][0], dtype='U2')) - assert_equal(h['info']['Value'], - np.array(self._buffer[3][1], dtype='c16')) - else: - assert_equal(h['Info']['value'], - np.array([self._buffer[0][1][0], - self._buffer[1][1][0]], - dtype='c16')) - assert_equal(h['Info']['y2'], - np.array([self._buffer[0][1][1], - self._buffer[1][1][1]], - dtype='f8')) - assert_equal(h['info']['Name'], - np.array([self._buffer[0][3][0], - self._buffer[1][3][0]], - dtype='U2')) - assert_equal(h['info']['Value'], - np.array([self._buffer[0][3][1], - self._buffer[1][3][1]], - dtype='c16')) - - def test_nested2_acessors(self): - """Check reading the nested fields of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - 
assert_equal(h['Info']['Info2']['value'], - np.array(self._buffer[1][2][1], dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array(self._buffer[1][2][3], dtype='u4')) - else: - assert_equal(h['Info']['Info2']['value'], - np.array([self._buffer[0][1][2][1], - self._buffer[1][1][2][1]], - dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array([self._buffer[0][1][2][3], - self._buffer[1][1][2][3]], - dtype='u4')) - - def test_nested1_descriptor(self): - """Check access nested descriptors of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - assert_(h.dtype['Info']['value'].name == 'complex128') - assert_(h.dtype['Info']['y2'].name == 'float64') - if sys.version_info[0] >= 3: - assert_(h.dtype['info']['Name'].name == 'str256') - else: - assert_(h.dtype['info']['Name'].name == 'unicode256') - assert_(h.dtype['info']['Value'].name == 'complex128') - - def test_nested2_descriptor(self): - """Check access nested descriptors of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - assert_(h.dtype['Info']['Info2']['value'].name == 'void256') - assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') - - -class TestReadValuesNestedSingle(ReadValuesNested): - """Check the values of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = False - _buffer = NbufferT[0] - -class TestReadValuesNestedMultiple(ReadValuesNested): - """Check the values of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = True - _buffer = NbufferT - -class TestEmptyField(object): - def test_assign(self): - a = np.arange(10, dtype=np.float32) - a.dtype = [("int", "<0i4"), ("float", "<2f4")] - assert_(a['int'].shape == (5, 0)) - assert_(a['float'].shape == (5, 2)) - -class TestCommonType(object): - def test_scalar_loses1(self): - res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) - assert_(res == 'f4') - - def test_scalar_loses2(self): - res = np.find_common_type(['f4', 'f4'], ['i8']) - assert_(res == 'f4') - - def test_scalar_wins(self): - res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) - assert_(res == 'c8') - - def test_scalar_wins2(self): - res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) - assert_(res == 'f8') - - def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose - res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) - assert_(res == 'f8') - -class TestMultipleFields(object): - def setup(self): - self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') - - def _bad_call(self): - return self.ary['f0', 'f1'] - - def test_no_tuple(self): - assert_raises(IndexError, self._bad_call) - - def test_return(self): - res = self.ary[['f0', 'f2']].tolist() - assert_(res == [(1, 3), (5, 7)]) - - -class TestIsSubDType(object): - # scalar types can be promoted into dtypes - wrappers = [np.dtype, lambda x: x] - - def test_both_abstract(self): - assert_(np.issubdtype(np.floating, np.inexact)) - assert_(not np.issubdtype(np.inexact, np.floating)) - - def test_same(self): - for cls in (np.float32, np.int32): - for w1, w2 in itertools.product(self.wrappers, repeat=2): - assert_(np.issubdtype(w1(cls), w2(cls))) - - def test_subclass(self): - # note we cannot promote floating to a dtype, as it would turn into a - # concrete type - for w in self.wrappers: - assert_(np.issubdtype(w(np.float32), np.floating)) - assert_(np.issubdtype(w(np.float64), np.floating)) - - def test_subclass_backwards(self): - for w in self.wrappers: - assert_(not np.issubdtype(np.floating, 
w(np.float32))) - assert_(not np.issubdtype(np.floating, w(np.float64))) - - def test_sibling_class(self): - for w1, w2 in itertools.product(self.wrappers, repeat=2): - assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) - assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) - - -class TestSctypeDict(object): - def test_longdouble(self): - assert_(np.sctypeDict['f8'] is not np.longdouble) - assert_(np.sctypeDict['c16'] is not np.clongdouble) - - -class TestBitName(object): - def test_abstract(self): - assert_raises(ValueError, np.core.numerictypes.bitname, np.floating) - - -class TestMaximumSctype(object): - - # note that parametrizing with sctype['int'] and similar would skip types - # with the same size (gh-11923) - - @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong]) - def test_int(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1]) - - @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]) - def test_uint(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1]) - - @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) - def test_float(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1]) - - @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) - def test_complex(self, t): - assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1]) - - @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void]) - def test_other(self, t): - assert_equal(np.maximum_sctype(t), t) - - -class Test_sctype2char(object): - # This function is old enough that we're really just documenting the quirks - # at this point. - - def test_scalar_type(self): - assert_equal(np.sctype2char(np.double), 'd') - assert_equal(np.sctype2char(np.int_), 'l') - assert_equal(np.sctype2char(np.unicode_), 'U') - assert_equal(np.sctype2char(np.bytes_), 'S') - - def test_other_type(self): - assert_equal(np.sctype2char(float), 'd') - assert_equal(np.sctype2char(list), 'O') - assert_equal(np.sctype2char(np.ndarray), 'O') - - def test_third_party_scalar_type(self): - from numpy.core._rational_tests import rational - assert_raises(KeyError, np.sctype2char, rational) - assert_raises(KeyError, np.sctype2char, rational(1)) - - def test_array_instance(self): - assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd') - - def test_abstract_type(self): - assert_raises(KeyError, np.sctype2char, np.floating) - - def test_non_type(self): - assert_raises(ValueError, np.sctype2char, 1) - -@pytest.mark.parametrize("rep, expected", [ - (np.int32, True), - (list, False), - (1.1, False), - (str, True), - (np.dtype(np.float64), True), - (np.dtype((np.int16, (3, 4))), True), - (np.dtype([('a', np.int8)]), True), - ]) -def test_issctype(rep, expected): - # ensure proper identification of scalar - # data-types by issctype() - actual = np.issctype(rep) - assert_equal(actual, expected) - - -@pytest.mark.skipif(sys.flags.optimize > 1, - reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -class TestDocStrings(object): - def test_platform_dependent_aliases(self): - if np.int64 is np.int_: - assert_('int64' in np.int_.__doc__) - elif np.int64 is np.longlong: - assert_('int64' in np.longlong.__doc__) - - -class TestScalarTypeNames: - # gh-9799 - - numeric_types = [ - np.byte, np.short, np.intc, np.int_, np.longlong, - np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong, - 
np.half, np.single, np.double, np.longdouble, - np.csingle, np.cdouble, np.clongdouble, - ] - - def test_names_are_unique(self): - # none of the above may be aliases for each other - assert len(set(self.numeric_types)) == len(self.numeric_types) - - # names must be unique - names = [t.__name__ for t in self.numeric_types] - assert len(set(names)) == len(names) - - @pytest.mark.parametrize('t', numeric_types) - def test_names_reflect_attributes(self, t): - """ Test that names correspond to where the type is under ``np.`` """ - assert getattr(np, t.__name__) is t - - @pytest.mark.parametrize('t', numeric_types) - def test_names_are_undersood_by_dtype(self, t): - """ Test the dtype constructor maps names back to the type """ - assert np.dtype(t.__name__).type is t diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_overrides.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_overrides.py deleted file mode 100644 index 63b0e45..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_overrides.py +++ /dev/null @@ -1,429 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import inspect -import sys -from unittest import mock - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex) -from numpy.core.overrides import ( - _get_implementing_args, array_function_dispatch, - verify_matching_signatures, ARRAY_FUNCTION_ENABLED) -from numpy.compat import pickle -import pytest - - -requires_array_function = pytest.mark.skipif( - not ARRAY_FUNCTION_ENABLED, - reason="__array_function__ dispatch not enabled.") - - -def _return_not_implemented(self, *args, **kwargs): - return NotImplemented - - -# need to define this at the top level to test pickling -@array_function_dispatch(lambda array: (array,)) -def dispatched_one_arg(array): - """Docstring.""" - return 'original' - - -@array_function_dispatch(lambda array1, array2: (array1, array2)) -def dispatched_two_arg(array1, array2): - """Docstring.""" - return 'original' - - -class TestGetImplementingArgs(object): - - def test_ndarray(self): - array = np.array(1) - - args = _get_implementing_args([array]) - assert_equal(list(args), [array]) - - args = _get_implementing_args([array, array]) - assert_equal(list(args), [array]) - - args = _get_implementing_args([array, 1]) - assert_equal(list(args), [array]) - - args = _get_implementing_args([1, array]) - assert_equal(list(args), [array]) - - def test_ndarray_subclasses(self): - - class OverrideSub(np.ndarray): - __array_function__ = _return_not_implemented - - class NoOverrideSub(np.ndarray): - pass - - array = np.array(1).view(np.ndarray) - override_sub = np.array(1).view(OverrideSub) - no_override_sub = np.array(1).view(NoOverrideSub) - - args = _get_implementing_args([array, override_sub]) - assert_equal(list(args), [override_sub, array]) - - args = _get_implementing_args([array, no_override_sub]) - assert_equal(list(args), [no_override_sub, array]) - - args = _get_implementing_args( - [override_sub, no_override_sub]) - assert_equal(list(args), [override_sub, no_override_sub]) - - def test_ndarray_and_duck_array(self): - - class Other(object): - __array_function__ = _return_not_implemented - - array = np.array(1) - other = Other() - - args = _get_implementing_args([other, array]) - assert_equal(list(args), [other, array]) - - args = _get_implementing_args([array, other]) - assert_equal(list(args), [array, other]) - - def test_ndarray_subclass_and_duck_array(self): - - class 
OverrideSub(np.ndarray): - __array_function__ = _return_not_implemented - - class Other(object): - __array_function__ = _return_not_implemented - - array = np.array(1) - subarray = np.array(1).view(OverrideSub) - other = Other() - - assert_equal(_get_implementing_args([array, subarray, other]), - [subarray, array, other]) - assert_equal(_get_implementing_args([array, other, subarray]), - [subarray, array, other]) - - def test_many_duck_arrays(self): - - class A(object): - __array_function__ = _return_not_implemented - - class B(A): - __array_function__ = _return_not_implemented - - class C(A): - __array_function__ = _return_not_implemented - - class D(object): - __array_function__ = _return_not_implemented - - a = A() - b = B() - c = C() - d = D() - - assert_equal(_get_implementing_args([1]), []) - assert_equal(_get_implementing_args([a]), [a]) - assert_equal(_get_implementing_args([a, 1]), [a]) - assert_equal(_get_implementing_args([a, a, a]), [a]) - assert_equal(_get_implementing_args([a, d, a]), [a, d]) - assert_equal(_get_implementing_args([a, b]), [b, a]) - assert_equal(_get_implementing_args([b, a]), [b, a]) - assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) - assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) - - def test_too_many_duck_arrays(self): - namespace = dict(__array_function__=_return_not_implemented) - types = [type('A' + str(i), (object,), namespace) for i in range(33)] - relevant_args = [t() for t in types] - - actual = _get_implementing_args(relevant_args[:32]) - assert_equal(actual, relevant_args[:32]) - - with assert_raises_regex(TypeError, 'distinct argument types'): - _get_implementing_args(relevant_args) - - -class TestNDArrayArrayFunction(object): - - @requires_array_function - def test_method(self): - - class Other(object): - __array_function__ = _return_not_implemented - - class NoOverrideSub(np.ndarray): - pass - - class OverrideSub(np.ndarray): - __array_function__ = _return_not_implemented - - array = np.array([1]) - other = Other() - no_override_sub = array.view(NoOverrideSub) - override_sub = array.view(OverrideSub) - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray,), - args=(array, 1.), kwargs={}) - assert_equal(result, 'original') - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray, Other), - args=(array, other), kwargs={}) - assert_(result is NotImplemented) - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray, NoOverrideSub), - args=(array, no_override_sub), - kwargs={}) - assert_equal(result, 'original') - - result = array.__array_function__(func=dispatched_two_arg, - types=(np.ndarray, OverrideSub), - args=(array, override_sub), - kwargs={}) - assert_equal(result, 'original') - - with assert_raises_regex(TypeError, 'no implementation found'): - np.concatenate((array, other)) - - expected = np.concatenate((array, array)) - result = np.concatenate((array, no_override_sub)) - assert_equal(result, expected.view(NoOverrideSub)) - result = np.concatenate((array, override_sub)) - assert_equal(result, expected.view(OverrideSub)) - - def test_no_wrapper(self): - # This shouldn't happen unless a user intentionally calls - # __array_function__ with invalid arguments, but check that we raise - # an appropriate error all the same. 
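# Illustrative sketch (editor addition, not from the deleted file), assuming
# NumPy >= 1.17 where __array_function__ dispatch is on by default: a minimal
# duck array that intercepts a dispatched numpy function, the protocol these
# tests exercise.
import numpy as np

class EchoArray(object):
    def __array_function__(self, func, types, args, kwargs):
        # Report which public numpy function was dispatched to us.
        return func.__name__

assert np.concatenate([EchoArray()]) == 'concatenate'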
- array = np.array(1) - func = lambda x: x - with assert_raises_regex(AttributeError, '_implementation'): - array.__array_function__(func=func, types=(np.ndarray,), - args=(array,), kwargs={}) - - -@requires_array_function -class TestArrayFunctionDispatch(object): - - def test_pickle(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - roundtripped = pickle.loads( - pickle.dumps(dispatched_one_arg, protocol=proto)) - assert_(roundtripped is dispatched_one_arg) - - def test_name_and_docstring(self): - assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') - if sys.flags.optimize < 2: - assert_equal(dispatched_one_arg.__doc__, 'Docstring.') - - def test_interface(self): - - class MyArray(object): - def __array_function__(self, func, types, args, kwargs): - return (self, func, types, args, kwargs) - - original = MyArray() - (obj, func, types, args, kwargs) = dispatched_one_arg(original) - assert_(obj is original) - assert_(func is dispatched_one_arg) - assert_equal(set(types), {MyArray}) - # assert_equal uses the overloaded np.iscomplexobj() internally - assert_(args == (original,)) - assert_equal(kwargs, {}) - - def test_not_implemented(self): - - class MyArray(object): - def __array_function__(self, func, types, args, kwargs): - return NotImplemented - - array = MyArray() - with assert_raises_regex(TypeError, 'no implementation found'): - dispatched_one_arg(array) - - -@requires_array_function -class TestVerifyMatchingSignatures(object): - - def test_verify_matching_signatures(self): - - verify_matching_signatures(lambda x: 0, lambda x: 0) - verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) - verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) - - with assert_raises(RuntimeError): - verify_matching_signatures(lambda a: 0, lambda b: 0) - with assert_raises(RuntimeError): - verify_matching_signatures(lambda x: 0, lambda x=None: 0) - with assert_raises(RuntimeError): - verify_matching_signatures(lambda x=None: 0, lambda y=None: 0) - with assert_raises(RuntimeError): - verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) - - def test_array_function_dispatch(self): - - with assert_raises(RuntimeError): - @array_function_dispatch(lambda x: (x,)) - def f(y): - pass - - # should not raise - @array_function_dispatch(lambda x: (x,), verify=False) - def f(y): - pass - - -def _new_duck_type_and_implements(): - """Create a duck array type and implements functions.""" - HANDLED_FUNCTIONS = {} - - class MyArray(object): - def __array_function__(self, func, types, args, kwargs): - if func not in HANDLED_FUNCTIONS: - return NotImplemented - if not all(issubclass(t, MyArray) for t in types): - return NotImplemented - return HANDLED_FUNCTIONS[func](*args, **kwargs) - - def implements(numpy_function): - """Register an __array_function__ implementations.""" - def decorator(func): - HANDLED_FUNCTIONS[numpy_function] = func - return func - return decorator - - return (MyArray, implements) - - -@requires_array_function -class TestArrayFunctionImplementation(object): - - def test_one_arg(self): - MyArray, implements = _new_duck_type_and_implements() - - @implements(dispatched_one_arg) - def _(array): - return 'myarray' - - assert_equal(dispatched_one_arg(1), 'original') - assert_equal(dispatched_one_arg(MyArray()), 'myarray') - - def test_optional_args(self): - MyArray, implements = _new_duck_type_and_implements() - - @array_function_dispatch(lambda array, option=None: (array,)) - def func_with_option(array, option='default'): - return option - - 
@implements(func_with_option) - def my_array_func_with_option(array, new_option='myarray'): - return new_option - - # we don't need to implement every option on __array_function__ - # implementations - assert_equal(func_with_option(1), 'default') - assert_equal(func_with_option(1, option='extra'), 'extra') - assert_equal(func_with_option(MyArray()), 'myarray') - with assert_raises(TypeError): - func_with_option(MyArray(), option='extra') - - # but new options on implementations can't be used - result = my_array_func_with_option(MyArray(), new_option='yes') - assert_equal(result, 'yes') - with assert_raises(TypeError): - func_with_option(MyArray(), new_option='no') - - def test_not_implemented(self): - MyArray, implements = _new_duck_type_and_implements() - - @array_function_dispatch(lambda array: (array,), module='my') - def func(array): - return array - - array = np.array(1) - assert_(func(array) is array) - assert_equal(func.__module__, 'my') - - with assert_raises_regex( - TypeError, "no implementation found for 'my.func'"): - func(MyArray()) - - -class TestNDArrayMethods(object): - - def test_repr(self): - # gh-12162: should still be defined even if __array_function__ doesn't - # implement np.array_repr() - - class MyArray(np.ndarray): - def __array_function__(*args, **kwargs): - return NotImplemented - - array = np.array(1).view(MyArray) - assert_equal(repr(array), 'MyArray(1)') - assert_equal(str(array), '1') - - -class TestNumPyFunctions(object): - - def test_set_module(self): - assert_equal(np.sum.__module__, 'numpy') - assert_equal(np.char.equal.__module__, 'numpy.char') - assert_equal(np.fft.fft.__module__, 'numpy.fft') - assert_equal(np.linalg.solve.__module__, 'numpy.linalg') - - def test_inspect_sum(self): - signature = inspect.signature(np.sum) - assert_('axis' in signature.parameters) - - @requires_array_function - def test_override_sum(self): - MyArray, implements = _new_duck_type_and_implements() - - @implements(np.sum) - def _(array): - return 'yes' - - assert_equal(np.sum(MyArray()), 'yes') - - @requires_array_function - def test_sum_on_mock_array(self): - - # We need a proxy for mocks because __array_function__ is only looked - # up in the class dict - class ArrayProxy: - def __init__(self, value): - self.value = value - def __array_function__(self, *args, **kwargs): - return self.value.__array_function__(*args, **kwargs) - def __array__(self, *args, **kwargs): - return self.value.__array__(*args, **kwargs) - - proxy = ArrayProxy(mock.Mock(spec=ArrayProxy)) - proxy.value.__array_function__.return_value = 1 - result = np.sum(proxy) - assert_equal(result, 1) - proxy.value.__array_function__.assert_called_once_with( - np.sum, (ArrayProxy,), (proxy,), {}) - proxy.value.__array__.assert_not_called() - - @requires_array_function - def test_sum_forwarding_implementation(self): - - class MyArray(np.ndarray): - - def sum(self, axis, out): - return 'summed' - - def __array_function__(self, func, types, args, kwargs): - return super().__array_function__(func, types, args, kwargs) - - # note: the internal implementation of np.sum() calls the .sum() method - array = np.array(1).view(MyArray) - assert_equal(np.sum(array), 'summed') diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_print.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_print.py deleted file mode 100644 index c5c091e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_print.py +++ /dev/null @@ -1,205 +0,0 @@ -from __future__ import division, absolute_import, print_function 
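# Illustrative sketch (editor addition, not from the deleted file): str() of
# a double-precision numpy scalar matches builtin float formatting, the
# baseline the formatting tests below compare against.
import numpy as np

assert str(np.float64(1e16)) == str(float(1e16)) == '1e+16'
assert str(np.float64('inf')) == 'inf' and str(np.float64('nan')) == 'nan'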
- -import sys - -import pytest - -import numpy as np -from numpy.testing import assert_, assert_equal -from numpy.core.tests._locales import CommaDecimalPointLocale - - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} - - -@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) -def test_float_types(tp): - """ Check formatting. - - This is only for the str function, and only for simple types. - The precision of np.float32 and np.longdouble aren't the same as the - python float precision. - - """ - for x in [0, 1, -1, 1e20]: - assert_equal(str(tp(x)), str(float(x)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e16).itemsize > 4: - assert_equal(str(tp(1e16)), str(float('1e16')), - err_msg='Failed str formatting for type %s' % tp) - else: - ref = '1e+16' - assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) - - -@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) -def test_nan_inf_float(tp): - """ Check formatting of nan & inf. - - This is only for the str function, and only for simple types. - The precision of np.float32 and np.longdouble aren't the same as the - python float precision. - - """ - for x in [np.inf, -np.inf, np.nan]: - assert_equal(str(tp(x)), _REF[x], - err_msg='Failed str formatting for type %s' % tp) - - -@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) -def test_complex_types(tp): - """Check formatting of complex types. - - This is only for the str function, and only for simple types. - The precision of np.float32 and np.longdouble aren't the same as the - python float precision. - - """ - for x in [0, 1, -1, 1e20]: - assert_equal(str(tp(x)), str(complex(x)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x*1j)), str(complex(x*1j)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e16).itemsize > 8: - assert_equal(str(tp(1e16)), str(complex(1e16)), - err_msg='Failed str formatting for type %s' % tp) - else: - ref = '(1e+16+0j)' - assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) - - -@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) -def test_complex_inf_nan(dtype): - """Check inf/nan formatting of complex types.""" - TESTS = { - complex(np.inf, 0): "(inf+0j)", - complex(0, np.inf): "infj", - complex(-np.inf, 0): "(-inf+0j)", - complex(0, -np.inf): "-infj", - complex(np.inf, 1): "(inf+1j)", - complex(1, np.inf): "(1+infj)", - complex(-np.inf, 1): "(-inf+1j)", - complex(1, -np.inf): "(1-infj)", - complex(np.nan, 0): "(nan+0j)", - complex(0, np.nan): "nanj", - complex(-np.nan, 0): "(nan+0j)", - complex(0, -np.nan): "nanj", - complex(np.nan, 1): "(nan+1j)", - complex(1, np.nan): "(1+nanj)", - complex(-np.nan, 1): "(nan+1j)", - complex(1, -np.nan): "(1+nanj)", - } - for c, s in TESTS.items(): - assert_equal(str(dtype(c)), s) - - -# print tests -def _test_redirected_print(x, tp, ref=None): - file = StringIO() - file_tp = StringIO() - stdout = sys.stdout - try: - sys.stdout = file_tp - print(tp(x)) - sys.stdout = file - if ref: - print(ref) - else: - print(x) - finally: - sys.stdout = stdout - - assert_equal(file.getvalue(), file_tp.getvalue(), - err_msg='print failed for type%s' % tp) - - -@pytest.mark.parametrize('tp', [np.float32, np.double, 
np.longdouble]) -def test_float_type_print(tp): - """Check formatting when using print """ - for x in [0, 1, -1, 1e20]: - _test_redirected_print(float(x), tp) - - for x in [np.inf, -np.inf, np.nan]: - _test_redirected_print(float(x), tp, _REF[x]) - - if tp(1e16).itemsize > 4: - _test_redirected_print(float(1e16), tp) - else: - ref = '1e+16' - _test_redirected_print(float(1e16), tp, ref) - - -@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) -def test_complex_type_print(tp): - """Check formatting when using print """ - # We do not create complex with inf/nan directly because the feature is - # missing in python < 2.6 - for x in [0, 1, -1, 1e20]: - _test_redirected_print(complex(x), tp) - - if tp(1e16).itemsize > 8: - _test_redirected_print(complex(1e16), tp) - else: - ref = '(1e+16+0j)' - _test_redirected_print(complex(1e16), tp, ref) - - _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') - _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') - _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') - - -def test_scalar_format(): - """Test the str.format method with NumPy scalar types""" - tests = [('{0}', True, np.bool_), - ('{0}', False, np.bool_), - ('{0:d}', 130, np.uint8), - ('{0:d}', 50000, np.uint16), - ('{0:d}', 3000000000, np.uint32), - ('{0:d}', 15000000000000000000, np.uint64), - ('{0:d}', -120, np.int8), - ('{0:d}', -30000, np.int16), - ('{0:d}', -2000000000, np.int32), - ('{0:d}', -7000000000000000000, np.int64), - ('{0:g}', 1.5, np.float16), - ('{0:g}', 1.5, np.float32), - ('{0:g}', 1.5, np.float64), - ('{0:g}', 1.5, np.longdouble), - ('{0:g}', 1.5+0.5j, np.complex64), - ('{0:g}', 1.5+0.5j, np.complex128), - ('{0:g}', 1.5+0.5j, np.clongdouble)] - - for (fmat, val, valtype) in tests: - try: - assert_equal(fmat.format(val), fmat.format(valtype(val)), - "failed with val %s, type %s" % (val, valtype)) - except ValueError as e: - assert_(False, - "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % - (fmat, repr(val), repr(valtype), str(e))) - - -# -# Locale tests: scalar types formatting should be independent of the locale -# - -class TestCommaDecimalPointLocale(CommaDecimalPointLocale): - - def test_locale_single(self): - assert_equal(str(np.float32(1.2)), str(float(1.2))) - - def test_locale_double(self): - assert_equal(str(np.double(1.2)), str(float(1.2))) - - def test_locale_longdouble(self): - assert_equal(str(np.longdouble('1.2')), str(float(1.2))) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_records.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_records.py deleted file mode 100644 index c1b7941..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_records.py +++ /dev/null @@ -1,501 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import textwrap -from os import path -import pytest - -import numpy as np -from numpy.compat import Path -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_array_almost_equal, - assert_raises, temppath - ) -from numpy.compat import pickle - - -class TestFromrecords(object): - def test_fromrecords(self): - r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], - names='col1,col2,col3') - assert_equal(r[0].item(), (456, 'dbe', 1.2)) - 
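# Illustrative sketch (editor addition, not from the deleted file): record
# arrays built by np.rec.fromrecords expose each field both by key and by
# attribute, which the surrounding assertions rely on.
import numpy as np

r = np.rec.fromrecords([(456, 'dbe', 1.2)], names='col1,col2,col3')
assert r.col1[0] == 456 and r[0].col2 == 'dbe'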
assert_equal(r['col1'].dtype.kind, 'i') - if sys.version_info[0] >= 3: - assert_equal(r['col2'].dtype.kind, 'U') - assert_equal(r['col2'].dtype.itemsize, 12) - else: - assert_equal(r['col2'].dtype.kind, 'S') - assert_equal(r['col2'].dtype.itemsize, 3) - assert_equal(r['col3'].dtype.kind, 'f') - - def test_fromrecords_0len(self): - """ Verify fromrecords works with a 0-length input """ - dtype = [('a', float), ('b', float)] - r = np.rec.fromrecords([], dtype=dtype) - assert_equal(r.shape, (0,)) - - def test_fromrecords_2d(self): - data = [ - [(1, 2), (3, 4), (5, 6)], - [(6, 5), (4, 3), (2, 1)] - ] - expected_a = [[1, 3, 5], [6, 4, 2]] - expected_b = [[2, 4, 6], [5, 3, 1]] - - # try with dtype - r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)]) - assert_equal(r1['a'], expected_a) - assert_equal(r1['b'], expected_b) - - # try with names - r2 = np.rec.fromrecords(data, names=['a', 'b']) - assert_equal(r2['a'], expected_a) - assert_equal(r2['b'], expected_b) - - assert_equal(r1, r2) - - def test_method_array(self): - r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big') - assert_equal(r[1].item(), (25444, b'efg', 1633837924)) - - def test_method_array2(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1].item(), (2, 22.0, b'b')) - - def test_recarray_slices(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1::2][1].item(), (4, 44.0, b'd')) - - def test_recarray_fromarrays(self): - x1 = np.array([1, 2, 3, 4]) - x2 = np.array(['a', 'dd', 'xyz', '12']) - x3 = np.array([1.1, 2, 3, 4]) - r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') - assert_equal(r[1].item(), (2, 'dd', 2.0)) - x1[1] = 34 - assert_equal(r.a, np.array([1, 2, 3, 4])) - - def test_recarray_fromfile(self): - data_dir = path.join(path.dirname(__file__), 'data') - filename = path.join(data_dir, 'recarray_from_file.fits') - fd = open(filename, 'rb') - fd.seek(2880 * 2) - r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') - fd.seek(2880 * 2) - r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big') - fd.close() - assert_equal(r1, r2) - - def test_recarray_from_obj(self): - count = 10 - a = np.zeros(count, dtype='O') - b = np.zeros(count, dtype='f8') - c = np.zeros(count, dtype='f8') - for i in range(len(a)): - a[i] = list(range(1, 10)) - - mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') - for i in range(len(a)): - assert_((mine.date[i] == list(range(1, 10)))) - assert_((mine.data1[i] == 0.0)) - assert_((mine.data2[i] == 0.0)) - - def test_recarray_repr(self): - a = np.array([(1, 0.1), (2, 0.2)], - dtype=[('foo', ' 2) & (a < 6)) - xb = np.where((b > 2) & (b < 6)) - ya = ((a > 2) & (a < 6)) - yb = ((b > 2) & (b < 6)) - assert_array_almost_equal(xa, ya.nonzero()) - assert_array_almost_equal(xb, yb.nonzero()) - assert_(np.all(a[ya] > 0.5)) - assert_(np.all(b[yb] > 0.5)) - - def test_endian_where(self): - # GitHub issue #369 - net = np.zeros(3, dtype='>f4') - net[1] = 0.00458849 - net[2] = 0.605202 - max_net = net.max() - test = np.where(net <= 0., max_net, net) - correct = np.array([ 0.60520202, 0.00458849, 0.60520202]) - assert_array_almost_equal(test, correct) - - def test_endian_recarray(self): - # Ticket #2185 - dt = np.dtype([ - ('head', '>u4'), - ('data', '>u4', 2), - ]) - buf = np.recarray(1, dtype=dt) - buf[0]['head'] = 
1 - buf[0]['data'][:] = [1, 1] - - h = buf[0]['head'] - d = buf[0]['data'][0] - buf[0]['head'] = h - buf[0]['data'][0] = d - assert_(buf[0]['head'] == 1) - - def test_mem_dot(self): - # Ticket #106 - x = np.random.randn(0, 1) - y = np.random.randn(10, 1) - # Dummy array to detect bad memory access: - _z = np.ones(10) - _dummy = np.empty((0, 10)) - z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) - np.dot(x, np.transpose(y), out=z) - assert_equal(_z, np.ones(10)) - # Do the same for the built-in dot: - np.core.multiarray.dot(x, np.transpose(y), out=z) - assert_equal(_z, np.ones(10)) - - def test_arange_endian(self): - # Ticket #111 - ref = np.arange(10) - x = np.arange(10, dtype='= (3, 4): - # encoding='bytes' was added in Py3.4 - for original, data in test_data: - result = pickle.loads(data, encoding='bytes') - assert_equal(result, original) - - if isinstance(result, np.ndarray) and result.dtype.names is not None: - for name in result.dtype.names: - assert_(isinstance(name, str)) - - def test_pickle_dtype(self): - # Ticket #251 - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - pickle.dumps(float, protocol=proto) - - def test_swap_real(self): - # Ticket #265 - assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0) - assert_equal(np.arange(4, dtype=' 1 and x['two'] > 2) - - def test_method_args(self): - # Make sure methods and functions have same default axis - # keyword and arguments - funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'), - ('sometrue', 'any'), - ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), - 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', - 'round', 'min', 'max', 'argsort', 'sort'] - funcs2 = ['compress', 'take', 'repeat'] - - for func in funcs1: - arr = np.random.rand(8, 7) - arr2 = arr.copy() - if isinstance(func, tuple): - func_meth = func[1] - func = func[0] - else: - func_meth = func - res1 = getattr(arr, func_meth)() - res2 = getattr(np, func)(arr2) - if res1 is None: - res1 = arr - - if res1.dtype.kind in 'uib': - assert_((res1 == res2).all(), func) - else: - assert_(abs(res1-res2).max() < 1e-8, func) - - for func in funcs2: - arr1 = np.random.rand(8, 7) - arr2 = np.random.rand(8, 7) - res1 = None - if func == 'compress': - arr1 = arr1.ravel() - res1 = getattr(arr2, func)(arr1) - else: - arr2 = (15*arr2).astype(int).ravel() - if res1 is None: - res1 = getattr(arr1, func)(arr2) - res2 = getattr(np, func)(arr1, arr2) - assert_(abs(res1-res2).max() < 1e-8, func) - - def test_mem_lexsort_strings(self): - # Ticket #298 - lst = ['abc', 'cde', 'fgh'] - np.lexsort((lst,)) - - def test_fancy_index(self): - # Ticket #302 - x = np.array([1, 2])[np.array([0])] - assert_equal(x.shape, (1,)) - - def test_recarray_copy(self): - # Ticket #312 - dt = [('x', np.int16), ('y', np.float64)] - ra = np.array([(1, 2.3)], dtype=dt) - rb = np.rec.array(ra, dtype=dt) - rb['x'] = 2. 
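# Illustrative sketch (editor addition, not from the deleted file): indexing
# with an integer array keeps an array dimension while a scalar index drops
# it, the shape behaviour test_fancy_index checks above.
import numpy as np

x = np.array([1, 2])
assert x[np.array([0])].shape == (1,) and x[0].shape == ()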
- assert_(ra['x'] != rb['x']) - - def test_rec_fromarray(self): - # Ticket #322 - x1 = np.array([[1, 2], [3, 4], [5, 6]]) - x2 = np.array(['a', 'dd', 'xyz']) - x3 = np.array([1.1, 2, 3]) - np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") - - def test_object_array_assign(self): - x = np.empty((2, 2), object) - x.flat[2] = (1, 2, 3) - assert_equal(x.flat[2], (1, 2, 3)) - - def test_ndmin_float64(self): - # Ticket #324 - x = np.array([1, 2, 3], dtype=np.float64) - assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) - assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) - - def test_ndmin_order(self): - # Issue #465 and related checks - assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) - assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) - - def test_mem_axis_minimization(self): - # Ticket #327 - data = np.arange(5) - data = np.add.outer(data, data) - - def test_mem_float_imag(self): - # Ticket #330 - np.float64(1.0).imag - - def test_dtype_tuple(self): - # Ticket #334 - assert_(np.dtype('i4') == np.dtype(('i4', ()))) - - def test_dtype_posttuple(self): - # Ticket #335 - np.dtype([('col1', '()i4')]) - - def test_numeric_carray_compare(self): - # Ticket #341 - assert_equal(np.array(['X'], 'c'), b'X') - - def test_string_array_size(self): - # Ticket #342 - assert_raises(ValueError, - np.array, [['X'], ['X', 'X', 'X']], '|S1') - - def test_dtype_repr(self): - # Ticket #344 - dt1 = np.dtype(('uint32', 2)) - dt2 = np.dtype(('uint32', (2,))) - assert_equal(dt1.__repr__(), dt2.__repr__()) - - def test_reshape_order(self): - # Make sure reshape order works. - a = np.arange(6).reshape(2, 3, order='F') - assert_equal(a, [[0, 2, 4], [1, 3, 5]]) - a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) - b = a[:, 1] - assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) - - def test_reshape_zero_strides(self): - # Issue #380, test reshaping of zero strided arrays - a = np.ones(1) - a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) - assert_(a.reshape(5, 1).strides[0] == 0) - - def test_reshape_zero_size(self): - # GitHub Issue #2700, setting shape failed for 0-sized arrays - a = np.ones((0, 2)) - a.shape = (-1, 2) - - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. - # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. - @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, - reason="Using relaxed stride checking") - def test_reshape_trailing_ones_strides(self): - # GitHub issue gh-2949, bad strides for trailing ones of new shape - a = np.zeros(12, dtype=np.int32)[::2] # not contiguous - strides_c = (16, 8, 8, 8) - strides_f = (8, 24, 48, 48) - assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) - assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) - assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) - - def test_repeat_discont(self): - # Ticket #352 - a = np.arange(12).reshape(4, 3)[:, 2] - assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) - - def test_array_index(self): - # Make sure optimization is not called in this case. 
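# Illustrative sketch (editor addition, not from the deleted file): Fortran
# order fills the first axis fastest, the behaviour test_reshape_order
# asserts above.
import numpy as np

a = np.arange(6).reshape(2, 3, order='F')
assert a.tolist() == [[0, 2, 4], [1, 3, 5]]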
- a = np.array([1, 2, 3]) - a2 = np.array([[1, 2, 3]]) - assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) - - def test_object_argmax(self): - a = np.array([1, 2, 3], dtype=object) - assert_(a.argmax() == 2) - - def test_recarray_fields(self): - # Ticket #372 - dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) - dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) - for a in [np.array([(1, 2), (3, 4)], "i4,i4"), - np.rec.array([(1, 2), (3, 4)], "i4,i4"), - np.rec.array([(1, 2), (3, 4)]), - np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), - np.rec.fromarrays([(1, 2), (3, 4)])]: - assert_(a.dtype in [dt0, dt1]) - - def test_random_shuffle(self): - # Ticket #374 - a = np.arange(5).reshape((5, 1)) - b = a.copy() - np.random.shuffle(b) - assert_equal(np.sort(b, axis=0), a) - - def test_refcount_vdot(self): - # Changeset #3443 - _assert_valid_refcount(np.vdot) - - def test_startswith(self): - ca = np.char.array(['Hi', 'There']) - assert_equal(ca.startswith('H'), [True, False]) - - def test_noncommutative_reduce_accumulate(self): - # Ticket #413 - tosubtract = np.arange(5) - todivide = np.array([2.0, 0.5, 0.25]) - assert_equal(np.subtract.reduce(tosubtract), -10) - assert_equal(np.divide.reduce(todivide), 16.0) - assert_array_equal(np.subtract.accumulate(tosubtract), - np.array([0, -1, -3, -6, -10])) - assert_array_equal(np.divide.accumulate(todivide), - np.array([2., 4., 16.])) - - def test_convolve_empty(self): - # Convolve should raise an error for empty input array. - assert_raises(ValueError, np.convolve, [], [1]) - assert_raises(ValueError, np.convolve, [1], []) - - def test_multidim_byteswap(self): - # Ticket #449 - r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") - assert_array_equal(r.byteswap(), - np.array([(256, (0, 256, 512))], r.dtype)) - - def test_string_NULL(self): - # Changeset 3557 - assert_equal(np.array("a\x00\x0b\x0c\x00").item(), - 'a\x00\x0b\x0c') - - def test_junk_in_string_fields_of_recarray(self): - # Ticket #483 - r = np.array([[b'abc']], dtype=[('var1', '|S20')]) - assert_(asbytes(r['var1'][0][0]) == b'abc') - - def test_take_output(self): - # Ensure that 'take' honours output parameter. - x = np.arange(12).reshape((3, 4)) - a = np.take(x, [0, 2], axis=1) - b = np.zeros_like(a) - np.take(x, [0, 2], axis=1, out=b) - assert_array_equal(a, b) - - def test_take_object_fail(self): - # Issue gh-3001 - d = 123. 
- a = np.array([d, 1], dtype=object) - if HAS_REFCOUNT: - ref_d = sys.getrefcount(d) - try: - a.take([0, 100]) - except IndexError: - pass - if HAS_REFCOUNT: - assert_(ref_d == sys.getrefcount(d)) - - def test_array_str_64bit(self): - # Ticket #501 - s = np.array([1, np.nan], dtype=np.float64) - with np.errstate(all='raise'): - np.array_str(s) # Should succeed - - def test_frompyfunc_endian(self): - # Ticket #503 - from math import radians - uradians = np.frompyfunc(radians, 1, 1) - big_endian = np.array([83.4, 83.5], dtype='>f8') - little_endian = np.array([83.4, 83.5], dtype=' object - # casting succeeds - def rs(): - x = np.ones([484, 286]) - y = np.zeros([484, 286]) - x |= y - - assert_raises(TypeError, rs) - - def test_unicode_scalar(self): - # Ticket #600 - x = np.array(["DROND", "DROND1"], dtype="U6") - el = x[1] - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - new = pickle.loads(pickle.dumps(el, protocol=proto)) - assert_equal(new, el) - - def test_arange_non_native_dtype(self): - # Ticket #616 - for T in ('>f4', ' 0)] = v - - assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float)) - assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) - - # Old special case (different code path): - assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) - assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) - - def test_mem_scalar_indexing(self): - # Ticket #603 - x = np.array([0], dtype=float) - index = np.array(0, dtype=np.int32) - x[index] - - def test_binary_repr_0_width(self): - assert_equal(np.binary_repr(0, width=3), '000') - - def test_fromstring(self): - assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), - [12, 9, 9]) - - def test_searchsorted_variable_length(self): - x = np.array(['a', 'aa', 'b']) - y = np.array(['d', 'e']) - assert_equal(x.searchsorted(y), [3, 3]) - - def test_string_argsort_with_zeros(self): - # Check argsort for strings containing zeros. - x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") - assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) - assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) - - def test_string_sort_with_zeros(self): - # Check sort for strings containing zeros. - x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") - y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2") - assert_array_equal(np.sort(x, kind="q"), y) - - def test_copy_detection_zero_dim(self): - # Ticket #658 - np.indices((0, 3, 4)).T.reshape(-1, 3) - - def test_flat_byteorder(self): - # Ticket #657 - x = np.arange(10) - assert_array_equal(x.astype('>i4'), x.astype('i4').flat[:], x.astype('i4')): - x = np.array([-1, 0, 1], dtype=dt) - assert_equal(x.flat[0].dtype, x[0].dtype) - - def test_copy_detection_corner_case(self): - # Ticket #658 - np.indices((0, 3, 4)).T.reshape(-1, 3) - - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. - # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous, - # 0-sized reshape itself is tested elsewhere. - @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, - reason="Using relaxed stride checking") - def test_copy_detection_corner_case2(self): - # Ticket #771: strides are not set correctly when reshaping 0-sized - # arrays - b = np.indices((0, 3, 4)).T.reshape(-1, 3) - assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) - - def test_object_array_refcounting(self): - # Ticket #633 - if not hasattr(sys, 'getrefcount'): - return - - # NB. 
this is probably CPython-specific - - cnt = sys.getrefcount - - a = object() - b = object() - c = object() - - cnt0_a = cnt(a) - cnt0_b = cnt(b) - cnt0_c = cnt(c) - - # -- 0d -> 1-d broadcast slice assignment - - arr = np.zeros(5, dtype=np.object_) - - arr[:] = a - assert_equal(cnt(a), cnt0_a + 5) - - arr[:] = b - assert_equal(cnt(a), cnt0_a) - assert_equal(cnt(b), cnt0_b + 5) - - arr[:2] = c - assert_equal(cnt(b), cnt0_b + 3) - assert_equal(cnt(c), cnt0_c + 2) - - del arr - - # -- 1-d -> 2-d broadcast slice assignment - - arr = np.zeros((5, 2), dtype=np.object_) - arr0 = np.zeros(2, dtype=np.object_) - - arr0[0] = a - assert_(cnt(a) == cnt0_a + 1) - arr0[1] = b - assert_(cnt(b) == cnt0_b + 1) - - arr[:, :] = arr0 - assert_(cnt(a) == cnt0_a + 6) - assert_(cnt(b) == cnt0_b + 6) - - arr[:, 0] = None - assert_(cnt(a) == cnt0_a + 1) - - del arr, arr0 - - # -- 2-d copying + flattening - - arr = np.zeros((5, 2), dtype=np.object_) - - arr[:, 0] = a - arr[:, 1] = b - assert_(cnt(a) == cnt0_a + 5) - assert_(cnt(b) == cnt0_b + 5) - - arr2 = arr.copy() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 10) - - arr2 = arr[:, 0].copy() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 5) - - arr2 = arr.flatten() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 10) - - del arr, arr2 - - # -- concatenate, repeat, take, choose - - arr1 = np.zeros((5, 1), dtype=np.object_) - arr2 = np.zeros((5, 1), dtype=np.object_) - - arr1[...] = a - arr2[...] = b - assert_(cnt(a) == cnt0_a + 5) - assert_(cnt(b) == cnt0_b + 5) - - tmp = np.concatenate((arr1, arr2)) - assert_(cnt(a) == cnt0_a + 5 + 5) - assert_(cnt(b) == cnt0_b + 5 + 5) - - tmp = arr1.repeat(3, axis=0) - assert_(cnt(a) == cnt0_a + 5 + 3*5) - - tmp = arr1.take([1, 2, 3], axis=0) - assert_(cnt(a) == cnt0_a + 5 + 3) - - x = np.array([[0], [1], [0], [1], [1]], int) - tmp = x.choose(arr1, arr2) - assert_(cnt(a) == cnt0_a + 5 + 2) - assert_(cnt(b) == cnt0_b + 5 + 3) - - del tmp # Avoid pyflakes unused variable warning - - def test_mem_custom_float_to_array(self): - # Ticket 702 - class MyFloat(object): - def __float__(self): - return 1.0 - - tmp = np.atleast_1d([MyFloat()]) - tmp.astype(float) # Should succeed - - def test_object_array_refcount_self_assign(self): - # Ticket #711 - class VictimObject(object): - deleted = False - - def __del__(self): - self.deleted = True - - d = VictimObject() - arr = np.zeros(5, dtype=np.object_) - arr[:] = d - del d - arr[:] = arr # refcount of 'd' might hit zero here - assert_(not arr[0].deleted) - arr[:] = arr # trying to induce a segfault by doing it again... - assert_(not arr[0].deleted) - - def test_mem_fromiter_invalid_dtype_string(self): - x = [1, 2, 3] - assert_raises(ValueError, - np.fromiter, [xi for xi in x], dtype='S') - - def test_reduce_big_object_array(self): - # Ticket #713 - oldsize = np.setbufsize(10*16) - a = np.array([None]*161, object) - assert_(not np.any(a)) - np.setbufsize(oldsize) - - def test_mem_0d_array_index(self): - # Ticket #714 - np.zeros(10)[np.array(0)] - - def test_nonnative_endian_fill(self): - # Non-native endian arrays were incorrectly filled with scalars - # before r5034. 
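# Illustrative sketch (editor addition, not from the deleted file): filling
# a non-native-endian array byte-swaps the scalar correctly, the regression
# checked just below.
import numpy as np

x = np.zeros(3, dtype='>i4')  # explicitly big-endian, whatever the host
x.fill(7)
assert x.tolist() == [7, 7, 7]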
- if sys.byteorder == 'little': - dtype = np.dtype('>i4') - else: - dtype = np.dtype('= 3: - f = open(filename, 'rb') - xp = pickle.load(f, encoding='latin1') - f.close() - else: - f = open(filename) - xp = pickle.load(f) - f.close() - xpd = xp.astype(np.float64) - assert_((xp.__array_interface__['data'][0] != - xpd.__array_interface__['data'][0])) - - def test_compress_small_type(self): - # Ticket #789, changeset 5217. - # compress with out argument segfaulted if cannot cast safely - import numpy as np - a = np.array([[1, 2], [3, 4]]) - b = np.zeros((2, 1), dtype=np.single) - try: - a.compress([True, False], axis=1, out=b) - raise AssertionError("compress with an out which cannot be " - "safely casted should not return " - "successfully") - except TypeError: - pass - - def test_attributes(self): - # Ticket #791 - class TestArray(np.ndarray): - def __new__(cls, data, info): - result = np.array(data) - result = result.view(cls) - result.info = info - return result - - def __array_finalize__(self, obj): - self.info = getattr(obj, 'info', '') - - dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') - assert_(dat.info == 'jubba') - dat.resize((4, 2)) - assert_(dat.info == 'jubba') - dat.sort() - assert_(dat.info == 'jubba') - dat.fill(2) - assert_(dat.info == 'jubba') - dat.put([2, 3, 4], [6, 3, 4]) - assert_(dat.info == 'jubba') - dat.setfield(4, np.int32, 0) - assert_(dat.info == 'jubba') - dat.setflags() - assert_(dat.info == 'jubba') - assert_(dat.all(1).info == 'jubba') - assert_(dat.any(1).info == 'jubba') - assert_(dat.argmax(1).info == 'jubba') - assert_(dat.argmin(1).info == 'jubba') - assert_(dat.argsort(1).info == 'jubba') - assert_(dat.astype(TestArray).info == 'jubba') - assert_(dat.byteswap().info == 'jubba') - assert_(dat.clip(2, 7).info == 'jubba') - assert_(dat.compress([0, 1, 1]).info == 'jubba') - assert_(dat.conj().info == 'jubba') - assert_(dat.conjugate().info == 'jubba') - assert_(dat.copy().info == 'jubba') - dat2 = TestArray([2, 3, 1, 0], 'jubba') - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - assert_(dat2.choose(choices).info == 'jubba') - assert_(dat.cumprod(1).info == 'jubba') - assert_(dat.cumsum(1).info == 'jubba') - assert_(dat.diagonal().info == 'jubba') - assert_(dat.flatten().info == 'jubba') - assert_(dat.getfield(np.int32, 0).info == 'jubba') - assert_(dat.imag.info == 'jubba') - assert_(dat.max(1).info == 'jubba') - assert_(dat.mean(1).info == 'jubba') - assert_(dat.min(1).info == 'jubba') - assert_(dat.newbyteorder().info == 'jubba') - assert_(dat.prod(1).info == 'jubba') - assert_(dat.ptp(1).info == 'jubba') - assert_(dat.ravel().info == 'jubba') - assert_(dat.real.info == 'jubba') - assert_(dat.repeat(2).info == 'jubba') - assert_(dat.reshape((2, 4)).info == 'jubba') - assert_(dat.round().info == 'jubba') - assert_(dat.squeeze().info == 'jubba') - assert_(dat.std(1).info == 'jubba') - assert_(dat.sum(1).info == 'jubba') - assert_(dat.swapaxes(0, 1).info == 'jubba') - assert_(dat.take([2, 3, 5]).info == 'jubba') - assert_(dat.transpose().info == 'jubba') - assert_(dat.T.info == 'jubba') - assert_(dat.var(1).info == 'jubba') - assert_(dat.view(TestArray).info == 'jubba') - # These methods do not preserve subclasses - assert_(type(dat.nonzero()[0]) is np.ndarray) - assert_(type(dat.nonzero()[1]) is np.ndarray) - - def test_recarray_tolist(self): - # Ticket #793, changeset r5215 - # Comparisons fail for NaN, so we can't use random memory - # for the test. 
- buf = np.zeros(40, dtype=np.int8) - a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) - b = a.tolist() - assert_( a[0].tolist() == b[0]) - assert_( a[1].tolist() == b[1]) - - def test_nonscalar_item_method(self): - # Make sure that .item() fails graciously when it should - a = np.arange(5) - assert_raises(ValueError, a.item) - - def test_char_array_creation(self): - a = np.array('123', dtype='c') - b = np.array([b'1', b'2', b'3']) - assert_equal(a, b) - - def test_unaligned_unicode_access(self): - # Ticket #825 - for i in range(1, 9): - msg = 'unicode offset: %d chars' % i - t = np.dtype([('a', 'S%d' % i), ('b', 'U2')]) - x = np.array([(b'a', u'b')], dtype=t) - if sys.version_info[0] >= 3: - assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) - else: - assert_equal(str(x), "[('a', u'b')]", err_msg=msg) - - def test_sign_for_complex_nan(self): - # Ticket 794. - with np.errstate(invalid='ignore'): - C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) - have = np.sign(C) - want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) - assert_equal(have, want) - - def test_for_equal_names(self): - # Ticket #674 - dt = np.dtype([('foo', float), ('bar', float)]) - a = np.zeros(10, dt) - b = list(a.dtype.names) - b[0] = "notfoo" - a.dtype.names = b - assert_(a.dtype.names[0] == "notfoo") - assert_(a.dtype.names[1] == "bar") - - def test_for_object_scalar_creation(self): - # Ticket #816 - a = np.object_() - b = np.object_(3) - b2 = np.object_(3.0) - c = np.object_([4, 5]) - d = np.object_([None, {}, []]) - assert_(a is None) - assert_(type(b) is int) - assert_(type(b2) is float) - assert_(type(c) is np.ndarray) - assert_(c.dtype == object) - assert_(d.dtype == object) - - def test_array_resize_method_system_error(self): - # Ticket #840 - order should be an invalid keyword. - x = np.array([[0, 1], [2, 3]]) - assert_raises(TypeError, x.resize, (2, 2), order='C') - - def test_for_zero_length_in_choose(self): - "Ticket #882" - a = np.array(1) - assert_raises(ValueError, lambda x: x.choose([]), a) - - def test_array_ndmin_overflow(self): - "Ticket #947." - assert_raises(ValueError, lambda: np.array([1], ndmin=33)) - - def test_void_scalar_with_titles(self): - # No ticket - data = [('john', 4), ('mary', 5)] - dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] - arr = np.array(data, dtype=dtype1) - assert_(arr[0][0] == 'john') - assert_(arr[0][1] == 4) - - def test_void_scalar_constructor(self): - #Issue #1550 - - #Create test string data, construct void scalar from data and assert - #that void scalar contains original data. - test_string = np.array("test") - test_string_void_scalar = np.core.multiarray.scalar( - np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes()) - - assert_(test_string_void_scalar.view(test_string.dtype) == test_string) - - #Create record scalar, construct from data and assert that - #reconstructed scalar is correct. 
- test_record = np.ones((), "i,i") - test_record_void_scalar = np.core.multiarray.scalar( - test_record.dtype, test_record.tobytes()) - - assert_(test_record_void_scalar == test_record) - - # Test pickle and unpickle of void and record scalars - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - assert_(pickle.loads( - pickle.dumps(test_string, protocol=proto)) == test_string) - assert_(pickle.loads( - pickle.dumps(test_record, protocol=proto)) == test_record) - - @_no_tracing - def test_blasdot_uninitialized_memory(self): - # Ticket #950 - for m in [0, 1, 2]: - for n in [0, 1, 2]: - for k in range(3): - # Try to ensure that x->data contains non-zero floats - x = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - x.resize((m, 0), refcheck=False) - else: - x.resize((m, 0)) - y = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - y.resize((0, n), refcheck=False) - else: - y.resize((0, n)) - - # `dot` should just return zero (m, n) matrix - z = np.dot(x, y) - assert_(np.all(z == 0)) - assert_(z.shape == (m, n)) - - def test_zeros(self): - # Regression test for #1061. - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - with assert_raises_regex(ValueError, - 'Maximum allowed dimension exceeded'): - np.empty(sz) - - def test_huge_arange(self): - # Regression test for #1062. - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - with assert_raises_regex(ValueError, - 'Maximum allowed size exceeded'): - np.arange(sz) - assert_(np.size == sz) - - def test_fromiter_bytes(self): - # Ticket #1058 - a = np.fromiter(list(range(10)), dtype='b') - b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - - def test_array_from_sequence_scalar_array(self): - # Ticket #1078: segfaults when creating an array with a sequence of - # 0d arrays. - a = np.array((np.ones(2), np.array(2))) - assert_equal(a.shape, (2,)) - assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], np.ones(2)) - assert_equal(a[1], np.array(2)) - - a = np.array(((1,), np.array(1))) - assert_equal(a.shape, (2,)) - assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], (1,)) - assert_equal(a[1], np.array(1)) - - def test_array_from_sequence_scalar_array2(self): - # Ticket #1081: weird array with strange input... - t = np.array([np.array([]), np.array(0, object)]) - assert_equal(t.shape, (2,)) - assert_equal(t.dtype, np.dtype(object)) - - def test_array_too_big(self): - # Ticket #1080. - assert_raises(ValueError, np.zeros, [975]*7, np.int8) - assert_raises(ValueError, np.zeros, [26244]*5, np.int8) - - def test_dtype_keyerrors_(self): - # Ticket #1106. - dt = np.dtype([('f1', np.uint)]) - assert_raises(KeyError, dt.__getitem__, "f2") - assert_raises(IndexError, dt.__getitem__, 1) - assert_raises(TypeError, dt.__getitem__, 0.0) - - def test_lexsort_buffer_length(self): - # Ticket #1217, don't segfault. - a = np.ones(100, dtype=np.int8) - b = np.ones(100, dtype=np.int32) - i = np.lexsort((a[::-1], b)) - assert_equal(i, np.arange(100, dtype=int)) - - def test_object_array_to_fixed_string(self): - # Ticket #1235. 
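The uninitialized-memory dot test above depends on a small but easy-to-miss rule: a matrix product that contracts over a zero-length axis is an empty sum, so np.dot must return an all-zero (m, n) result rather than whatever happens to be in the output buffer. A quick sketch:

    import numpy as np

    x = np.empty((3, 0))        # contents are uninitialized
    y = np.empty((0, 2))
    z = np.dot(x, y)            # contracts over the empty axis
    assert z.shape == (3, 2)
    assert np.all(z == 0)       # empty sum: exactly zero, never garbage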
- a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) - b = np.array(a, dtype=(np.str_, 8)) - assert_equal(a, b) - c = np.array(a, dtype=(np.str_, 5)) - assert_equal(c, np.array(['abcde', 'ijklm'])) - d = np.array(a, dtype=(np.str_, 12)) - assert_equal(a, d) - e = np.empty((2, ), dtype=(np.str_, 8)) - e[:] = a[:] - assert_equal(a, e) - - def test_unicode_to_string_cast(self): - # Ticket #1240. - a = np.array([[u'abc', u'\u03a3'], - [u'asdf', u'erw']], - dtype='U') - assert_raises(UnicodeEncodeError, np.array, a, 'S4') - - def test_mixed_string_unicode_array_creation(self): - a = np.array(['1234', u'123']) - assert_(a.itemsize == 16) - a = np.array([u'123', '1234']) - assert_(a.itemsize == 16) - a = np.array(['1234', u'123', '12345']) - assert_(a.itemsize == 20) - a = np.array([u'123', '1234', u'12345']) - assert_(a.itemsize == 20) - a = np.array([u'123', '1234', u'1234']) - assert_(a.itemsize == 16) - - def test_misaligned_objects_segfault(self): - # Ticket #1198 and #1267 - a1 = np.zeros((10,), dtype='O,c') - a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') - a1['f0'] = a2 - repr(a1) - np.argmax(a1['f0']) - a1['f0'][1] = "FOO" - a1['f0'] = "FOO" - np.array(a1['f0'], dtype='S') - np.nonzero(a1['f0']) - a1.sort() - copy.deepcopy(a1) - - def test_misaligned_scalars_segfault(self): - # Ticket #1267 - s1 = np.array(('a', 'Foo'), dtype='c,O') - s2 = np.array(('b', 'Bar'), dtype='c,O') - s1['f1'] = s2['f1'] - s1['f1'] = 'Baz' - - def test_misaligned_dot_product_objects(self): - # Ticket #1267 - # This didn't require a fix, but it's worth testing anyway, because - # it may fail if .dot stops enforcing the arrays to be BEHAVED - a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') - b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') - np.dot(a['f0'], b['f0']) - - def test_byteswap_complex_scalar(self): - # Ticket #1259 and gh-441 - for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: - z = np.array([2.2-1.1j], dtype) - x = z[0] # always native-endian - y = x.byteswap() - if x.dtype.byteorder == z.dtype.byteorder: - # little-endian machine - assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) - else: - # big-endian machine - assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) - # double check real and imaginary parts: - assert_equal(x.real, y.real.byteswap()) - assert_equal(x.imag, y.imag.byteswap()) - - def test_structured_arrays_with_objects1(self): - # Ticket #1299 - stra = 'aaaa' - strb = 'bbbb' - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(x[0, 1] == x[0, 0]) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_structured_arrays_with_objects2(self): - # Ticket #1299 second test - stra = 'aaaa' - strb = 'bbbb' - numb = sys.getrefcount(strb) - numa = sys.getrefcount(stra) - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(sys.getrefcount(strb) == numb) - assert_(sys.getrefcount(stra) == numa + 2) - - def test_duplicate_title_and_name(self): - # Ticket #1254 - dtspec = [(('a', 'a'), 'i'), ('b', 'i')] - assert_raises(ValueError, np.dtype, dtspec) - - def test_signed_integer_division_overflow(self): - # Ticket #1317. 
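For context on the overflow test that follows: the most negative value of a signed integer type has no positive counterpart, so iinfo.min // -1 cannot be represented and historically crashed some platforms with SIGFPE. A minimal sketch; the wrapped result value is implementation-defined here, and the guarantee being exercised is only that the operation completes:

    import numpy as np

    with np.errstate(all='ignore'):
        a = np.array([np.iinfo(np.int32).min], dtype=np.int32)
        a //= -1    # must not crash; the value wraps rather than trapping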
- def test_type(t): - min = np.array([np.iinfo(t).min]) - min //= -1 - - with np.errstate(divide="ignore"): - for t in (np.int8, np.int16, np.int32, np.int64, int, np.compat.long): - test_type(t) - - def test_buffer_hashlib(self): - try: - from hashlib import md5 - except ImportError: - from md5 import new as md5 - - x = np.array([1, 2, 3], dtype=np.dtype('<i4')) - assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6') - - def test_log1p_compiler_shenanigans(self): - # Check if log1p is behaving on 32 bit intel systems. - assert_(np.isfinite(np.log1p(np.exp2(-53)))) - - def test_fromiter_comparison(self): - a = np.fromiter(list(range(10)), dtype='b') - b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - - def test_fromstring_crash(self): - # Ticket #1345: the following should not cause a crash - with assert_warns(DeprecationWarning): - np.fromstring(b'aa, aa, 1.0', sep=',') - - def test_ticket_1539(self): - dtypes = [x for x in np.typeDict.values() - if (issubclass(x, np.number) - and not issubclass(x, np.timedelta64))] - a = np.array([], np.bool_) # not x[0] because it is unordered - failures = [] - - for x in dtypes: - b = a.astype(x) - for y in dtypes: - c = a.astype(y) - try: - np.dot(b, c) - except TypeError: - failures.append((x, y)) - if failures: - raise AssertionError("Failures: %r" % failures) - - def test_ticket_1538(self): - x = np.finfo(np.float32) - for name in 'eps epsneg max min resolution tiny'.split(): - assert_equal(type(getattr(x, name)), np.float32, - err_msg=name) - - def test_ticket_1434(self): - # Check that the out= argument in var and std has an effect - data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) - out = np.zeros((3,)) - - ret = data.var(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.var(axis=1)) - - ret = data.std(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.std(axis=1)) - - def test_complex_nan_maximum(self): - cnan = complex(0, np.nan) - assert_equal(np.maximum(1, cnan), cnan) - - def test_subclass_int_tuple_assignment(self): - # ticket #1563 - class Subclass(np.ndarray): - def __new__(cls, i): - return np.ones((i,)).view(cls) - - x = Subclass(5) - x[(0,)] = 2 # shouldn't raise an exception - assert_equal(x[0], 2) - - def test_ufunc_no_unnecessary_views(self): - # ticket #1548 - class Subclass(np.ndarray): - pass - x = np.array([1, 2, 3]).view(Subclass) - y = np.add(x, x, x) - assert_equal(id(x), id(y)) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_take_refcount(self): - # ticket #939 - a = np.arange(16, dtype=float) - a.shape = (4, 4) - lut = np.ones((5 + 3, 4), float) - rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) - c1 = sys.getrefcount(rgba) - try: - lut.take(a, axis=0, mode='clip', out=rgba) - except TypeError: - pass - c2 = sys.getrefcount(rgba) - assert_equal(c1, c2) - - def test_fromfile_tofile_seeks(self): - # On Python 3, tofile/fromfile used to get (#1610) the Python - # file handle out of sync - f0 = tempfile.NamedTemporaryFile() - f = f0.file - f.write(np.arange(255, dtype='u1').tobytes()) - - f.seek(20) - ret = np.fromfile(f, count=4, dtype='u1') - assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) - assert_equal(f.tell(), 24) - - f.seek(40) - np.array([1, 2, 3], dtype='u1').tofile(f) - assert_equal(f.tell(), 43) - - f.seek(40) - data = f.read(3) - assert_equal(data, b"\x01\x02\x03") - - f.seek(80) - f.read(4) - data = np.fromfile(f, dtype='u1', count=4) -
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) - - f.close() - - def test_complex_scalar_warning(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_warns(np.ComplexWarning, float, x) - with suppress_warnings() as sup: - sup.filter(np.ComplexWarning) - assert_equal(float(x), float(x.real)) - - def test_complex_scalar_complex_cast(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_equal(complex(x), 1+2j) - - def test_complex_boolean_cast(self): - # Ticket #2218 - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) - assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) - assert_(np.any(x)) - assert_(np.all(x[1:])) - - def test_uint_int_conversion(self): - x = 2**64 - 1 - assert_equal(int(np.uint64(x)), x) - - def test_duplicate_field_names_assign(self): - ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') - ra.dtype.names = ('f1', 'f2') - repr(ra) # should not cause a segmentation fault - assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) - - def test_eq_string_and_object_array(self): - # From e-mail thread "__eq__ with str and object" (Keith Goodman) - a1 = np.array(['a', 'b'], dtype=object) - a2 = np.array(['a', 'c']) - assert_array_equal(a1 == a2, [True, False]) - assert_array_equal(a2 == a1, [True, False]) - - def test_nonzero_byteswap(self): - a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 - assert_equal(a.nonzero()[0], [1]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap - - def test_find_common_type_boolean(self): - # Ticket #1695 - assert_(np.find_common_type([], ['?', '?']) == '?') - - def test_empty_mul(self): - a = np.array([1.]) - a[1:1] *= 2 - assert_equal(a, [1.]) - - def test_array_side_effect(self): - # The second use of itemsize was throwing an exception because in - # ctors.c, discover_itemsize was calling PyObject_Length without - # checking the return code. This failed to get the length of the - # number 2, and the exception hung around until something checked - # PyErr_Occurred() and returned an error. 
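The complex-scalar warning test above hinges on one casting rule: converting a complex scalar to float would silently lose the imaginary part, so NumPy emits ComplexWarning and keeps only the real component. A short sketch:

    import warnings
    import numpy as np

    x = np.cdouble(1 + 2j)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', np.ComplexWarning)
        assert float(x) == 1.0      # imaginary part is discarded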
- assert_equal(np.dtype('S10').itemsize, 10) - np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) - assert_equal(np.dtype('S10').itemsize, 10) - - def test_any_float(self): - # all and any for floats - a = np.array([0.1, 0.9]) - assert_(np.any(a)) - assert_(np.all(a)) - - def test_large_float_sum(self): - a = np.arange(10000, dtype='f') - assert_equal(a.sum(dtype='d'), a.astype('d').sum()) - - def test_ufunc_casting_out(self): - a = np.array(1.0, dtype=np.float32) - b = np.array(1.0, dtype=np.float64) - c = np.array(1.0, dtype=np.float32) - np.add(a, b, out=c) - assert_equal(c, 2.0) - - def test_array_scalar_contiguous(self): - # Array scalars are both C and Fortran contiguous - assert_(np.array(1.0).flags.c_contiguous) - assert_(np.array(1.0).flags.f_contiguous) - assert_(np.array(np.float32(1.0)).flags.c_contiguous) - assert_(np.array(np.float32(1.0)).flags.f_contiguous) - - def test_squeeze_contiguous(self): - # Similar to GitHub issue #387 - a = np.zeros((1, 2)).squeeze() - b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() - assert_(a.flags.c_contiguous) - assert_(a.flags.f_contiguous) - assert_(b.flags.f_contiguous) - - def test_squeeze_axis_handling(self): - # Issue #10779 - # Ensure proper handling of objects - # that don't support axis specification - # when squeezing - - class OldSqueeze(np.ndarray): - - def __new__(cls, - input_array): - obj = np.asarray(input_array).view(cls) - return obj - - # it is perfectly reasonable that prior - # to numpy version 1.7.0 a subclass of ndarray - # might have been created that did not expect - # squeeze to have an axis argument - # NOTE: this example is somewhat artificial; - # it is designed to simulate an old API - # expectation to guard against regression - def squeeze(self): - return super(OldSqueeze, self).squeeze() - - oldsqueeze = OldSqueeze(np.array([[1],[2],[3]])) - - # if no axis argument is specified the old API - # expectation should give the correct result - assert_equal(np.squeeze(oldsqueeze), - np.array([1,2,3])) - - # likewise, axis=None should work perfectly well - # with the old API expectation - assert_equal(np.squeeze(oldsqueeze, axis=None), - np.array([1,2,3])) - - # however, specification of any particular axis - # should raise a TypeError in the context of the - # old API specification, even when using a valid - # axis specification like 1 for this array - with assert_raises(TypeError): - # this would silently succeed for array - # subclasses / objects that did not support - # squeeze axis argument handling before fixing - # Issue #10779 - np.squeeze(oldsqueeze, axis=1) - - # check for the same behavior when using an invalid - # axis specification -- in this case axis=0 does not - # have size 1, but the priority should be to raise - # a TypeError for the axis argument and NOT a - # ValueError for squeezing a non-empty dimension - with assert_raises(TypeError): - np.squeeze(oldsqueeze, axis=0) - - # the new API knows how to handle the axis - # argument and will return a ValueError if - # attempting to squeeze an axis that is not - # of length 1 - with assert_raises(ValueError): - np.squeeze(np.array([[1],[2],[3]]), axis=0) - - def test_reduce_contiguous(self): - # GitHub issue #387 - a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) - b = np.add.reduce(np.zeros((2, 1, 2)), 1) - assert_(a.flags.c_contiguous) - assert_(a.flags.f_contiguous) - assert_(b.flags.c_contiguous) - - def test_object_array_self_reference(self): - # Object arrays with references to themselves can cause problems - a = np.array(0, 
dtype=object) - a[()] = a - assert_raises(RecursionError, int, a) - assert_raises(RecursionError, long, a) - assert_raises(RecursionError, float, a) - if sys.version_info.major == 2: - # in python 3, this falls back on operator.index, which fails on - # on dtype=object - assert_raises(RecursionError, oct, a) - assert_raises(RecursionError, hex, a) - a[()] = None - - def test_object_array_circular_reference(self): - # Test the same for a circular reference. - a = np.array(0, dtype=object) - b = np.array(0, dtype=object) - a[()] = b - b[()] = a - assert_raises(RecursionError, int, a) - # NumPy has no tp_traverse currently, so circular references - # cannot be detected. So resolve it: - a[()] = None - - # This was causing a to become like the above - a = np.array(0, dtype=object) - a[...] += 1 - assert_equal(a, 1) - - def test_object_array_nested(self): - # but is fine with a reference to a different array - a = np.array(0, dtype=object) - b = np.array(0, dtype=object) - a[()] = b - assert_equal(int(a), int(0)) - assert_equal(long(a), long(0)) - assert_equal(float(a), float(0)) - if sys.version_info.major == 2: - # in python 3, this falls back on operator.index, which fails on - # on dtype=object - assert_equal(oct(a), oct(0)) - assert_equal(hex(a), hex(0)) - - def test_object_array_self_copy(self): - # An object array being copied into itself DECREF'ed before INCREF'ing - # causing segmentation faults (gh-3787) - a = np.array(object(), dtype=object) - np.copyto(a, a) - if HAS_REFCOUNT: - assert_(sys.getrefcount(a[()]) == 2) - a[()].__class__ # will segfault if object was deleted - - def test_zerosize_accumulate(self): - "Ticket #1733" - x = np.array([[42, 0]], dtype=np.uint32) - assert_equal(np.add.accumulate(x[:-1, 0]), []) - - def test_objectarray_setfield(self): - # Setfield should not overwrite Object fields with non-Object data - x = np.array([1, 2, 3], dtype=object) - assert_raises(TypeError, x.setfield, 4, np.int32, 0) - - def test_setting_rank0_string(self): - "Ticket #1736" - s1 = b"hello1" - s2 = b"hello2" - a = np.zeros((), dtype="S10") - a[()] = s1 - assert_equal(a, np.array(s1)) - a[()] = np.array(s2) - assert_equal(a, np.array(s2)) - - a = np.zeros((), dtype='f4') - a[()] = 3 - assert_equal(a, np.array(3)) - a[()] = np.array(4) - assert_equal(a, np.array(4)) - - def test_string_astype(self): - "Ticket #1748" - s1 = b'black' - s2 = b'white' - s3 = b'other' - a = np.array([[s1], [s2], [s3]]) - assert_equal(a.dtype, np.dtype('S5')) - b = a.astype(np.dtype('S0')) - assert_equal(b.dtype, np.dtype('S5')) - - def test_ticket_1756(self): - # Ticket #1756 - s = b'0123456789abcdef' - a = np.array([s]*5) - for i in range(1, 17): - a1 = np.array(a, "|S%d" % i) - a2 = np.array([s[:i]]*5) - assert_equal(a1, a2) - - def test_fields_strides(self): - "gh-2355" - r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') - assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) - assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) - assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) - assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) - - def test_alignment_update(self): - # Check that alignment flag is updated on stride setting - a = np.arange(10) - assert_(a.flags.aligned) - a.strides = 3 - assert_(not a.flags.aligned) - - def test_ticket_1770(self): - "Should not segfault on python 3k" - import numpy as np - try: - a = np.zeros((1,), dtype=[('f1', 'f')]) - a['f1'] = 1 - a['f2'] = 1 - except ValueError: - pass - except Exception: - raise AssertionError - - def 
test_ticket_1608(self): - "x.flat shouldn't modify data" - x = np.array([[1, 2], [3, 4]]).T - np.array(x.flat) - assert_equal(x, [[1, 3], [2, 4]]) - - def test_pickle_string_overwrite(self): - import re - - data = np.array([1], dtype='b') - blob = pickle.dumps(data, protocol=1) - data = pickle.loads(blob) - - # Check that loads does not clobber interned strings - s = re.sub("a(.)", "\x01\\1", "a_") - assert_equal(s[0], "\x01") - data[0] = 0xbb - s = re.sub("a(.)", "\x01\\1", "a_") - assert_equal(s[0], "\x01") - - def test_pickle_bytes_overwrite(self): - if sys.version_info[0] >= 3: - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - data = np.array([1], dtype='b') - data = pickle.loads(pickle.dumps(data, protocol=proto)) - data[0] = 0xdd - bytestring = "\x01 ".encode('ascii') - assert_equal(bytestring[0:1], '\x01'.encode('ascii')) - - def test_pickle_py2_array_latin1_hack(self): - # Check that unpickling hacks in Py3 that support - # encoding='latin1' work correctly. - - # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) - data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" - b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" - b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" - b"p13\ntp14\nb.") - if sys.version_info[0] >= 3: - # This should work: - result = pickle.loads(data, encoding='latin1') - assert_array_equal(result, np.array([129], dtype='b')) - # Should not segfault: - assert_raises(Exception, pickle.loads, data, encoding='koi8-r') - - def test_pickle_py2_scalar_latin1_hack(self): - # Check that scalar unpickling hack in Py3 that supports - # encoding='latin1' work correctly. - - # Python2 output for pickle.dumps(...) - datas = [ - # (original, python2_pickle, koi8r_validity) - (np.unicode_('\u6bd2'), - (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" - b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" - b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), - 'invalid'), - - (np.float64(9e123), - (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" - b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" - b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), - 'invalid'), - - (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1 - (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" - b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" - b"tp8\nRp9\n."), - 'different'), - ] - if sys.version_info[0] >= 3: - for original, data, koi8r_validity in datas: - result = pickle.loads(data, encoding='latin1') - assert_equal(result, original) - - # Decoding under non-latin1 encoding (e.g.) KOI8-R can - # produce bad results, but should not segfault. 
- if koi8r_validity == 'different': - # Unicode code points happen to lie within latin1, - # but are different in koi8-r, resulting to silent - # bogus results - result = pickle.loads(data, encoding='koi8-r') - assert_(result != original) - elif koi8r_validity == 'invalid': - # Unicode code points outside latin1, so results - # to an encoding exception - assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') - else: - raise ValueError(koi8r_validity) - - def test_structured_type_to_object(self): - a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') - a_obj = np.empty((2,), dtype=object) - a_obj[0] = (0, 1) - a_obj[1] = (3, 2) - # astype records -> object - assert_equal(a_rec.astype(object), a_obj) - # '=' records -> object - b = np.empty_like(a_obj) - b[...] = a_rec - assert_equal(b, a_obj) - # '=' object -> records - b = np.empty_like(a_rec) - b[...] = a_obj - assert_equal(b, a_rec) - - def test_assign_obj_listoflists(self): - # Ticket # 1870 - # The inner list should get assigned to the object elements - a = np.zeros(4, dtype=object) - b = a.copy() - a[0] = [1] - a[1] = [2] - a[2] = [3] - a[3] = [4] - b[...] = [[1], [2], [3], [4]] - assert_equal(a, b) - # The first dimension should get broadcast - a = np.zeros((2, 2), dtype=object) - a[...] = [[1, 2]] - assert_equal(a, [[1, 2], [1, 2]]) - - def test_memoryleak(self): - # Ticket #1917 - ensure that array data doesn't leak - for i in range(1000): - # 100MB times 1000 would give 100GB of memory usage if it leaks - a = np.empty((100000000,), dtype='i1') - del a - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_ufunc_reduce_memoryleak(self): - a = np.arange(6) - acnt = sys.getrefcount(a) - np.add.reduce(a) - assert_equal(sys.getrefcount(a), acnt) - - def test_search_sorted_invalid_arguments(self): - # Ticket #2021, should not segfault. - x = np.arange(0, 4, dtype='datetime64[D]') - assert_raises(TypeError, x.searchsorted, 1) - - def test_string_truncation(self): - # Ticket #1990 - Data can be truncated in creation of an array from a - # mixed sequence of numeric values and strings - for val in [True, 1234, 123.4, complex(1, 234)]: - for tostr in [asunicode, asbytes]: - b = np.array([val, tostr('xx')]) - assert_equal(tostr(b[0]), tostr(val)) - b = np.array([tostr('xx'), val]) - assert_equal(tostr(b[1]), tostr(val)) - - # test also with longer strings - b = np.array([val, tostr('xxxxxxxxxx')]) - assert_equal(tostr(b[0]), tostr(val)) - b = np.array([tostr('xxxxxxxxxx'), val]) - assert_equal(tostr(b[1]), tostr(val)) - - def test_string_truncation_ucs2(self): - # Ticket #2081. Python compiled with two byte unicode - # can lead to truncation if itemsize is not properly - # adjusted for NumPy's four byte unicode. - if sys.version_info[0] >= 3: - a = np.array(['abcd']) - else: - a = np.array([u'abcd']) - assert_equal(a.dtype.itemsize, 16) - - def test_unique_stable(self): - # Ticket #2063 must always choose stable sort for argsort to - # get consistent results - v = np.array(([0]*5 + [1]*6 + [2]*6)*4) - res = np.unique(v, return_index=True) - tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) - assert_equal(res, tgt) - - def test_unicode_alloc_dealloc_match(self): - # Ticket #1578, the mismatch only showed up when running - # python-debug for python versions >= 2.7, and then as - # a core dump and error message. 
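A note on the object-assignment semantics tested in test_assign_obj_listoflists above: assigning a nested list into an object array stores each inner list as an element, and any extra leading dimension broadcasts like an ordinary axis. Sketch:

    import numpy as np

    a = np.zeros(2, dtype=object)
    a[...] = [[1], [2]]                 # inner lists become the elements
    assert a[0] == [1] and a[1] == [2]

    b = np.zeros((2, 2), dtype=object)
    b[...] = [[1, 2]]                   # shape (1, 2) broadcasts to (2, 2)
    assert b[1, 0] == 1 and b[1, 1] == 2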
- a = np.array(['abc'], dtype=np.unicode_)[0] - del a - - def test_refcount_error_in_clip(self): - # Ticket #1588 - a = np.zeros((2,), dtype='>i2').clip(min=0) - x = a + a - # This used to segfault: - y = str(x) - # Check the final string: - assert_(y == "[0 0]") - - def test_searchsorted_wrong_dtype(self): - # Ticket #2189, it used to segfault, so we check that it raises the - # proper exception. - a = np.array([('a', 1)], dtype='S1, int') - assert_raises(TypeError, np.searchsorted, a, 1.2) - # Ticket #2066, similar problem: - dtype = np.format_parser(['i4', 'i4'], [], []) - a = np.recarray((2, ), dtype) - assert_raises(TypeError, np.searchsorted, a, 1) - - def test_complex64_alignment(self): - # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment - dtt = np.complex64 - arr = np.arange(10, dtype=dtt) - # 2D array - arr2 = np.reshape(arr, (2, 5)) - # Fortran write followed by (C or F) read caused bus error - data_str = arr2.tobytes('F') - data_back = np.ndarray(arr2.shape, - arr2.dtype, - buffer=data_str, - order='F') - assert_array_equal(arr2, data_back) - - def test_structured_count_nonzero(self): - arr = np.array([0, 1]).astype('i4, (2)i4')[:1] - count = np.count_nonzero(arr) - assert_equal(count, 0) - - def test_copymodule_preserves_f_contiguity(self): - a = np.empty((2, 2), order='F') - b = copy.copy(a) - c = copy.deepcopy(a) - assert_(b.flags.fortran) - assert_(b.flags.f_contiguous) - assert_(c.flags.fortran) - assert_(c.flags.f_contiguous) - - def test_fortran_order_buffer(self): - import numpy as np - a = np.array([['Hello', 'Foob']], dtype='U5', order='F') - arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) - arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'], - [u'F', u'o', u'o', u'b', u'']]]) - assert_array_equal(arr, arr2) - - def test_assign_from_sequence_error(self): - # Ticket #4024. - arr = np.array([1, 2, 3]) - assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) - arr.__setitem__(slice(None), [9]) - assert_equal(arr, [9, 9, 9]) - - def test_format_on_flex_array_element(self): - # Ticket #4369. 
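test_copymodule_preserves_f_contiguity above pins down a subtle detail: copy.copy and copy.deepcopy reach ndarray through the pickle protocol, which records the memory order, so a Fortran-ordered array must come back F-contiguous. Sketch:

    import copy
    import numpy as np

    a = np.empty((2, 2), order='F')
    assert copy.copy(a).flags.f_contiguous
    assert copy.deepcopy(a).flags.f_contiguous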
- dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')]) - arr = np.array([('2000-01-01', 1)], dt) - formatted = '{0}'.format(arr[0]) - assert_equal(formatted, str(arr[0])) - - def test_richcompare_crash(self): - # gh-4613 - import operator as op - - # dummy class where __array__ throws exception - class Foo(object): - __array_priority__ = 1002 - - def __array__(self, *args, **kwargs): - raise Exception() - - rhs = Foo() - lhs = np.array(1) - for f in [op.lt, op.le, op.gt, op.ge]: - if sys.version_info[0] >= 3: - assert_raises(TypeError, f, lhs, rhs) - elif not sys.py3kwarning: - # With -3 switch in python 2, DeprecationWarning is raised - # which we are not interested in - f(lhs, rhs) - assert_(not op.eq(lhs, rhs)) - assert_(op.ne(lhs, rhs)) - - def test_richcompare_scalar_and_subclass(self): - # gh-4709 - class Foo(np.ndarray): - def __eq__(self, other): - return "OK" - - x = np.array([1, 2, 3]).view(Foo) - assert_equal(10 == x, "OK") - assert_equal(np.int32(10) == x, "OK") - assert_equal(np.array([10]) == x, "OK") - - def test_pickle_empty_string(self): - # gh-3926 - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - test_string = np.string_('') - assert_equal(pickle.loads( - pickle.dumps(test_string, protocol=proto)), test_string) - - def test_frompyfunc_many_args(self): - # gh-5672 - - def passer(*args): - pass - - assert_raises(ValueError, np.frompyfunc, passer, 32, 1) - - def test_repeat_broadcasting(self): - # gh-5743 - a = np.arange(60).reshape(3, 4, 5) - for axis in chain(range(-a.ndim, a.ndim), [None]): - assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis)) - - def test_frompyfunc_nout_0(self): - # gh-2014 - - def f(x): - x[0], x[-1] = x[-1], x[0] - - uf = np.frompyfunc(f, 1, 0) - a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]]) - assert_equal(uf(a), ()) - assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]]) - - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_leak_in_structured_dtype_comparison(self): - # gh-6250 - recordtype = np.dtype([('a', np.float64), - ('b', np.int32), - ('d', (str, 5))]) - - # Simple case - a = np.zeros(2, dtype=recordtype) - for i in range(100): - a == a - assert_(sys.getrefcount(a) < 10) - - # The case in the bug report. - before = sys.getrefcount(a) - u, v = a[0], a[1] - u == v - del u, v - gc.collect() - after = sys.getrefcount(a) - assert_equal(before, after) - - def test_empty_percentile(self): - # gh-6530 / gh-6553 - assert_array_equal(np.percentile(np.arange(10), []), np.array([])) - - def test_void_compare_segfault(self): - # gh-6922. 
The following should not segfault - a = np.ones(3, dtype=[('object', 'O'), ('int', '<i4')]) - a.sort() - - def test_pickle_module(self): - # gh-12837 - arr = np.arange(10) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - s = pickle.dumps(arr, protocol=proto) - assert b'_multiarray_umath' not in s - if proto == 5 and len(arr.shape) > 0: - # unpickling ndarray goes through _frombuffer for protocol 5 - assert b'numpy.core.numeric' in s - else: - assert b'numpy.core.multiarray' in s - - def test_object_casting_errors(self): - # gh-11993 - arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object) - assert_raises(TypeError, arr.astype, 'c8') - - def test_eff1d_casting(self): - # gh-12711 - x = np.array([1, 2, 4, 7, 0], dtype=np.int16) - res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - assert_equal(res, [-99, 1, 2, 3, -7, 88, 99]) - assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20)) - assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20)) - - def test_pickle_datetime64_array(self): - # gh-12745 (would fail with pickle5 installed) - d = np.datetime64('2015-07-04 12:59:59.50', 'ns') - arr = np.array([d]) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - dumped = pickle.dumps(arr, protocol=proto) - assert_equal(pickle.loads(dumped), arr) - - def test_bad_array_interface(self): - class T(object): - __array_interface__ = {} - - np.array([T()]) - - @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') - @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8), - reason='overflows on windows, fixed in bpo-16865') - def test_to_ctypes(self): - #gh-14214 - arr = np.zeros((2 ** 31 + 1,), 'b') - assert arr.size * arr.itemsize > 2 ** 31 - c_arr = np.ctypeslib.as_ctypes(arr) - assert_equal(c_arr._length_, arr.size) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_ctors.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_ctors.py deleted file mode 100644 index b21bc9d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_ctors.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Test the scalar constructors, which also do type-coercion -""" -from __future__ import division, absolute_import, print_function - -import sys -import platform -import pytest - -import numpy as np -from numpy.testing import ( - assert_equal, assert_almost_equal, assert_raises, assert_warns, - ) - -class TestFromString(object): - def test_floating(self): - # Ticket #640, floats from string - fsingle = np.single('1.234') - fdouble = np.double('1.234') - flongdouble = np.longdouble('1.234') - assert_almost_equal(fsingle, 1.234) - assert_almost_equal(fdouble, 1.234) - assert_almost_equal(flongdouble, 1.234) - - def test_floating_overflow(self): - """ Strings containing an unrepresentable float overflow """ - fhalf = np.half('1e10000') - assert_equal(fhalf, np.inf) - fsingle = np.single('1e10000') - assert_equal(fsingle, np.inf) - fdouble = np.double('1e10000') - assert_equal(fdouble, np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') - assert_equal(flongdouble, np.inf) - - fhalf = np.half('-1e10000') - assert_equal(fhalf, -np.inf) - fsingle = np.single('-1e10000') - assert_equal(fsingle, -np.inf) - fdouble = np.double('-1e10000') - assert_equal(fdouble, -np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') - assert_equal(flongdouble, -np.inf) - - @pytest.mark.skipif((sys.version_info[0] >= 3) - or (sys.platform == "win32" - and platform.architecture()[0] == "64bit"), - reason="numpy.intp('0xff', 16) not supported on Py3 " - "or 64 bit Windows") - def test_intp(self): - # Ticket #99 - i_width = np.int_(0).nbytes*2 - 1 - np.intp('0x' + 'f'*i_width, 16) - assert_raises(OverflowError, np.intp, '0x' + 
'f'*(i_width+1), 16) - assert_raises(ValueError, np.intp, '0x1', 32) - assert_equal(255, np.intp('0xFF', 16)) - - -class TestFromInt(object): - def test_intp(self): - # Ticket #99 - assert_equal(1024, np.intp(1024)) - - def test_uint64_from_negative(self): - assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_methods.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_methods.py deleted file mode 100644 index 93434dd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalar_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Test the scalar constructors, which also do type-coercion -""" -from __future__ import division, absolute_import, print_function - -import os -import fractions -import platform - -import pytest -import numpy as np - -from numpy.testing import ( - run_module_suite, - assert_equal, assert_almost_equal, assert_raises, assert_warns, - dec -) - -class TestAsIntegerRatio(object): - # derived in part from the cpython test "test_floatasratio" - - @pytest.mark.parametrize("ftype", [ - np.half, np.single, np.double, np.longdouble]) - @pytest.mark.parametrize("f, ratio", [ - (0.875, (7, 8)), - (-0.875, (-7, 8)), - (0.0, (0, 1)), - (11.5, (23, 2)), - ]) - def test_small(self, ftype, f, ratio): - assert_equal(ftype(f).as_integer_ratio(), ratio) - - @pytest.mark.parametrize("ftype", [ - np.half, np.single, np.double, np.longdouble]) - def test_simple_fractions(self, ftype): - R = fractions.Fraction - assert_equal(R(0, 1), - R(*ftype(0.0).as_integer_ratio())) - assert_equal(R(5, 2), - R(*ftype(2.5).as_integer_ratio())) - assert_equal(R(1, 2), - R(*ftype(0.5).as_integer_ratio())) - assert_equal(R(-2100, 1), - R(*ftype(-2100.0).as_integer_ratio())) - - @pytest.mark.parametrize("ftype", [ - np.half, np.single, np.double, np.longdouble]) - def test_errors(self, ftype): - assert_raises(OverflowError, ftype('inf').as_integer_ratio) - assert_raises(OverflowError, ftype('-inf').as_integer_ratio) - assert_raises(ValueError, ftype('nan').as_integer_ratio) - - def test_against_known_values(self): - R = fractions.Fraction - assert_equal(R(1075, 512), - R(*np.half(2.1).as_integer_ratio())) - assert_equal(R(-1075, 512), - R(*np.half(-2.1).as_integer_ratio())) - assert_equal(R(4404019, 2097152), - R(*np.single(2.1).as_integer_ratio())) - assert_equal(R(-4404019, 2097152), - R(*np.single(-2.1).as_integer_ratio())) - assert_equal(R(4728779608739021, 2251799813685248), - R(*np.double(2.1).as_integer_ratio())) - assert_equal(R(-4728779608739021, 2251799813685248), - R(*np.double(-2.1).as_integer_ratio())) - # longdouble is platform dependent - - @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [ - # dtype test cases generated using hypothesis - # first five generated cases per dtype - (np.half, [0.0, 0.01154830649280303, 0.31082276347447274, - 0.527350517124794, 0.8308562335072596], - [0, 1, 0, -8, 12]), - (np.single, [0.0, 0.09248576989263226, 0.8160498218131407, - 0.17389442853722373, 0.7956044195067877], - [0, 12, 10, 17, -26]), - (np.double, [0.0, 0.031066908499895136, 0.5214135908877832, - 0.45780736035689296, 0.5906586745934036], - [0, -801, 51, 194, -653]), - pytest.param( - np.longdouble, - [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495, - 0.9620175814461964], - [0, -7400, 14266, -7822, -8721], - marks=[ - pytest.mark.skipif( - np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double"), - pytest.mark.skipif( - 
platform.machine().startswith("ppc"), - reason="IBM double double"), - ] - ) - ]) - def test_roundtrip(self, ftype, frac_vals, exp_vals): - for frac, exp in zip(frac_vals, exp_vals): - f = np.ldexp(frac, exp, dtype=ftype) - n, d = f.as_integer_ratio() - - try: - # workaround for gh-9968 - nf = np.longdouble(str(n)) - df = np.longdouble(str(d)) - except (OverflowError, RuntimeWarning): - # the values may not fit in any float type - pytest.skip("longdouble too small on this platform") - - assert_equal(nf / df, f, "{}/{}".format(n, d)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarbuffer.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarbuffer.py deleted file mode 100644 index 3ded7ee..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarbuffer.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Test scalar buffer interface adheres to PEP 3118 -""" -import sys -import numpy as np -import pytest - -from numpy.testing import assert_, assert_equal, assert_raises - -# PEP3118 format strings for native (standard alignment and byteorder) types -scalars_and_codes = [ - (np.bool_, '?'), - (np.byte, 'b'), - (np.short, 'h'), - (np.intc, 'i'), - (np.int_, 'l'), - (np.longlong, 'q'), - (np.ubyte, 'B'), - (np.ushort, 'H'), - (np.uintc, 'I'), - (np.uint, 'L'), - (np.ulonglong, 'Q'), - (np.half, 'e'), - (np.single, 'f'), - (np.double, 'd'), - (np.longdouble, 'g'), - (np.csingle, 'Zf'), - (np.cdouble, 'Zd'), - (np.clongdouble, 'Zg'), -] -scalars_only, codes_only = zip(*scalars_and_codes) - - -@pytest.mark.skipif(sys.version_info.major < 3, - reason="Python 2 scalars lack a buffer interface") -class TestScalarPEP3118(object): - - @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) - def test_scalar_match_array(self, scalar): - x = scalar() - a = np.array([], dtype=np.dtype(scalar)) - mv_x = memoryview(x) - mv_a = memoryview(a) - assert_equal(mv_x.format, mv_a.format) - - @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) - def test_scalar_dim(self, scalar): - x = scalar() - mv_x = memoryview(x) - assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) - assert_equal(mv_x.ndim, 0) - assert_equal(mv_x.shape, ()) - assert_equal(mv_x.strides, ()) - assert_equal(mv_x.suboffsets, ()) - - @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) - def test_scalar_known_code(self, scalar, code): - x = scalar() - mv_x = memoryview(x) - assert_equal(mv_x.format, code) - - def test_void_scalar_structured_data(self): - dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))]) - x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] - assert_(isinstance(x, np.void)) - mv_x = memoryview(x) - expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize - expected_size += 2 * np.dtype(np.float64).itemsize - assert_equal(mv_x.itemsize, expected_size) - assert_equal(mv_x.ndim, 0) - assert_equal(mv_x.shape, ()) - assert_equal(mv_x.strides, ()) - assert_equal(mv_x.suboffsets, ()) - - # check scalar format string against ndarray format string - a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - assert_(isinstance(a, np.ndarray)) - mv_a = memoryview(a) - assert_equal(mv_x.itemsize, mv_a.itemsize) - assert_equal(mv_x.format, mv_a.format) - - def test_datetime_memoryview(self): - # gh-11656 - # Values verified with v1.13.3, shape is not () as in test_scalar_dim - def as_dict(m): - return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, - ndim=m.ndim, format=m.format) - - dt1 = 
np.datetime64('2016-01-01') - dt2 = np.datetime64('2017-01-01') - expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, - 'shape': (8,), 'format': 'B'} - v = memoryview(dt1) - res = as_dict(v) - assert_equal(res, expected) - - v = memoryview(dt2 - dt1) - res = as_dict(v) - assert_equal(res, expected) - - dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) - a = np.empty(1, dt) - # Fails to create a PEP 3118 valid buffer - assert_raises((ValueError, BufferError), memoryview, a[0]) - diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarinherit.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarinherit.py deleted file mode 100644 index 6a5c4fd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarinherit.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -""" Test printing of scalar types. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import assert_ - - -class A(object): - pass -class B(A, np.float64): - pass - -class C(B): - pass -class D(C, B): - pass - -class B0(np.float64, A): - pass -class C0(B0): - pass - -class TestInherit(object): - def test_init(self): - x = B(1.0) - assert_(str(x) == '1.0') - y = C(2.0) - assert_(str(y) == '2.0') - z = D(3.0) - assert_(str(z) == '3.0') - - def test_init2(self): - x = B0(1.0) - assert_(str(x) == '1.0') - y = C0(2.0) - assert_(str(y) == '2.0') - - -class TestCharacter(object): - def test_char_radd(self): - # GH issue 9620, reached gentype_add and raise TypeError - np_s = np.string_('abc') - np_u = np.unicode_('abc') - s = b'def' - u = u'def' - assert_(np_s.__radd__(np_s) is NotImplemented) - assert_(np_s.__radd__(np_u) is NotImplemented) - assert_(np_s.__radd__(s) is NotImplemented) - assert_(np_s.__radd__(u) is NotImplemented) - assert_(np_u.__radd__(np_s) is NotImplemented) - assert_(np_u.__radd__(np_u) is NotImplemented) - assert_(np_u.__radd__(s) is NotImplemented) - assert_(np_u.__radd__(u) is NotImplemented) - assert_(s + np_s == b'defabc') - assert_(u + np_u == u'defabc') - - - class Mystr(str, np.generic): - # would segfault - pass - - ret = s + Mystr('abc') - assert_(type(ret) is type(s)) - - def test_char_repeat(self): - np_s = np.string_('abc') - np_u = np.unicode_('abc') - res_s = b'abc' * 5 - res_u = u'abc' * 5 - assert_(np_s * 5 == res_s) - assert_(np_u * 5 == res_u) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarmath.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarmath.py deleted file mode 100644 index c84380c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarmath.py +++ /dev/null @@ -1,704 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -import itertools -import operator -import platform -import pytest - -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_almost_equal, - assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, - assert_warns, assert_raises_regex, - ) - -types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, - np.int_, np.uint, np.longlong, np.ulonglong, - np.single, np.double, np.longdouble, np.csingle, - np.cdouble, np.clongdouble] - -floating_types = np.floating.__subclasses__() -complex_floating_types = np.complexfloating.__subclasses__() - - -# This compares scalarmath against ufuncs. 
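Before the TestTypes class below: the point of comparing scalarmath against ufuncs is that scalar-scalar arithmetic goes through the same type-resolution machinery as array-array arithmetic, so the result dtypes must agree for every pair of input types. A minimal sketch of that symmetry:

    import numpy as np

    s = np.int8(3) + np.int16(1)
    v = np.array([3], dtype=np.int8) + np.array([1], dtype=np.int16)
    assert s.dtype == v.dtype == np.dtype(np.int16)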
- -class TestTypes(object): - def test_types(self): - for atype in types: - a = atype(1) - assert_(a == 1, "error with %r: got %r" % (atype, a)) - - def test_type_add(self): - # list of types - for k, atype in enumerate(types): - a_scalar = atype(3) - a_array = np.array([3], dtype=atype) - for l, btype in enumerate(types): - b_scalar = btype(1) - b_array = np.array([1], dtype=btype) - c_scalar = a_scalar + b_scalar - c_array = a_array + b_array - # It was comparing the type numbers, but the new ufunc - # function-finding mechanism finds the lowest function - # to which both inputs can be cast - which produces 'l' - # when you do 'q' + 'b'. The old function finding mechanism - # skipped ahead based on the first argument, but that - # does not produce properly symmetric results... - assert_equal(c_scalar.dtype, c_array.dtype, - "error with types (%d/'%c' + %d/'%c')" % - (k, np.dtype(atype).char, l, np.dtype(btype).char)) - - def test_type_create(self): - for k, atype in enumerate(types): - a = np.array([1, 2, 3], atype) - b = atype([1, 2, 3]) - assert_equal(a, b) - - def test_leak(self): - # test leak of scalar objects - # a leak would show up in valgrind as still-reachable of ~2.6MB - for i in range(200000): - np.add(1, 1) - - -class TestBaseMath(object): - def test_blocked(self): - # test alignments offsets for simd instructions - # alignments for vz + 2 * (vs - 1) + 1 - for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]: - for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, - type='binary', - max_size=sz): - exp1 = np.ones_like(inp1) - inp1[...] = np.ones_like(inp1) - inp2[...] = np.zeros_like(inp2) - assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) - assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg) - assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) - - np.add(inp1, inp2, out=out) - assert_almost_equal(out, exp1, err_msg=msg) - - inp2[...] += np.arange(inp2.size, dtype=dt) + 1 - assert_almost_equal(np.square(inp2), - np.multiply(inp2, inp2), err_msg=msg) - # skip true divide for ints - if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning): - assert_almost_equal(np.reciprocal(inp2), - np.divide(1, inp2), err_msg=msg) - - inp1[...] = np.ones_like(inp1) - np.add(inp1, 2, out=out) - assert_almost_equal(out, exp1 + 2, err_msg=msg) - inp2[...] = np.ones_like(inp2) - np.add(2, inp2, out=out) - assert_almost_equal(out, exp1 + 2, err_msg=msg) - - def test_lower_align(self): - # check data that is not aligned to element size - # i.e doubles are aligned to 4 bytes on i386 - d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - assert_almost_equal(d + d, d * 2) - np.add(d, d, out=o) - np.add(np.ones_like(d), d, out=o) - np.add(d, np.ones_like(d), out=o) - np.add(np.ones_like(d), d) - np.add(d, np.ones_like(d)) - - -class TestPower(object): - def test_small_types(self): - for t in [np.int8, np.int16, np.float16]: - a = t(3) - b = a ** 4 - assert_(b == 81, "error with %r: got %r" % (t, b)) - - def test_large_types(self): - for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: - a = t(51) - b = a ** 4 - msg = "error with %r: got %r" % (t, b) - if np.issubdtype(t, np.integer): - assert_(b == 6765201, msg) - else: - assert_almost_equal(b, 6765201, err_msg=msg) - - def test_integers_to_negative_integer_power(self): - # Note that the combination of uint64 with a signed integer - # has common type np.float64. 
The other combinations should all - # raise a ValueError for integer ** negative integer. - exp = [np.array(-1, dt)[()] for dt in 'bhilq'] - - # 1 ** -1 possible special case - base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ'] - for i1, i2 in itertools.product(base, exp): - if i1.dtype != np.uint64: - assert_raises(ValueError, operator.pow, i1, i2) - else: - res = operator.pow(i1, i2) - assert_(res.dtype.type is np.float64) - assert_almost_equal(res, 1.) - - # -1 ** -1 possible special case - base = [np.array(-1, dt)[()] for dt in 'bhilq'] - for i1, i2 in itertools.product(base, exp): - if i1.dtype != np.uint64: - assert_raises(ValueError, operator.pow, i1, i2) - else: - res = operator.pow(i1, i2) - assert_(res.dtype.type is np.float64) - assert_almost_equal(res, -1.) - - # 2 ** -1 perhaps generic - base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ'] - for i1, i2 in itertools.product(base, exp): - if i1.dtype != np.uint64: - assert_raises(ValueError, operator.pow, i1, i2) - else: - res = operator.pow(i1, i2) - assert_(res.dtype.type is np.float64) - assert_almost_equal(res, .5) - - def test_mixed_types(self): - typelist = [np.int8, np.int16, np.float16, - np.float32, np.float64, np.int8, - np.int16, np.int32, np.int64] - for t1 in typelist: - for t2 in typelist: - a = t1(3) - b = t2(2) - result = a**b - msg = ("error with %r and %r:" - "got %r, expected %r") % (t1, t2, result, 9) - if np.issubdtype(np.dtype(result), np.integer): - assert_(result == 9, msg) - else: - assert_almost_equal(result, 9, err_msg=msg) - - def test_modular_power(self): - # modular power is not implemented, so ensure it errors - a = 5 - b = 4 - c = 10 - expected = pow(a, b, c) # noqa: F841 - for t in (np.int32, np.float32, np.complex64): - # note that 3-operand power only dispatches on the first argument - assert_raises(TypeError, operator.pow, t(a), b, c) - assert_raises(TypeError, operator.pow, np.array(t(a)), b, c) - - -def floordiv_and_mod(x, y): - return (x // y, x % y) - - -def _signs(dt): - if dt in np.typecodes['UnsignedInteger']: - return (+1,) - else: - return (+1, -1) - - -class TestModulus(object): - - def test_modulus_basic(self): - dt = np.typecodes['AllInteger'] + np.typecodes['Float'] - for op in [floordiv_and_mod, divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1)[()] - b = np.array(sg2*19, dtype=dt2)[()] - div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_modulus_exact(self): - # test that float results are exact for small integers. This also - # holds for the same integers scaled by powers of two. - nlst = list(range(-127, 0)) - plst = list(range(1, 128)) - dividend = nlst + [0] + plst - divisor = nlst + plst - arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) - - a, b = np.array(arg, dtype=int).T - # convert exact integer results from Python to float so that - # signed zero can be used, it is checked. 
- tgtdiv, tgtrem = np.array(tgt, dtype=float).T - tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) - tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) - - for op in [floordiv_and_mod, divmod]: - for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) - fa = a.astype(dt) - fb = b.astype(dt) - # use list comprehension so a_ and b_ are scalars - div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) - assert_equal(div, tgtdiv, err_msg=msg) - assert_equal(rem, tgtrem, err_msg=msg) - - def test_float_modulus_roundoff(self): - # gh-6127 - dt = np.typecodes['Float'] - for op in [floordiv_and_mod, divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1)[()] - b = np.array(sg2*6e-8, dtype=dt2)[()] - div, rem = op(a, b) - # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_modulus_corner_cases(self): - # Check remainder magnitude. - for dt in np.typecodes['Float']: - b = np.array(1.0, dtype=dt) - a = np.nextafter(np.array(0.0, dtype=dt), -b) - rem = operator.mod(a, b) - assert_(rem <= b, 'dt: %s' % dt) - rem = operator.mod(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) - - # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - for dt in np.typecodes['Float']: - fone = np.array(1.0, dtype=dt) - fzer = np.array(0.0, dtype=dt) - finf = np.array(np.inf, dtype=dt) - fnan = np.array(np.nan, dtype=dt) - rem = operator.mod(fone, fzer) - assert_(np.isnan(rem), 'dt: %s' % dt) - # MSVC 2008 returns NaN here, so disable the check. - #rem = operator.mod(fone, finf) - #assert_(rem == fone, 'dt: %s' % dt) - rem = operator.mod(fone, fnan) - assert_(np.isnan(rem), 'dt: %s' % dt) - rem = operator.mod(finf, fone) - assert_(np.isnan(rem), 'dt: %s' % dt) - - def test_inplace_floordiv_handling(self): - # issue gh-12927 - # this only applies to in-place floordiv //=, because the output type - # promotes to float which does not fit - a = np.array([1, 2], np.int64) - b = np.array([1, 2], np.uint64) - pattern = 'could not be coerced to provided output parameter' - with assert_raises_regex(TypeError, pattern): - a //= b - - -class TestComplexDivision(object): - def test_zero_division(self): - with np.errstate(all="ignore"): - for t in [np.complex64, np.complex128]: - a = t(0.0) - b = t(1.0) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.nan)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.nan)) - assert_(np.isnan(b/a)) - b = t(0.) 
- assert_(np.isnan(b/a)) - - def test_signed_zeros(self): - with np.errstate(all="ignore"): - for t in [np.complex64, np.complex128]: - # tupled (numerator, denominator, expected) - # for testing as expected == numerator/denominator - data = ( - (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), - (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), - (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) - ) - for cases in data: - n = cases[0] - d = cases[1] - ex = cases[2] - result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) - # check real and imag parts separately to avoid comparison - # in array context, which does not account for signed zeros - assert_equal(result.real, ex[0]) - assert_equal(result.imag, ex[1]) - - def test_branches(self): - with np.errstate(all="ignore"): - for t in [np.complex64, np.complex128]: - # tupled (numerator, denominator, expected) - # for testing as expected == numerator/denominator - data = list() - - # trigger branch: real(fabs(denom)) > imag(fabs(denom)) - # followed by else condition as neither are == 0 - data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) - - # trigger branch: real(fabs(denom)) > imag(fabs(denom)) - # followed by if condition as both are == 0 - # is performed in test_zero_division(), so this is skipped - - # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) - data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) - - for cases in data: - n = cases[0] - d = cases[1] - ex = cases[2] - result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) - # check real and imag parts separately to avoid comparison - # in array context, which does not account for signed zeros - assert_equal(result.real, ex[0]) - assert_equal(result.imag, ex[1]) - - -class TestConversion(object): - def test_int_from_long(self): - l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] - li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] - for T in [None, np.float64, np.int64]: - a = np.array(l, dtype=T) - assert_equal([int(_m) for _m in a], li) - - a = np.array(l[:3], dtype=np.uint64) - assert_equal([int(_m) for _m in a], li[:3]) - - def test_iinfo_long_values(self): - for code in 'bBhH': - res = np.array(np.iinfo(code).max + 1, dtype=code) - tgt = np.iinfo(code).min - assert_(res == tgt) - - for code in np.typecodes['AllInteger']: - res = np.array(np.iinfo(code).max, dtype=code) - tgt = np.iinfo(code).max - assert_(res == tgt) - - for code in np.typecodes['AllInteger']: - res = np.typeDict[code](np.iinfo(code).max) - tgt = np.iinfo(code).max - assert_(res == tgt) - - def test_int_raise_behaviour(self): - def overflow_error_func(dtype): - np.typeDict[dtype](np.iinfo(dtype).max + 1) - - for code in 'lLqQ': - assert_raises(OverflowError, overflow_error_func, code) - - def test_int_from_infinite_longdouble(self): - # gh-627 - x = np.longdouble(np.inf) - assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(np.ComplexWarning) - x = np.clongdouble(np.inf) - assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) - - @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") - def test_int_from_infinite_longdouble___int__(self): - x = np.longdouble(np.inf) - assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(np.ComplexWarning) - x = np.clongdouble(np.inf) - assert_raises(OverflowError, x.__int__) - 
assert_equal(len(sup.log), 1) - - @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double") - @pytest.mark.skipif(platform.machine().startswith("ppc"), - reason="IBM double double") - def test_int_from_huge_longdouble(self): - # Produce a longdouble that would overflow a double, - # use exponent that avoids bug in Darwin pow function. - exp = np.finfo(np.double).maxexp - 1 - huge_ld = 2 * 1234 * np.longdouble(2) ** exp - huge_i = 2 * 1234 * 2 ** exp - assert_(huge_ld != np.inf) - assert_equal(int(huge_ld), huge_i) - - def test_int_from_longdouble(self): - x = np.longdouble(1.5) - assert_equal(int(x), 1) - x = np.longdouble(-10.5) - assert_equal(int(x), -10) - - def test_numpy_scalar_relational_operators(self): - # All integer - for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - - for dt2 in np.typecodes['AllInteger']: - assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - # Unsigned integers - for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - - # unsigned vs signed - for dt2 in 'bhilqp': - assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - # Signed integers and floats - for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - - for dt2 in 'bhlqp' + np.typecodes['Float']: - assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - def test_scalar_comparison_to_none(self): - # Scalars should just return False and not give warnings. - # The comparisons are flagged by pep8, ignore that. - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', FutureWarning) - assert_(not np.float32(1) == None) - assert_(not np.str_('test') == None) - # This is dubious (see below): - assert_(not np.datetime64('NaT') == None) - - assert_(np.float32(1) != None) - assert_(np.str_('test') != None) - # This is dubious (see below): - assert_(np.datetime64('NaT') != None) - assert_(len(w) == 0) - - # For documentation purposes, this is why the datetime is dubious. - # At the time of deprecation this was no behaviour change, but - # it has to be considered when the deprecations are done.
- assert_(np.equal(np.datetime64('NaT'), None)) - - -#class TestRepr(object): -# def test_repr(self): -# for t in types: -# val = t(1197346475.0137341) -# val_repr = repr(val) -# val2 = eval(val_repr) -# assert_equal( val, val2 ) - - -class TestRepr(object): - def _test_type_repr(self, t): - finfo = np.finfo(t) - last_fraction_bit_idx = finfo.nexp + finfo.nmant - last_exponent_bit_idx = finfo.nexp - storage_bytes = np.dtype(t).itemsize*8 - # could add some more types to the list below - for which in ['small denorm', 'small norm']: - # Values from https://en.wikipedia.org/wiki/IEEE_754 - constr = np.array([0x00]*storage_bytes, dtype=np.uint8) - if which == 'small denorm': - byte = last_fraction_bit_idx // 8 - bytebit = 7-(last_fraction_bit_idx % 8) - constr[byte] = 1 << bytebit - elif which == 'small norm': - byte = last_exponent_bit_idx // 8 - bytebit = 7-(last_exponent_bit_idx % 8) - constr[byte] = 1 << bytebit - else: - raise ValueError('hmm') - val = constr.view(t)[0] - val_repr = repr(val) - val2 = t(eval(val_repr)) - if not (val2 == 0 and val < 1e-100): - assert_equal(val, val2) - - def test_float_repr(self): - # long double test cannot work, because eval goes through a python - # float - for t in [np.float32, np.float64]: - self._test_type_repr(t) - - -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf(object): - - def test_equal_nbytes(self): - for type in types: - x = type(0) - assert_(sys.getsizeof(x) > x.nbytes) - - def test_error(self): - d = np.float32() - assert_raises(TypeError, d.__sizeof__, "a") - - -class TestMultiply(object): - def test_seq_repeat(self): - # Test that basic sequences get repeated when multiplied with - # numpy integers. And errors are raised when multiplied with others. - # Some of this behaviour may be controversial and could be open for - # change. - accepted_types = set(np.typecodes["AllInteger"]) - deprecated_types = {'?'} - forbidden_types = ( - set(np.typecodes["All"]) - accepted_types - deprecated_types) - forbidden_types -= {'V'} # can't default-construct void scalars - - for seq_type in (list, tuple): - seq = seq_type([1, 2, 3]) - for numpy_type in accepted_types: - i = np.dtype(numpy_type).type(2) - assert_equal(seq * i, seq * int(i)) - assert_equal(i * seq, int(i) * seq) - - for numpy_type in deprecated_types: - i = np.dtype(numpy_type).type() - assert_equal( - assert_warns(DeprecationWarning, operator.mul, seq, i), - seq * int(i)) - assert_equal( - assert_warns(DeprecationWarning, operator.mul, i, seq), - int(i) * seq) - - for numpy_type in forbidden_types: - i = np.dtype(numpy_type).type() - assert_raises(TypeError, operator.mul, seq, i) - assert_raises(TypeError, operator.mul, i, seq) - - def test_no_seq_repeat_basic_array_like(self): - # Test that an array-like which does not know how to be multiplied - # does not attempt sequence repeat (raise TypeError). - # See also gh-7428. - class ArrayLike(object): - def __init__(self, arr): - self.arr = arr - def __array__(self): - return self.arr - - # Test for simple ArrayLike above and memoryviews (original report) - for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): - assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) - assert_array_equal(np.float32(3.) 
* arr_like, np.full(3, 3.)) - assert_array_equal(arr_like * np.int_(3), np.full(3, 3)) - assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) - - -class TestNegative(object): - def test_exceptions(self): - a = np.ones((), dtype=np.bool_)[()] - assert_raises(TypeError, operator.neg, a) - - def test_result(self): - types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) - for dt in types: - a = np.ones((), dtype=dt)[()] - assert_equal(operator.neg(a) + a, 0) - - -class TestSubtract(object): - def test_exceptions(self): - a = np.ones((), dtype=np.bool_)[()] - assert_raises(TypeError, operator.sub, a, a) - - def test_result(self): - types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) - for dt in types: - a = np.ones((), dtype=dt)[()] - assert_equal(operator.sub(a, a), 0) - - -class TestAbs(object): - def _test_abs_func(self, absfunc): - for tp in floating_types + complex_floating_types: - x = tp(-1.5) - assert_equal(absfunc(x), 1.5) - x = tp(0.0) - res = absfunc(x) - # assert_equal() checks zero signedness - assert_equal(res, 0.0) - x = tp(-0.0) - res = absfunc(x) - assert_equal(res, 0.0) - - x = tp(np.finfo(tp).max) - assert_equal(absfunc(x), x.real) - - x = tp(np.finfo(tp).tiny) - assert_equal(absfunc(x), x.real) - - x = tp(np.finfo(tp).min) - assert_equal(absfunc(x), -x.real) - - def test_builtin_abs(self): - self._test_abs_func(abs) - - def test_numpy_abs(self): - self._test_abs_func(np.abs) - - -class TestBitShifts(object): - - @pytest.mark.parametrize('type_code', np.typecodes['AllInteger']) - @pytest.mark.parametrize('op', - [operator.rshift, operator.lshift], ids=['>>', '<<']) - def test_shift_all_bits(self, type_code, op): - """ Shifts where the shift amount is the width of the type or wider """ - # gh-2449 - dt = np.dtype(type_code) - nbits = dt.itemsize * 8 - for val in [5, -5]: - for shift in [nbits, nbits + 4]: - val_scl = dt.type(val) - shift_scl = dt.type(shift) - res_scl = op(val_scl, shift_scl) - if val_scl < 0 and op is operator.rshift: - # sign bit is preserved - assert_equal(res_scl, -1) - else: - assert_equal(res_scl, 0) - - # Result on scalars should be the same as on arrays - val_arr = np.array([val]*32, dtype=dt) - shift_arr = np.array([shift]*32, dtype=dt) - res_arr = op(val_arr, shift_arr) - assert_equal(res_arr, res_scl) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarprint.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarprint.py deleted file mode 100644 index 86b0ca1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_scalarprint.py +++ /dev/null @@ -1,326 +0,0 @@ -# -*- coding: utf-8 -*- -""" Test printing of scalar types. 
- -""" -from __future__ import division, absolute_import, print_function - -import code, sys -import platform -import pytest - -from tempfile import TemporaryFile -import numpy as np -from numpy.testing import assert_, assert_equal, suppress_warnings - -class TestRealScalars(object): - def test_str(self): - svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] - styps = [np.float16, np.float32, np.float64, np.longdouble] - wanted = [ - ['0.0', '0.0', '0.0', '0.0' ], - ['-0.0', '-0.0', '-0.0', '-0.0'], - ['1.0', '1.0', '1.0', '1.0' ], - ['-1.0', '-1.0', '-1.0', '-1.0'], - ['inf', 'inf', 'inf', 'inf' ], - ['-inf', '-inf', '-inf', '-inf'], - ['nan', 'nan', 'nan', 'nan']] - - for wants, val in zip(wanted, svals): - for want, styp in zip(wants, styps): - msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) - assert_equal(str(styp(val)), want, err_msg=msg) - - def test_scalar_cutoffs(self): - # test that both the str and repr of np.float64 behave - # like python floats in python3. Note that in python2 - # the str has truncated digits, but we do not do this - def check(v): - # we compare str to repr, to avoid python2 truncation behavior - assert_equal(str(np.float64(v)), repr(v)) - assert_equal(repr(np.float64(v)), repr(v)) - - # check we use the same number of significant digits - check(1.12345678901234567890) - check(0.0112345678901234567890) - - # check switch from scientific output to positional and back - check(1e-5) - check(1e-4) - check(1e15) - check(1e16) - - def test_py2_float_print(self): - # gh-10753 - # In python2, the python float type implements an obsolete method - # tp_print, which overrides tp_repr and tp_str when using "print" to - # output to a "real file" (ie, not a StringIO). Make sure we don't - # inherit it. - x = np.double(0.1999999999999) - with TemporaryFile('r+t') as f: - print(x, file=f) - f.seek(0) - output = f.read() - assert_equal(output, str(x) + '\n') - # In python2 the value float('0.1999999999999') prints with reduced - # precision as '0.2', but we want numpy's np.double('0.1999999999999') - # to print the unique value, '0.1999999999999'. - - # gh-11031 - # Only in the python2 interactive shell and when stdout is a "real" - # file, the output of the last command is printed to stdout without - # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print - # x` are potentially different. Make sure they are the same. The only - # way I found to get prompt-like output is using an actual prompt from - # the 'code' module. Again, must use tempfile to get a "real" file. - - # dummy user-input which enters one line and then ctrl-Ds. - def userinput(): - yield 'np.sqrt(2)' - raise EOFError - gen = userinput() - input_func = lambda prompt="": next(gen) - - with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: - orig_stdout, orig_stderr = sys.stdout, sys.stderr - sys.stdout, sys.stderr = fo, fe - - # py2 code.interact sends irrelevant internal DeprecationWarnings - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - code.interact(local={'np': np}, readfunc=input_func, banner='') - - sys.stdout, sys.stderr = orig_stdout, orig_stderr - - fo.seek(0) - capture = fo.read().strip() - - assert_equal(capture, repr(np.sqrt(2))) - - def test_dragon4(self): - # these tests are adapted from Ryan Juckett's dragon4 implementation, - # see dragon4.c for details.
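# Dragon4 is the shortest-repr printing algorithm: it emits the fewest
# digits that still round-trip to exactly the same float. As a rough
# illustration of the two public entry points exercised below (values
# chosen here for illustration only, not taken from the tests):
#
#     >>> import numpy as np
#     >>> np.format_float_positional(np.float32(0.1))
#     '0.1'
#     >>> np.format_float_scientific(np.float64(0.1), precision=4, unique=False)
#     '1.0000e-01'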
- - fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) - fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) - fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) - fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) - - preckwd = lambda prec: {'unique': False, 'precision': prec} - - assert_equal(fpos32('1.0'), "1.") - assert_equal(fsci32('1.0'), "1.e+00") - assert_equal(fpos32('10.234'), "10.234") - assert_equal(fpos32('-10.234'), "-10.234") - assert_equal(fsci32('10.234'), "1.0234e+01") - assert_equal(fsci32('-10.234'), "-1.0234e+01") - assert_equal(fpos32('1000.0'), "1000.") - assert_equal(fpos32('1.0', precision=0), "1.") - assert_equal(fsci32('1.0', precision=0), "1.e+00") - assert_equal(fpos32('10.234', precision=0), "10.") - assert_equal(fpos32('-10.234', precision=0), "-10.") - assert_equal(fsci32('10.234', precision=0), "1.e+01") - assert_equal(fsci32('-10.234', precision=0), "-1.e+01") - assert_equal(fpos32('10.234', precision=2), "10.23") - assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") - assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), - '9.9999999999999995e-08') - assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), - '9.8813129168249309e-324') - assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), - '9.9999999999999694e-311') - - - # test rounding - # 3.1415927410 is closest float32 to np.pi - assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), - "3.1415927410") - assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), - "3.1415927410e+00") - assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), - "3.1415926536") - assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), - "3.1415926536e+00") - # 299792448 is closest float32 to 299792458 - assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") - assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") - assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") - assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") - - assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), - "3.1415927410125732421875000") - assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), - "3.14159265358979311599796346854418516159057617187500") - assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") - - - # smallest numbers - assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), - "0.00000000000000000000000000000000000000000000140129846432" - "4817070923729583289916131280261941876515771757068283889791" - "08268586060148663818836212158203125") - assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074), - "0.00000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000000000000000000000000000" - "0000000000000000000000000000000000049406564584124654417656" - "8792868221372365059802614324764425585682500675507270208751" - "8652998363616359923797965646954457177309266567103559397963" - "9877479601078187812630071319031140452784581716784898210368" - "8718636056998730723050006387409153564984387312473397273169" - "6151400317153853980741262385655911710266585566867681870395" - "6031062493194527159149245532930545654440112748012970999954" - "1931989409080416563324524757147869014726780159355238611550" - 
"1348035264934720193790268107107491703332226844753335720832" - "4319360923828934583680601060115061698097530783422773183292" - "4790498252473077637592724787465608477820373446969953364701" - "7972677717585125660551199131504891101451037862738167250955" - "8373897335989936648099411642057026370902792427675445652290" - "87538682506419718265533447265625") - - # largest numbers - assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)), - "340282346638528859811704183484516925440.") - assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), - "1797693134862315708145274237317043567980705675258449965989" - "1747680315726078002853876058955863276687817154045895351438" - "2464234321326889464182768467546703537516986049910576551282" - "0762454900903893289440758685084551339423045832369032229481" - "6580855933212334827479782620414472316873817718091929988125" - "0404026184124858368.") - # Warning: In unique mode only the integer digits necessary for - # uniqueness are computed, the rest are 0. Should we change this? - assert_equal(fpos32(np.finfo(np.float32).max, precision=0), - "340282350000000000000000000000000000000.") - - # test trailing zeros - assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") - assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") - assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") - assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") - assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") - assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") - assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") - assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") - # gh-10713 - assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00") - - def test_dragon4_interface(self): - tps = [np.float16, np.float32, np.float64] - if hasattr(np, 'float128'): - tps.append(np.float128) - - fpos = np.format_float_positional - fsci = np.format_float_scientific - - for tp in tps: - # test padding - assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") - assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") - assert_equal(fpos(tp('-10.2'), - pad_left=4, pad_right=4), " -10.2 ") - - # test exp_digits - assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") - - # test fixed (non-unique) mode - assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") - assert_equal(fsci(tp('1.0'), unique=False, precision=4), - "1.0000e+00") - - # test trimming - # trim of 'k' or '.' only affects non-unique mode, since unique - # mode will not output trailing 0s. 
- assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), - "1.0000") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), - "1.") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), - "1.2" if tp != np.float16 else "1.2002") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), - "1.0") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='0'), "1.0") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), - "1") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='-'), "1") - - @pytest.mark.skipif(not platform.machine().startswith("ppc64"), - reason="only applies to ppc float128 values") - def test_ppc64_ibm_double_double128(self): - # check that the precision decreases once we get into the subnormal - # range. Unlike float64, this starts around 1e-292 instead of 1e-308, - # which happens when the first double is normal and the second is - # subnormal. - x = np.float128('2.123123123123123123123123123123123e-286') - got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)] - expected = [ - "1.06156156156156156156156156156157e-286", - "1.06156156156156156156156156156158e-287", - "1.06156156156156156156156156156159e-288", - "1.0615615615615615615615615615616e-289", - "1.06156156156156156156156156156157e-290", - "1.06156156156156156156156156156156e-291", - "1.0615615615615615615615615615616e-292", - "1.0615615615615615615615615615615e-293", - "1.061561561561561561561561561562e-294", - "1.06156156156156156156156156155e-295", - "1.0615615615615615615615615616e-296", - "1.06156156156156156156156156e-297", - "1.06156156156156156156156157e-298", - "1.0615615615615615615615616e-299", - "1.06156156156156156156156e-300", - "1.06156156156156156156155e-301", - "1.0615615615615615615616e-302", - "1.061561561561561561562e-303", - "1.06156156156156156156e-304", - "1.0615615615615615618e-305", - "1.06156156156156156e-306", - "1.06156156156156157e-307", - "1.0615615615615616e-308", - "1.06156156156156e-309", - "1.06156156156157e-310", - "1.0615615615616e-311", - "1.06156156156e-312", - "1.06156156154e-313", - "1.0615615616e-314", - "1.06156156e-315", - "1.06156155e-316", - "1.061562e-317", - "1.06156e-318", - "1.06155e-319", - "1.0617e-320", - "1.06e-321", - "1.04e-322", - "1e-323", - "0.0", - "0.0"] - assert_equal(got, expected) - - # Note: we follow glibc behavior, but it (or gcc) might not be right. 
- # In particular we can get two values that print the same but are not - # equal: - a = np.float128('2')/np.float128('3') - b = np.float128(str(a)) - assert_equal(str(a), str(b)) - assert_(a != b) - - def test_float32_roundtrip(self): - # gh-9360 - x = np.float32(1024 - 2**-14) - y = np.float32(1024 - 2**-13) - assert_(repr(x) != repr(y)) - assert_equal(np.float32(repr(x)), x) - assert_equal(np.float32(repr(y)), y) - - def test_float64_vs_python(self): - # gh-2643, gh-6136, gh-6908 - assert_equal(repr(np.float64(0.1)), repr(0.1)) - assert_(repr(np.float64(0.20000000000000004)) != repr(0.2)) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_shape_base.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_shape_base.py deleted file mode 100644 index 53d272f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_shape_base.py +++ /dev/null @@ -1,720 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest -import sys -import numpy as np -from numpy.core import ( - array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, - newaxis, concatenate, stack - ) -from numpy.core.shape_base import (_block_dispatcher, _block_setup, - _block_concatenate, _block_slicing) -from numpy.testing import ( - assert_, assert_raises, assert_array_equal, assert_equal, - assert_raises_regex, assert_warns - ) - -from numpy.compat import long - -class TestAtleast1d(object): - def test_0D_array(self): - a = array(1) - b = array(2) - res = [atleast_1d(a), atleast_1d(b)] - desired = [array([1]), array([2])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1, 2]) - b = array([2, 3]) - res = [atleast_1d(a), atleast_1d(b)] - desired = [array([1, 2]), array([2, 3])] - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - res = [atleast_1d(a), atleast_1d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_3D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - a = array([a, a]) - b = array([b, b]) - res = [atleast_1d(a), atleast_1d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_r1array(self): - """ Test that atleast_1d is equivalent to Travis O's r1array function - """ - assert_(atleast_1d(3).shape == (1,)) - assert_(atleast_1d(3j).shape == (1,)) - assert_(atleast_1d(long(3)).shape == (1,)) - assert_(atleast_1d(3.0).shape == (1,)) - assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)) - - -class TestAtleast2d(object): - def test_0D_array(self): - a = array(1) - b = array(2) - res = [atleast_2d(a), atleast_2d(b)] - desired = [array([[1]]), array([[2]])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1, 2]) - b = array([2, 3]) - res = [atleast_2d(a), atleast_2d(b)] - desired = [array([[1, 2]]), array([[2, 3]])] - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - res = [atleast_2d(a), atleast_2d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_3D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - a = array([a, a]) - b = array([b, b]) - res = [atleast_2d(a), atleast_2d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - def test_r2array(self): - """ Test that atleast_2d is equivalent to Travis O's r2array function - """ - assert_(atleast_2d(3).shape == (1, 1)) - assert_(atleast_2d([3j, 1]).shape == (1, 2)) - assert_(atleast_2d([[[3, 1], [4, 5]], [[3,
5], [1, 2]]]).shape == (2, 2, 2)) - - -class TestAtleast3d(object): - def test_0D_array(self): - a = array(1) - b = array(2) - res = [atleast_3d(a), atleast_3d(b)] - desired = [array([[[1]]]), array([[[2]]])] - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1, 2]) - b = array([2, 3]) - res = [atleast_3d(a), atleast_3d(b)] - desired = [array([[[1], [2]]]), array([[[2], [3]]])] - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - res = [atleast_3d(a), atleast_3d(b)] - desired = [a[:,:, newaxis], b[:,:, newaxis]] - assert_array_equal(res, desired) - - def test_3D_array(self): - a = array([[1, 2], [1, 2]]) - b = array([[2, 3], [2, 3]]) - a = array([a, a]) - b = array([b, b]) - res = [atleast_3d(a), atleast_3d(b)] - desired = [a, b] - assert_array_equal(res, desired) - - -class TestHstack(object): - def test_non_iterable(self): - assert_raises(TypeError, hstack, 1) - - def test_empty_input(self): - assert_raises(ValueError, hstack, ()) - - def test_0D_array(self): - a = array(1) - b = array(2) - res = hstack([a, b]) - desired = array([1, 2]) - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1]) - b = array([2]) - res = hstack([a, b]) - desired = array([1, 2]) - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1], [2]]) - b = array([[1], [2]]) - res = hstack([a, b]) - desired = array([[1, 1], [2, 2]]) - assert_array_equal(res, desired) - - def test_generator(self): - with assert_warns(FutureWarning): - hstack((np.arange(3) for _ in range(2))) - if sys.version_info.major > 2: - # map returns a list on Python 2 - with assert_warns(FutureWarning): - hstack(map(lambda x: x, np.ones((3, 2)))) - - -class TestVstack(object): - def test_non_iterable(self): - assert_raises(TypeError, vstack, 1) - - def test_empty_input(self): - assert_raises(ValueError, vstack, ()) - - def test_0D_array(self): - a = array(1) - b = array(2) - res = vstack([a, b]) - desired = array([[1], [2]]) - assert_array_equal(res, desired) - - def test_1D_array(self): - a = array([1]) - b = array([2]) - res = vstack([a, b]) - desired = array([[1], [2]]) - assert_array_equal(res, desired) - - def test_2D_array(self): - a = array([[1], [2]]) - b = array([[1], [2]]) - res = vstack([a, b]) - desired = array([[1], [2], [1], [2]]) - assert_array_equal(res, desired) - - def test_2D_array2(self): - a = array([1, 2]) - b = array([1, 2]) - res = vstack([a, b]) - desired = array([[1, 2], [1, 2]]) - assert_array_equal(res, desired) - - def test_generator(self): - with assert_warns(FutureWarning): - vstack((np.arange(3) for _ in range(2))) - - -class TestConcatenate(object): - def test_returns_copy(self): - a = np.eye(3) - b = np.concatenate([a]) - b[0, 0] = 2 - assert b[0, 0] != a[0, 0] - - def test_exceptions(self): - # test axis must be in bounds - for ndim in [1, 2, 3]: - a = np.ones((1,)*ndim) - np.concatenate((a, a), axis=0) # OK - assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim) - assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) - - # Scalars cannot be concatenated - assert_raises(ValueError, concatenate, (0,)) - assert_raises(ValueError, concatenate, (np.array(0),)) - - # dimensionality must match - assert_raises_regex( - ValueError, - r"all the input arrays must have same number of dimensions, but " - r"the array at index 0 has 1 dimension\(s\) and the array at " - r"index 1 has 2 dimension\(s\)", - np.concatenate, (np.zeros(1), np.zeros((1, 1)))) - - 
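# The rule being exercised here: np.concatenate requires every input to
# have the same number of dimensions, and (checked next) identical shapes
# everywhere except along the concatenation axis. An illustrative sketch:
#
#     >>> np.concatenate((np.zeros((1, 2, 3)), np.ones((2, 2, 3))), axis=0).shape
#     (3, 2, 3)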
# test shapes must match except for concatenation axis - a = np.ones((1, 2, 3)) - b = np.ones((2, 2, 3)) - axis = list(range(3)) - for i in range(3): - np.concatenate((a, b), axis=axis[0]) # OK - assert_raises_regex( - ValueError, - "all the input array dimensions for the concatenation axis " - "must match exactly, but along dimension {}, the array at " - "index 0 has size 1 and the array at index 1 has size 2" - .format(i), - np.concatenate, (a, b), axis=axis[1]) - assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) - a = np.moveaxis(a, -1, 0) - b = np.moveaxis(b, -1, 0) - axis.append(axis.pop(0)) - - # No arrays to concatenate raises ValueError - assert_raises(ValueError, concatenate, ()) - - def test_concatenate_axis_None(self): - a = np.arange(4, dtype=np.float64).reshape((2, 2)) - b = list(range(3)) - c = ['x'] - r = np.concatenate((a, a), axis=None) - assert_equal(r.dtype, a.dtype) - assert_equal(r.ndim, 1) - r = np.concatenate((a, b), axis=None) - assert_equal(r.size, a.size + len(b)) - assert_equal(r.dtype, a.dtype) - r = np.concatenate((a, b, c), axis=None) - d = array(['0.0', '1.0', '2.0', '3.0', - '0', '1', '2', 'x']) - assert_array_equal(r, d) - - out = np.zeros(a.size + len(b)) - r = np.concatenate((a, b), axis=None) - rout = np.concatenate((a, b), axis=None, out=out) - assert_(out is rout) - assert_equal(r, rout) - - def test_large_concatenate_axis_None(self): - # When no axis is given, concatenate uses flattened versions. - # This also had a bug with many arrays (see gh-5979). - x = np.arange(1, 100) - r = np.concatenate(x, None) - assert_array_equal(x, r) - - # This should probably be deprecated: - r = np.concatenate(x, 100) # axis is >= MAXDIMS - assert_array_equal(x, r) - - def test_concatenate(self): - # Test concatenate function - # One sequence returns unmodified (but as array) - r4 = list(range(4)) - assert_array_equal(concatenate((r4,)), r4) - # Any sequence - assert_array_equal(concatenate((tuple(r4),)), r4) - assert_array_equal(concatenate((array(r4),)), r4) - # 1D default concatenation - r3 = list(range(3)) - assert_array_equal(concatenate((r4, r3)), r4 + r3) - # Mixed sequence types - assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3) - assert_array_equal(concatenate((array(r4), r3)), r4 + r3) - # Explicit axis specification - assert_array_equal(concatenate((r4, r3), 0), r4 + r3) - # Including negative - assert_array_equal(concatenate((r4, r3), -1), r4 + r3) - # 2D - a23 = array([[10, 11, 12], [13, 14, 15]]) - a13 = array([[0, 1, 2]]) - res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]]) - assert_array_equal(concatenate((a23, a13)), res) - assert_array_equal(concatenate((a23, a13), 0), res) - assert_array_equal(concatenate((a23.T, a13.T), 1), res.T) - assert_array_equal(concatenate((a23.T, a13.T), -1), res.T) - # Arrays must match shape - assert_raises(ValueError, concatenate, (a23.T, a13.T), 0) - # 3D - res = arange(2 * 3 * 7).reshape((2, 3, 7)) - a0 = res[..., :4] - a1 = res[..., 4:6] - a2 = res[..., 6:] - assert_array_equal(concatenate((a0, a1, a2), 2), res) - assert_array_equal(concatenate((a0, a1, a2), -1), res) - assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T) - - out = res.copy() - rout = concatenate((a0, a1, a2), 2, out=out) - assert_(out is rout) - assert_equal(res, rout) - - def test_bad_out_shape(self): - a = array([1, 2]) - b = array([3, 4]) - - assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) - assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1))) - assert_raises(ValueError, concatenate,
(a, b), out=np.empty((1,4))) - concatenate((a, b), out=np.empty(4)) - - def test_out_dtype(self): - out = np.empty(4, np.float32) - res = concatenate((array([1, 2]), array([3, 4])), out=out) - assert_(out is res) - - out = np.empty(4, np.complex64) - res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out) - assert_(out is res) - - # invalid cast - out = np.empty(4, np.int32) - assert_raises(TypeError, concatenate, - (array([0.1, 0.2]), array([0.3, 0.4])), out=out) - - -def test_stack(): - # non-iterable input - assert_raises(TypeError, stack, 1) - - # 0d input - for input_ in [(1, 2, 3), - [np.int32(1), np.int32(2), np.int32(3)], - [np.array(1), np.array(2), np.array(3)]]: - assert_array_equal(stack(input_), [1, 2, 3]) - # 1d input examples - a = np.array([1, 2, 3]) - b = np.array([4, 5, 6]) - r1 = array([[1, 2, 3], [4, 5, 6]]) - assert_array_equal(np.stack((a, b)), r1) - assert_array_equal(np.stack((a, b), axis=1), r1.T) - # all input types - assert_array_equal(np.stack(list([a, b])), r1) - assert_array_equal(np.stack(array([a, b])), r1) - # all shapes for 1d input - arrays = [np.random.randn(3) for _ in range(10)] - axes = [0, 1, -1, -2] - expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] - for axis, expected_shape in zip(axes, expected_shapes): - assert_equal(np.stack(arrays, axis).shape, expected_shape) - assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2) - assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3) - # all shapes for 2d input - arrays = [np.random.randn(3, 4) for _ in range(10)] - axes = [0, 1, 2, -1, -2, -3] - expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), - (3, 4, 10), (3, 10, 4), (10, 3, 4)] - for axis, expected_shape in zip(axes, expected_shapes): - assert_equal(np.stack(arrays, axis).shape, expected_shape) - # empty arrays - assert_(stack([[], [], []]).shape == (3, 0)) - assert_(stack([[], [], []], axis=1).shape == (0, 3)) - # out - out = np.zeros_like(r1) - np.stack((a, b), out=out) - assert_array_equal(out, r1) - # edge cases - assert_raises_regex(ValueError, 'need at least one array', stack, []) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [1, np.arange(3)]) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.arange(3), 1]) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.arange(3), 1], axis=1) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) - assert_raises_regex(ValueError, 'must have the same shape', - stack, [np.arange(2), np.arange(3)]) - # generator is deprecated - with assert_warns(FutureWarning): - result = stack((x for x in range(3))) - assert_array_equal(result, np.array([0, 1, 2])) - - -class TestBlock(object): - @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) - def block(self, request): - # blocking small arrays and large arrays go through different paths. - # the algorithm is triggered depending on the number of element - # copies required. - # We define a test fixture that forces most tests to go through - # both code paths. - # Ultimately, this should be removed if a single algorithm is found - # to be faster for both small and large arrays. 
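# For orientation, np.block assembles one array out of a nested list of
# blocks, so whichever code path is forced below must reproduce e.g.
# (an illustrative sketch, not taken from the tests):
#
#     >>> A = np.ones((2, 2)); B = np.zeros((2, 2))
#     >>> np.block([[A, B], [B, A]]).shape
#     (4, 4)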
- def _block_force_concatenate(arrays): - arrays, list_ndim, result_ndim, _ = _block_setup(arrays) - return _block_concatenate(arrays, list_ndim, result_ndim) - - def _block_force_slicing(arrays): - arrays, list_ndim, result_ndim, _ = _block_setup(arrays) - return _block_slicing(arrays, list_ndim, result_ndim) - - if request.param == 'force_concatenate': - return _block_force_concatenate - elif request.param == 'force_slicing': - return _block_force_slicing - elif request.param == 'block': - return block - else: - raise ValueError('Unknown blocking request. There is a typo in the tests.') - - def test_returns_copy(self, block): - a = np.eye(3) - b = block(a) - b[0, 0] = 2 - assert b[0, 0] != a[0, 0] - - def test_block_total_size_estimate(self, block): - _, _, _, total_size = _block_setup([1]) - assert total_size == 1 - - _, _, _, total_size = _block_setup([[1]]) - assert total_size == 1 - - _, _, _, total_size = _block_setup([[1, 1]]) - assert total_size == 2 - - _, _, _, total_size = _block_setup([[1], [1]]) - assert total_size == 2 - - _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) - assert total_size == 4 - - def test_block_simple_row_wise(self, block): - a_2d = np.ones((2, 2)) - b_2d = 2 * a_2d - desired = np.array([[1, 1, 2, 2], - [1, 1, 2, 2]]) - result = block([a_2d, b_2d]) - assert_equal(desired, result) - - def test_block_simple_column_wise(self, block): - a_2d = np.ones((2, 2)) - b_2d = 2 * a_2d - expected = np.array([[1, 1], - [1, 1], - [2, 2], - [2, 2]]) - result = block([[a_2d], [b_2d]]) - assert_equal(expected, result) - - def test_block_with_1d_arrays_row_wise(self, block): - # # # 1-D vectors are treated as row arrays - a = np.array([1, 2, 3]) - b = np.array([2, 3, 4]) - expected = np.array([1, 2, 3, 2, 3, 4]) - result = block([a, b]) - assert_equal(expected, result) - - def test_block_with_1d_arrays_multiple_rows(self, block): - a = np.array([1, 2, 3]) - b = np.array([2, 3, 4]) - expected = np.array([[1, 2, 3, 2, 3, 4], - [1, 2, 3, 2, 3, 4]]) - result = block([[a, b], [a, b]]) - assert_equal(expected, result) - - def test_block_with_1d_arrays_column_wise(self, block): - # # # 1-D vectors are treated as row arrays - a_1d = np.array([1, 2, 3]) - b_1d = np.array([2, 3, 4]) - expected = np.array([[1, 2, 3], - [2, 3, 4]]) - result = block([[a_1d], [b_1d]]) - assert_equal(expected, result) - - def test_block_mixed_1d_and_2d(self, block): - a_2d = np.ones((2, 2)) - b_1d = np.array([2, 2]) - result = block([[a_2d], [b_1d]]) - expected = np.array([[1, 1], - [1, 1], - [2, 2]]) - assert_equal(expected, result) - - def test_block_complicated(self, block): - # a bit more complicated - one_2d = np.array([[1, 1, 1]]) - two_2d = np.array([[2, 2, 2]]) - three_2d = np.array([[3, 3, 3, 3, 3, 3]]) - four_1d = np.array([4, 4, 4, 4, 4, 4]) - five_0d = np.array(5) - six_1d = np.array([6, 6, 6, 6, 6]) - zero_2d = np.zeros((2, 6)) - - expected = np.array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 3, 3, 3], - [4, 4, 4, 4, 4, 4], - [5, 6, 6, 6, 6, 6], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - - result = block([[one_2d, two_2d], - [three_2d], - [four_1d], - [five_0d, six_1d], - [zero_2d]]) - assert_equal(result, expected) - - def test_nested(self, block): - one = np.array([1, 1, 1]) - two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) - three = np.array([3, 3, 3]) - four = np.array([4, 4, 4]) - five = np.array(5) - six = np.array([6, 6, 6, 6, 6]) - zero = np.zeros((2, 6)) - - result = block([ - [ - block([ - [one], - [three], - [four] - ]), - two - ], - [five, six], - [zero] - ]) - expected = 
np.array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 2, 2, 2], - [4, 4, 4, 2, 2, 2], - [5, 6, 6, 6, 6, 6], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]) - - assert_equal(result, expected) - - def test_3d(self, block): - a000 = np.ones((2, 2, 2), int) * 1 - - a100 = np.ones((3, 2, 2), int) * 2 - a010 = np.ones((2, 3, 2), int) * 3 - a001 = np.ones((2, 2, 3), int) * 4 - - a011 = np.ones((2, 3, 3), int) * 5 - a101 = np.ones((3, 2, 3), int) * 6 - a110 = np.ones((3, 3, 2), int) * 7 - - a111 = np.ones((3, 3, 3), int) * 8 - - result = block([ - [ - [a000, a001], - [a010, a011], - ], - [ - [a100, a101], - [a110, a111], - ] - ]) - expected = array([[[1, 1, 4, 4, 4], - [1, 1, 4, 4, 4], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5]], - - [[1, 1, 4, 4, 4], - [1, 1, 4, 4, 4], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5], - [3, 3, 5, 5, 5]], - - [[2, 2, 6, 6, 6], - [2, 2, 6, 6, 6], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8]], - - [[2, 2, 6, 6, 6], - [2, 2, 6, 6, 6], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8]], - - [[2, 2, 6, 6, 6], - [2, 2, 6, 6, 6], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8], - [7, 7, 8, 8, 8]]]) - - assert_array_equal(result, expected) - - def test_block_with_mismatched_shape(self, block): - a = np.array([0, 0]) - b = np.eye(2) - assert_raises(ValueError, block, [a, b]) - assert_raises(ValueError, block, [b, a]) - - to_block = [[np.ones((2,3)), np.ones((2,2))], - [np.ones((2,2)), np.ones((2,2))]] - assert_raises(ValueError, block, to_block) - def test_no_lists(self, block): - assert_equal(block(1), np.array(1)) - assert_equal(block(np.eye(3)), np.eye(3)) - - def test_invalid_nesting(self, block): - msg = 'depths are mismatched' - assert_raises_regex(ValueError, msg, block, [1, [2]]) - assert_raises_regex(ValueError, msg, block, [1, []]) - assert_raises_regex(ValueError, msg, block, [[1], 2]) - assert_raises_regex(ValueError, msg, block, [[], 2]) - assert_raises_regex(ValueError, msg, block, [ - [[1], [2]], - [[3, 4]], - [5] # missing brackets - ]) - - def test_empty_lists(self, block): - assert_raises_regex(ValueError, 'empty', block, []) - assert_raises_regex(ValueError, 'empty', block, [[]]) - assert_raises_regex(ValueError, 'empty', block, [[1], []]) - - def test_tuple(self, block): - assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) - assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) - - def test_different_ndims(self, block): - a = 1. - b = 2 * np.ones((1, 2)) - c = 3 * np.ones((1, 1, 3)) - - result = block([a, b, c]) - expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) - - assert_equal(result, expected) - - def test_different_ndims_depths(self, block): - a = 1. 
- b = 2 * np.ones((1, 2)) - c = 3 * np.ones((1, 2, 3)) - - result = block([[a, b], [c]]) - expected = np.array([[[1., 2., 2.], - [3., 3., 3.], - [3., 3., 3.]]]) - - assert_equal(result, expected) - - def test_block_memory_order(self, block): - # 3D - arr_c = np.zeros((3,)*3, order='C') - arr_f = np.zeros((3,)*3, order='F') - - b_c = [[[arr_c, arr_c], - [arr_c, arr_c]], - [[arr_c, arr_c], - [arr_c, arr_c]]] - - b_f = [[[arr_f, arr_f], - [arr_f, arr_f]], - [[arr_f, arr_f], - [arr_f, arr_f]]] - - assert block(b_c).flags['C_CONTIGUOUS'] - assert block(b_f).flags['F_CONTIGUOUS'] - - arr_c = np.zeros((3, 3), order='C') - arr_f = np.zeros((3, 3), order='F') - # 2D - b_c = [[arr_c, arr_c], - [arr_c, arr_c]] - - b_f = [[arr_f, arr_f], - [arr_f, arr_f]] - - assert block(b_c).flags['C_CONTIGUOUS'] - assert block(b_f).flags['F_CONTIGUOUS'] - - -def test_block_dispatcher(): - class ArrayLike(object): - pass - a = ArrayLike() - b = ArrayLike() - c = ArrayLike() - assert_equal(list(_block_dispatcher(a)), [a]) - assert_equal(list(_block_dispatcher([a])), [a]) - assert_equal(list(_block_dispatcher([a, b])), [a, b]) - assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) - # don't recurse into non-lists - assert_equal(list(_block_dispatcher((a, b))), [(a, b)]) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_ufunc.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_ufunc.py deleted file mode 100644 index 526925e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_ufunc.py +++ /dev/null @@ -1,1948 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import itertools - -import pytest - -import numpy as np -import numpy.core._umath_tests as umt -import numpy.linalg._umath_linalg as uml -import numpy.core._operand_flag_tests as opflag_tests -import numpy.core._rational_tests as _rational_tests -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_no_warnings, - assert_allclose, - ) -from numpy.compat import pickle - - -class TestUfuncKwargs(object): - def test_kwarg_exact(self): - assert_raises(TypeError, np.add, 1, 2, castingx='safe') - assert_raises(TypeError, np.add, 1, 2, dtypex=int) - assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) - assert_raises(TypeError, np.add, 1, 2, outx=None) - assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') - assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') - assert_raises(TypeError, np.add, 1, 2, subokx=False) - assert_raises(TypeError, np.add, 1, 2, wherex=[True]) - - def test_sig_signature(self): - assert_raises(ValueError, np.add, 1, 2, sig='ii->i', - signature='ii->i') - - def test_sig_dtype(self): - assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i', - dtype=int) - assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i', - dtype=int) - - def test_extobj_refcount(self): - # Should not segfault with USE_DEBUG. - assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True) - - -class TestUfuncGenericLoops(object): - """Test generic loops. 
- - The loops to be tested are: - - PyUFunc_ff_f_As_dd_d - PyUFunc_ff_f - PyUFunc_dd_d - PyUFunc_gg_g - PyUFunc_FF_F_As_DD_D - PyUFunc_DD_D - PyUFunc_FF_F - PyUFunc_GG_G - PyUFunc_OO_O - PyUFunc_OO_O_method - PyUFunc_f_f_As_d_d - PyUFunc_d_d - PyUFunc_f_f - PyUFunc_g_g - PyUFunc_F_F_As_D_D - PyUFunc_F_F - PyUFunc_D_D - PyUFunc_G_G - PyUFunc_O_O - PyUFunc_O_O_method - PyUFunc_On_Om - - Where: - - f -- float - d -- double - g -- long double - F -- complex float - D -- complex double - G -- complex long double - O -- python object - - It is difficult to assure that each of these loops is entered from the - Python level as the special cased loops are a moving target and the - corresponding types are architecture dependent. We probably need to - define C level testing ufuncs to get at them. For the time being, I've - just looked at the signatures registered in the build directory to find - relevant functions. - - """ - np_dtypes = [ - (np.single, np.single), (np.single, np.double), - (np.csingle, np.csingle), (np.csingle, np.cdouble), - (np.double, np.double), (np.longdouble, np.longdouble), - (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)] - - @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) - def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1): - xs = np.full(10, input_dtype(x), dtype=output_dtype) - ys = f(xs)[::2] - assert_allclose(ys, y) - assert_equal(ys.dtype, output_dtype) - - def f2(x, y): - return x**y - - @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) - def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1): - xs = np.full(10, input_dtype(x), dtype=output_dtype) - ys = f(xs, xs)[::2] - assert_allclose(ys, y) - assert_equal(ys.dtype, output_dtype) - - # class to use in testing object method loops - class foo(object): - def conjugate(self): - return np.bool_(1) - - def logical_xor(self, obj): - return np.bool_(1) - - def test_unary_PyUFunc_O_O(self): - x = np.ones(10, dtype=object) - assert_(np.all(np.abs(x) == 1)) - - def test_unary_PyUFunc_O_O_method(self, foo=foo): - x = np.full(10, foo(), dtype=object) - assert_(np.all(np.conjugate(x) == True)) - - def test_binary_PyUFunc_OO_O(self): - x = np.ones(10, dtype=object) - assert_(np.all(np.add(x, x) == 2)) - - def test_binary_PyUFunc_OO_O_method(self, foo=foo): - x = np.full(10, foo(), dtype=object) - assert_(np.all(np.logical_xor(x, x))) - - def test_binary_PyUFunc_On_Om_method(self, foo=foo): - x = np.full((10, 2, 3), foo(), dtype=object) - assert_(np.all(np.logical_xor(x, x))) - - -class TestUfunc(object): - def test_pickle(self): - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - assert_(pickle.loads(pickle.dumps(np.sin, - protocol=proto)) is np.sin) - - # Check that ufunc not defined in the top level numpy namespace - # such as numpy.core._rational_tests.test_add can also be pickled - res = pickle.loads(pickle.dumps(_rational_tests.test_add, - protocol=proto)) - assert_(res is _rational_tests.test_add) - - def test_pickle_withstring(self): - astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" - b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") - assert_(pickle.loads(astring) is np.cos) - - def test_reduceat_shifting_sum(self): - L = 6 - x = np.arange(L) - idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() - assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) - - def test_all_ufunc(self): - """Try to check presence and results of all ufuncs. 
- - The list of ufuncs comes from generate_umath.py and is as follows: - - ===== ==== ============= =============== ======================== - done args function types notes - ===== ==== ============= =============== ======================== - n 1 conjugate nums + O - n 1 absolute nums + O complex -> real - n 1 negative nums + O - n 1 sign nums + O -> int - n 1 invert bool + ints + O flts raise an error - n 1 degrees real + M cmplx raise an error - n 1 radians real + M cmplx raise an error - n 1 arccos flts + M - n 1 arccosh flts + M - n 1 arcsin flts + M - n 1 arcsinh flts + M - n 1 arctan flts + M - n 1 arctanh flts + M - n 1 cos flts + M - n 1 sin flts + M - n 1 tan flts + M - n 1 cosh flts + M - n 1 sinh flts + M - n 1 tanh flts + M - n 1 exp flts + M - n 1 expm1 flts + M - n 1 log flts + M - n 1 log10 flts + M - n 1 log1p flts + M - n 1 sqrt flts + M real x < 0 raises error - n 1 ceil real + M - n 1 trunc real + M - n 1 floor real + M - n 1 fabs real + M - n 1 rint flts + M - n 1 isnan flts -> bool - n 1 isinf flts -> bool - n 1 isfinite flts -> bool - n 1 signbit real -> bool - n 1 modf real -> (frac, int) - n 1 logical_not bool + nums + M -> bool - n 2 left_shift ints + O flts raise an error - n 2 right_shift ints + O flts raise an error - n 2 add bool + nums + O boolean + is || - n 2 subtract bool + nums + O boolean - is ^ - n 2 multiply bool + nums + O boolean * is & - n 2 divide nums + O - n 2 floor_divide nums + O - n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d - n 2 fmod nums + M - n 2 power nums + O - n 2 greater bool + nums + O -> bool - n 2 greater_equal bool + nums + O -> bool - n 2 less bool + nums + O -> bool - n 2 less_equal bool + nums + O -> bool - n 2 equal bool + nums + O -> bool - n 2 not_equal bool + nums + O -> bool - n 2 logical_and bool + nums + M -> bool - n 2 logical_or bool + nums + M -> bool - n 2 logical_xor bool + nums + M -> bool - n 2 maximum bool + nums + O - n 2 minimum bool + nums + O - n 2 bitwise_and bool + ints + O flts raise an error - n 2 bitwise_or bool + ints + O flts raise an error - n 2 bitwise_xor bool + ints + O flts raise an error - n 2 arctan2 real + M - n 2 remainder ints + real + O - n 2 hypot real + M - ===== ==== ============= =============== ======================== - - Types other than those listed will be accepted, but they are cast to - the smallest compatible type for which the function is defined. 
The - casting rules are: - - bool -> int8 -> float32 - ints -> double - - """ - pass - - # from include/numpy/ufuncobject.h - size_inferred = 2 - can_ignore = 4 - def test_signature0(self): - # the arguments to test_signature are: nin, nout, core_signature - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(i),(i)->()") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 1, 0)) - assert_equal(ixs, (0, 0)) - assert_equal(flags, (self.size_inferred,)) - assert_equal(sizes, (-1,)) - - def test_signature1(self): - # empty core signature; treat as plain ufunc (with trivial core) - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(),()->()") - assert_equal(enabled, 0) - assert_equal(num_dims, (0, 0, 0)) - assert_equal(ixs, ()) - assert_equal(flags, ()) - assert_equal(sizes, ()) - - def test_signature2(self): - # more complicated names for variables - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(i1,i2),(J_1)->(_kAB)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 1, 1)) - assert_equal(ixs, (0, 1, 2, 3)) - assert_equal(flags, (self.size_inferred,)*4) - assert_equal(sizes, (-1, -1, -1, -1)) - - def test_signature3(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, u"(i1, i12), (J_1)->(i12, i2)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 1, 2)) - assert_equal(ixs, (0, 1, 2, 1, 3)) - assert_equal(flags, (self.size_inferred,)*4) - assert_equal(sizes, (-1, -1, -1, -1)) - - def test_signature4(self): - # matrix_multiply signature from _umath_tests - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(n,k),(k,m)->(n,m)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 2, 2)) - assert_equal(ixs, (0, 1, 1, 2, 0, 2)) - assert_equal(flags, (self.size_inferred,)*3) - assert_equal(sizes, (-1, -1, -1)) - - def test_signature5(self): - # matmul signature from _umath_tests - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 2, 1, "(n?,k),(k,m?)->(n?,m?)") - assert_equal(enabled, 1) - assert_equal(num_dims, (2, 2, 2)) - assert_equal(ixs, (0, 1, 1, 2, 0, 2)) - assert_equal(flags, (self.size_inferred | self.can_ignore, - self.size_inferred, - self.size_inferred | self.can_ignore)) - assert_equal(sizes, (-1, -1, -1)) - - def test_signature6(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 1, 1, "(3)->()") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 0)) - assert_equal(ixs, (0,)) - assert_equal(flags, (0,)) - assert_equal(sizes, (3,)) - - def test_signature7(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 3, 1, "(3),(03,3),(n)->(9)") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 2, 1, 1)) - assert_equal(ixs, (0, 0, 0, 1, 2)) - assert_equal(flags, (0, self.size_inferred, 0)) - assert_equal(sizes, (3, -1, 9)) - - def test_signature8(self): - enabled, num_dims, ixs, flags, sizes = umt.test_signature( - 3, 1, "(3?),(3?,3?),(n)->(9)") - assert_equal(enabled, 1) - assert_equal(num_dims, (1, 2, 1, 1)) - assert_equal(ixs, (0, 0, 0, 1, 2)) - assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) - assert_equal(sizes, (3, -1, 9)) - - def test_signature_failure_extra_parenthesis(self): - with assert_raises(ValueError): - umt.test_signature(2, 1, "((i)),(i)->()") - - def test_signature_failure_mismatching_parenthesis(self): - with assert_raises(ValueError): - umt.test_signature(2, 1, "(i),)i(->()") - - def test_signature_failure_signature_missing_input_arg(self): - with assert_raises(ValueError): - 
umt.test_signature(2, 1, "(i),->()") - - def test_signature_failure_signature_missing_output_arg(self): - with assert_raises(ValueError): - umt.test_signature(2, 2, "(i),(i)->()") - - def test_get_signature(self): - assert_equal(umt.inner1d.signature, "(i),(i)->()") - - def test_forced_sig(self): - a = 0.5*np.arange(3, dtype='f8') - assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) - assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), - casting='unsafe'), [0, 0, 1]) - - b = np.zeros((3,), dtype='f8') - np.add(a, 0.5, out=b) - assert_equal(b, [0.5, 1, 1.5]) - b[:] = 0 - np.add(a, 0.5, sig='i', out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - - def test_true_divide(self): - a = np.array(10) - b = np.array(20) - tgt = np.array(0.5) - - for tc in 'bhilqBHILQefdgFDG': - dt = np.dtype(tc) - aa = a.astype(dt) - bb = b.astype(dt) - - # Check result value and dtype. - for x, y in itertools.product([aa, -aa], [bb, -bb]): - - # Check with no output type specified - if tc in 'FDG': - tgt = complex(x)/complex(y) - else: - tgt = float(x)/float(y) - - res = np.true_divide(x, y) - rtol = max(np.finfo(res).resolution, 1e-15) - assert_allclose(res, tgt, rtol=rtol) - - if tc in 'bhilqBHILQ': - assert_(res.dtype.name == 'float64') - else: - assert_(res.dtype.name == dt.name ) - - # Check with output type specified. This also checks for the - # incorrect casts in issue gh-3484 because the unary '-' does - # not change types, even for unsigned types, Hence casts in the - # ufunc from signed to unsigned and vice versa will lead to - # errors in the values. - for tcout in 'bhilqBHILQ': - dtout = np.dtype(tcout) - assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) - - for tcout in 'efdg': - dtout = np.dtype(tcout) - if tc in 'FDG': - # Casting complex to float is not allowed - assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) - else: - tgt = float(x)/float(y) - rtol = max(np.finfo(dtout).resolution, 1e-15) - atol = max(np.finfo(dtout).tiny, 3e-308) - # Some test values result in invalid for float16. 
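# (Presumably because negating an unsigned input wraps it to a huge value
# that overflows float16 -- whose largest finite number is 65504 -- to inf,
# and inf/inf then raises the 'invalid' floating-point flag.)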
- with np.errstate(invalid='ignore'): - res = np.true_divide(x, y, dtype=dtout) - if not np.isfinite(res) and tcout == 'e': - continue - assert_allclose(res, tgt, rtol=rtol, atol=atol) - assert_(res.dtype.name == dtout.name) - - for tcout in 'FDG': - dtout = np.dtype(tcout) - tgt = complex(x)/complex(y) - rtol = max(np.finfo(dtout).resolution, 1e-15) - atol = max(np.finfo(dtout).tiny, 3e-308) - res = np.true_divide(x, y, dtype=dtout) - if not np.isfinite(res): - continue - assert_allclose(res, tgt, rtol=rtol, atol=atol) - assert_(res.dtype.name == dtout.name) - - # Check booleans - a = np.ones((), dtype=np.bool_) - res = np.true_divide(a, a) - assert_(res == 1.0) - assert_(res.dtype.name == 'float64') - res = np.true_divide(~a, a) - assert_(res == 0.0) - assert_(res.dtype.name == 'float64') - - def test_sum_stability(self): - a = np.ones(500, dtype=np.float32) - assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) - - a = np.ones(500, dtype=np.float64) - assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) - - def test_sum(self): - for dt in (int, np.float16, np.float32, np.float64, np.longdouble): - for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, - 128, 1024, 1235): - tgt = dt(v * (v + 1) / 2) - d = np.arange(1, v + 1, dtype=dt) - - # warning if sum overflows, which it does in float16 - overflow = not np.isfinite(tgt) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - assert_almost_equal(np.sum(d), tgt) - assert_equal(len(w), 1 * overflow) - - assert_almost_equal(np.sum(d[::-1]), tgt) - assert_equal(len(w), 2 * overflow) - - d = np.ones(500, dtype=dt) - assert_almost_equal(np.sum(d[::2]), 250.) - assert_almost_equal(np.sum(d[1::2]), 250.) - assert_almost_equal(np.sum(d[::3]), 167.) - assert_almost_equal(np.sum(d[1::3]), 167.) - assert_almost_equal(np.sum(d[::-2]), 250.) - assert_almost_equal(np.sum(d[-1::-2]), 250.) - assert_almost_equal(np.sum(d[::-3]), 167.) - assert_almost_equal(np.sum(d[-1::-3]), 167.) - # sum with first reduction entry != 0 - d = np.ones((1,), dtype=dt) - d += d - assert_almost_equal(d, 2.) - - def test_sum_complex(self): - for dt in (np.complex64, np.complex128, np.clongdouble): - for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, - 128, 1024, 1235): - tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) - d = np.empty(v, dtype=dt) - d.real = np.arange(1, v + 1) - d.imag = -np.arange(1, v + 1) - assert_almost_equal(np.sum(d), tgt) - assert_almost_equal(np.sum(d[::-1]), tgt) - - d = np.ones(500, dtype=dt) + 1j - assert_almost_equal(np.sum(d[::2]), 250. + 250j) - assert_almost_equal(np.sum(d[1::2]), 250. + 250j) - assert_almost_equal(np.sum(d[::3]), 167. + 167j) - assert_almost_equal(np.sum(d[1::3]), 167. + 167j) - assert_almost_equal(np.sum(d[::-2]), 250. + 250j) - assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) - assert_almost_equal(np.sum(d[::-3]), 167. + 167j) - assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) - # sum with first reduction entry != 0 - d = np.ones((1,), dtype=dt) + 1j - d += d - assert_almost_equal(d, 2. + 2j) - - def test_sum_initial(self): - # Integer, single axis - assert_equal(np.sum([3], initial=2), 5) - - # Floating point - assert_almost_equal(np.sum([0.2], initial=0.1), 0.3) - - # Multiple non-adjacent axes - assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2), - [12, 12, 12]) - - def test_sum_where(self): - # More extensive tests done in test_reduction_with_where. - assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.) 
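# Worked out: the where= mask keeps only the first column, so the sum above
# is 1. + 3. = 4., and the axis=0 case below starts each column at
# initial=5., giving [5. + 1. + 3., 5.] == [9., 5.].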
- assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5., - where=[True, False]), [9., 5.]) - - def test_inner1d(self): - a = np.arange(6).reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) - a = np.arange(6) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) - - def test_broadcast(self): - msg = "broadcast" - a = np.arange(4).reshape((2, 1, 2)) - b = np.arange(4).reshape((1, 2, 2)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "extend & broadcast loop dimensions" - b = np.arange(4).reshape((2, 2)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - # Broadcast in core dimensions should fail - a = np.arange(8).reshape((4, 2)) - b = np.arange(4).reshape((4, 1)) - assert_raises(ValueError, umt.inner1d, a, b) - # Extend core dimensions should fail - a = np.arange(8).reshape((4, 2)) - b = np.array(7) - assert_raises(ValueError, umt.inner1d, a, b) - # Broadcast should fail - a = np.arange(2).reshape((2, 1, 1)) - b = np.arange(3).reshape((3, 1, 1)) - assert_raises(ValueError, umt.inner1d, a, b) - - # Writing to a broadcasted array with overlap should warn, gh-2705 - a = np.arange(2) - b = np.arange(4).reshape((2, 2)) - u, v = np.broadcast_arrays(a, b) - assert_equal(u.strides[0], 0) - x = u + v - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - u += v - assert_equal(len(w), 1) - assert_(x[0,0] != u[0, 0]) - - def test_type_cast(self): - msg = "type cast" - a = np.arange(6, dtype='short').reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), - err_msg=msg) - msg = "type cast on one argument" - a = np.arange(6).reshape((2, 3)) - b = a + 0.1 - assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), - err_msg=msg) - - def test_endian(self): - msg = "big endian" - a = np.arange(6, dtype='>i4').reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), - err_msg=msg) - msg = "little endian" - a = np.arange(6, dtype='()' - inner1d = umt.inner1d - a = np.arange(27.).reshape((3, 3, 3)) - b = np.arange(10., 19.).reshape((3, 1, 3)) - # basic tests on inputs (outputs tested below with matrix_multiply). - c = inner1d(a, b) - assert_array_equal(c, (a * b).sum(-1)) - # default - c = inner1d(a, b, axes=[(-1,), (-1,), ()]) - assert_array_equal(c, (a * b).sum(-1)) - # integers ok for single axis. - c = inner1d(a, b, axes=[-1, -1, ()]) - assert_array_equal(c, (a * b).sum(-1)) - # mix fine - c = inner1d(a, b, axes=[(-1,), -1, ()]) - assert_array_equal(c, (a * b).sum(-1)) - # can omit last axis. - c = inner1d(a, b, axes=[-1, -1]) - assert_array_equal(c, (a * b).sum(-1)) - # can pass in other types of integer (with __index__ protocol) - c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)]) - assert_array_equal(c, (a * b).sum(-1)) - # swap some axes - c = inner1d(a, b, axes=[0, 0]) - assert_array_equal(c, (a * b).sum(0)) - c = inner1d(a, b, axes=[0, 2]) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) - # Check errors for improperly constructed axes arguments. - # should have list. - assert_raises(TypeError, inner1d, a, b, axes=-1) - # needs enough elements - assert_raises(ValueError, inner1d, a, b, axes=[-1]) - # should pass in indices. 
- assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0]) - assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1]) - assert_raises(TypeError, inner1d, a, b, axes=[None, 1]) - # cannot pass an index unless there is only one dimension - # (output is wrong in this case) - assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1]) - # or pass in generally the wrong number of axes - assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)]) - assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()]) - # axes need to have same length. - assert_raises(ValueError, inner1d, a, b, axes=[0, 1]) - - # matrix_multiply signature: '(m,n),(n,p)->(m,p)' - mm = umt.matrix_multiply - a = np.arange(12).reshape((2, 3, 2)) - b = np.arange(8).reshape((2, 2, 2, 1)) + 1 - # Sanity check. - c = mm(a, b) - assert_array_equal(c, np.matmul(a, b)) - # Default axes. - c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) - assert_array_equal(c, np.matmul(a, b)) - # Default with explicit axes. - c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) - assert_array_equal(c, np.matmul(a, b)) - # swap some axes. - c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) - assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), - b.transpose(0, 3, 1, 2))) - # Default with output array. - c = np.empty((2, 2, 3, 1)) - d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) - assert_(c is d) - assert_array_equal(c, np.matmul(a, b)) - # Transposed output array - c = np.empty((1, 2, 2, 3)) - d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) - assert_(c is d) - assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) - # Check errors for improperly constructed axes arguments. - # wrong argument - assert_raises(TypeError, mm, a, b, axis=1) - # axes should be list - assert_raises(TypeError, mm, a, b, axes=1) - assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) - # list needs to have right length - assert_raises(ValueError, mm, a, b, axes=[]) - assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) - # list should contain tuples for multiple axes - assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1]) - assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) - assert_raises(TypeError, - mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) - assert_raises(TypeError, - mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) - assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) - # tuples should not have duplicated values - assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) - # arrays should have enough axes. - z = np.zeros((2, 2)) - assert_raises(ValueError, mm, z, z[0]) - assert_raises(ValueError, mm, z, z, out=z[:, 0]) - assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) - assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) - # Regular ufuncs should not accept axes. - assert_raises(TypeError, np.add, 1., 1., axes=[0]) - # should be able to deal with bad unrelated kwargs. - assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) - - def test_axis_argument(self): - # inner1d signature: '(i),(i)->()' - inner1d = umt.inner1d - a = np.arange(27.).reshape((3, 3, 3)) - b = np.arange(10., 19.).reshape((3, 1, 3)) - c = inner1d(a, b) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, axis=-1) - assert_array_equal(c, (a * b).sum(-1)) - out = np.zeros_like(c) - d = inner1d(a, b, axis=-1, out=out) - assert_(d is out) - assert_array_equal(d, c) - c = inner1d(a, b, axis=0) - assert_array_equal(c, (a * b).sum(0)) - # Sanity checks on innerwt and cumsum. 
- a = np.arange(6).reshape((2, 3)) - b = np.arange(10, 16).reshape((2, 3)) - w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w, axis=0), - np.sum(a * b * w, axis=0)) - assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) - assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) - out = np.empty_like(a) - b = umt.cumsum(a, out=out, axis=0) - assert_(out is b) - assert_array_equal(b, np.cumsum(a, axis=0)) - b = umt.cumsum(a, out=out, axis=1) - assert_(out is b) - assert_array_equal(b, np.cumsum(a, axis=-1)) - # Check errors. - # Cannot pass in both axis and axes. - assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0]) - # Not an integer. - assert_raises(TypeError, inner1d, a, b, axis=[0]) - # more than 1 core dimensions. - mm = umt.matrix_multiply - assert_raises(TypeError, mm, a, b, axis=1) - # Output wrong size in axis. - out = np.empty((1, 2, 3), dtype=a.dtype) - assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) - # Regular ufuncs should not accept axis. - assert_raises(TypeError, np.add, 1., 1., axis=0) - - def test_keepdims_argument(self): - # inner1d signature: '(i),(i)->()' - inner1d = umt.inner1d - a = np.arange(27.).reshape((3, 3, 3)) - b = np.arange(10., 19.).reshape((3, 1, 3)) - c = inner1d(a, b) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, keepdims=False) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, keepdims=True) - assert_array_equal(c, (a * b).sum(-1, keepdims=True)) - out = np.zeros_like(c) - d = inner1d(a, b, keepdims=True, out=out) - assert_(d is out) - assert_array_equal(d, c) - # Now combined with axis and axes. - c = inner1d(a, b, axis=-1, keepdims=False) - assert_array_equal(c, (a * b).sum(-1, keepdims=False)) - c = inner1d(a, b, axis=-1, keepdims=True) - assert_array_equal(c, (a * b).sum(-1, keepdims=True)) - c = inner1d(a, b, axis=0, keepdims=False) - assert_array_equal(c, (a * b).sum(0, keepdims=False)) - c = inner1d(a, b, axis=0, keepdims=True) - assert_array_equal(c, (a * b).sum(0, keepdims=True)) - c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False) - assert_array_equal(c, (a * b).sum(-1)) - c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) - assert_array_equal(c, (a * b).sum(-1, keepdims=True)) - c = inner1d(a, b, axes=[0, 0], keepdims=False) - assert_array_equal(c, (a * b).sum(0)) - c = inner1d(a, b, axes=[0, 0, 0], keepdims=True) - assert_array_equal(c, (a * b).sum(0, keepdims=True)) - c = inner1d(a, b, axes=[0, 2], keepdims=False) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) - c = inner1d(a, b, axes=[0, 2], keepdims=True) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, - keepdims=True)) - c = inner1d(a, b, axes=[0, 2, 2], keepdims=True) - assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, - keepdims=True)) - c = inner1d(a, b, axes=[0, 2, 0], keepdims=True) - assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) - # Hardly useful, but should work. - c = inner1d(a, b, axes=[0, 2, 1], keepdims=True) - assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) - .sum(1, keepdims=True)) - # Check with two core dimensions. 
- a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] - expected = uml.det(a) - c = uml.det(a, keepdims=False) - assert_array_equal(c, expected) - c = uml.det(a, keepdims=True) - assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) - a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] - expected_s, expected_l = uml.slogdet(a) - cs, cl = uml.slogdet(a, keepdims=False) - assert_array_equal(cs, expected_s) - assert_array_equal(cl, expected_l) - cs, cl = uml.slogdet(a, keepdims=True) - assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) - assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) - # Sanity check on innerwt. - a = np.arange(6).reshape((2, 3)) - b = np.arange(10, 16).reshape((2, 3)) - w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w, keepdims=True), - np.sum(a * b * w, axis=-1, keepdims=True)) - assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), - np.sum(a * b * w, axis=0, keepdims=True)) - # Check errors. - # Not a boolean - assert_raises(TypeError, inner1d, a, b, keepdims='true') - # More than 1 core dimension, and core output dimensions. - mm = umt.matrix_multiply - assert_raises(TypeError, mm, a, b, keepdims=True) - assert_raises(TypeError, mm, a, b, keepdims=False) - # Regular ufuncs should not accept keepdims. - assert_raises(TypeError, np.add, 1., 1., keepdims=False) - - def test_innerwt(self): - a = np.arange(6).reshape((2, 3)) - b = np.arange(10, 16).reshape((2, 3)) - w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) - a = np.arange(100, 124).reshape((2, 3, 4)) - b = np.arange(200, 224).reshape((2, 3, 4)) - w = np.arange(300, 324).reshape((2, 3, 4)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) - - def test_innerwt_empty(self): - """Test generalized ufunc with zero-sized operands""" - a = np.array([], dtype='f8') - b = np.array([], dtype='f8') - w = np.array([], dtype='f8') - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) - - def test_cross1d(self): - """Test with fixed-sized signature.""" - a = np.eye(3) - assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) - out = np.zeros((3, 3)) - result = umt.cross1d(a[0], a, out) - assert_(result is out) - assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) - assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) - assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) - assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) - - def test_can_ignore_signature(self): - # Comparing the effects of ? in signature: - # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. - # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. - mat = np.arange(12).reshape((2, 3, 2)) - single_vec = np.arange(2) - col_vec = single_vec[:, np.newaxis] - col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 - # matrix @ single column vector with proper dimension - mm_col_vec = umt.matrix_multiply(mat, col_vec) - # matmul does the same thing - matmul_col_vec = umt.matmul(mat, col_vec) - assert_array_equal(matmul_col_vec, mm_col_vec) - # matrix @ vector without dimension making it a column vector. - # matrix multiply fails -> missing core dim. - assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) - # matmul mimicker passes, and returns a vector. - matmul_col = umt.matmul(mat, single_vec) - assert_array_equal(matmul_col, mm_col_vec.squeeze()) - # Now with a column array: same as for column vector, - # broadcasting sensibly. 
- mm_col_vec = umt.matrix_multiply(mat, col_vec_array) - matmul_col_vec = umt.matmul(mat, col_vec_array) - assert_array_equal(matmul_col_vec, mm_col_vec) - # As above, but for row vector - single_vec = np.arange(3) - row_vec = single_vec[np.newaxis, :] - row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 - # row vector @ matrix - mm_row_vec = umt.matrix_multiply(row_vec, mat) - matmul_row_vec = umt.matmul(row_vec, mat) - assert_array_equal(matmul_row_vec, mm_row_vec) - # single row vector @ matrix - assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) - matmul_row = umt.matmul(single_vec, mat) - assert_array_equal(matmul_row, mm_row_vec.squeeze()) - # row vector array @ matrix - mm_row_vec = umt.matrix_multiply(row_vec_array, mat) - matmul_row_vec = umt.matmul(row_vec_array, mat) - assert_array_equal(matmul_row_vec, mm_row_vec) - # Now for vector combinations - # row vector @ column vector - col_vec = row_vec.T - col_vec_array = row_vec_array.swapaxes(-2, -1) - mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) - matmul_row_col_vec = umt.matmul(row_vec, col_vec) - assert_array_equal(matmul_row_col_vec, mm_row_col_vec) - # single row vector @ single col vector - assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) - matmul_row_col = umt.matmul(single_vec, single_vec) - assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) - # row vector array @ matrix - mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) - matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) - assert_array_equal(matmul_row_col_array, mm_row_col_array) - # Finally, check that things are *not* squeezed if one gives an - # output. - out = np.zeros_like(mm_row_col_array) - out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) - assert_array_equal(out, mm_row_col_array) - out[:] = 0 - out = umt.matmul(row_vec_array, col_vec_array, out=out) - assert_array_equal(out, mm_row_col_array) - # And check one cannot put missing dimensions back. - out = np.zeros_like(mm_row_col_vec) - assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, - out) - # But fine for matmul, since it is just a broadcast. 
- out = umt.matmul(single_vec, single_vec, out) - assert_array_equal(out, mm_row_col_vec.squeeze()) - - def test_matrix_multiply(self): - self.compare_matrix_multiply_results(np.int64) - self.compare_matrix_multiply_results(np.double) - - def test_matrix_multiply_umath_empty(self): - res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) - assert_array_equal(res, np.zeros((0, 0))) - res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) - assert_array_equal(res, np.zeros((10, 10))) - - def compare_matrix_multiply_results(self, tp): - d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) - d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) - msg = "matrix multiply on type %s" % d1.dtype.name - - def permute_n(n): - if n == 1: - return ([0],) - ret = () - base = permute_n(n-1) - for perm in base: - for i in range(n): - new = perm + [n-1] - new[n-1] = new[i] - new[i] = n-1 - ret += (new,) - return ret - - def slice_n(n): - if n == 0: - return ((),) - ret = () - base = slice_n(n-1) - for sl in base: - ret += (sl+(slice(None),),) - ret += (sl+(slice(0, 1),),) - return ret - - def broadcastable(s1, s2): - return s1 == s2 or s1 == 1 or s2 == 1 - - permute_3 = permute_n(3) - slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) - - ref = True - for p1 in permute_3: - for p2 in permute_3: - for s1 in slice_3: - for s2 in slice_3: - a1 = d1.transpose(p1)[s1] - a2 = d2.transpose(p2)[s2] - ref = ref and a1.base is not None - ref = ref and a2.base is not None - if (a1.shape[-1] == a2.shape[-2] and - broadcastable(a1.shape[0], a2.shape[0])): - assert_array_almost_equal( - umt.matrix_multiply(a1, a2), - np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * - a1[..., np.newaxis,:], axis=-1), - err_msg=msg + ' %s %s' % (str(a1.shape), - str(a2.shape))) - - assert_equal(ref, True, err_msg="reference check") - - def test_euclidean_pdist(self): - a = np.arange(12, dtype=float).reshape(4, 3) - out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) - umt.euclidean_pdist(a, out) - b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) - b = b[~np.tri(a.shape[0], dtype=bool)] - assert_almost_equal(out, b) - # An output array is required to determine p with signature (n,d)->(p) - assert_raises(ValueError, umt.euclidean_pdist, a) - - def test_cumsum(self): - a = np.arange(10) - result = umt.cumsum(a) - assert_array_equal(result, a.cumsum()) - - def test_object_logical(self): - a = np.array([3, None, True, False, "test", ""], dtype=object) - assert_equal(np.logical_or(a, None), - np.array([x or None for x in a], dtype=object)) - assert_equal(np.logical_or(a, True), - np.array([x or True for x in a], dtype=object)) - assert_equal(np.logical_or(a, 12), - np.array([x or 12 for x in a], dtype=object)) - assert_equal(np.logical_or(a, "blah"), - np.array([x or "blah" for x in a], dtype=object)) - - assert_equal(np.logical_and(a, None), - np.array([x and None for x in a], dtype=object)) - assert_equal(np.logical_and(a, True), - np.array([x and True for x in a], dtype=object)) - assert_equal(np.logical_and(a, 12), - np.array([x and 12 for x in a], dtype=object)) - assert_equal(np.logical_and(a, "blah"), - np.array([x and "blah" for x in a], dtype=object)) - - assert_equal(np.logical_not(a), - np.array([not x for x in a], dtype=object)) - - assert_equal(np.logical_or.reduce(a), 3) - assert_equal(np.logical_and.reduce(a), None) - - def test_object_comparison(self): - class HasComparisons(object): - def __eq__(self, other): - return '==' - - arr0d = np.array(HasComparisons()) - assert_equal(arr0d == arr0d, True) - 
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast - - arr1d = np.array([HasComparisons()]) - assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast - assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) - - def test_object_array_reduction(self): - # Reductions on object arrays - a = np.array(['a', 'b', 'c'], dtype=object) - assert_equal(np.sum(a), 'abc') - assert_equal(np.max(a), 'c') - assert_equal(np.min(a), 'a') - a = np.array([True, False, True], dtype=object) - assert_equal(np.sum(a), 2) - assert_equal(np.prod(a), 0) - assert_equal(np.any(a), True) - assert_equal(np.all(a), False) - assert_equal(np.max(a), True) - assert_equal(np.min(a), False) - assert_equal(np.array([[1]], dtype=object).sum(), 1) - assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) - assert_equal(np.array([1], dtype=object).sum(initial=1), 2) - assert_equal(np.array([[1], [2, 3]], dtype=object) - .sum(initial=[0], where=[False, True]), [0, 2, 3]) - - def test_object_array_accumulate_inplace(self): - # Checks that in-place accumulates work, see also gh-7402 - arr = np.ones(4, dtype=object) - arr[:] = [[1] for i in range(4)] - # Twice reproduced also for tuples: - np.add.accumulate(arr, out=arr) - np.add.accumulate(arr, out=arr) - assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]])) - - # And the same if the axis argument is used - arr = np.ones((2, 4), dtype=object) - arr[0, :] = [[2] for i in range(4)] - np.add.accumulate(arr, out=arr, axis=-1) - np.add.accumulate(arr, out=arr, axis=-1) - assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]])) - - def test_object_array_reduceat_inplace(self): - # Checks that in-place reduceats work, see also gh-7465 - arr = np.empty(4, dtype=object) - arr[:] = [[1] for i in range(4)] - out = np.empty(4, dtype=object) - out[:] = [[1] for i in range(4)] - np.add.reduceat(arr, np.arange(4), out=arr) - np.add.reduceat(arr, np.arange(4), out=arr) - assert_array_equal(arr, out) - - # And the same if the axis argument is used - arr = np.ones((2, 4), dtype=object) - arr[0, :] = [[2] for i in range(4)] - out = np.ones((2, 4), dtype=object) - out[0, :] = [[2] for i in range(4)] - np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) - np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) - assert_array_equal(arr, out) - - def test_zerosize_reduction(self): - # Test with default dtype and object dtype - for a in [[], np.array([], dtype=object)]: - assert_equal(np.sum(a), 0) - assert_equal(np.prod(a), 1) - assert_equal(np.any(a), False) - assert_equal(np.all(a), True) - assert_raises(ValueError, np.max, a) - assert_raises(ValueError, np.min, a) - - def test_axis_out_of_bounds(self): - a = np.array([False, False]) - assert_raises(np.AxisError, a.all, axis=1) - a = np.array([False, False]) - assert_raises(np.AxisError, a.all, axis=-2) - - a = np.array([False, False]) - assert_raises(np.AxisError, a.any, axis=1) - a = np.array([False, False]) - assert_raises(np.AxisError, a.any, axis=-2) - - def test_scalar_reduction(self): - # The functions 'sum', 'prod', etc allow specifying axis=0 - # even for scalars - assert_equal(np.sum(3, axis=0), 3) - assert_equal(np.prod(3.5, axis=0), 3.5) - assert_equal(np.any(True, axis=0), True) - assert_equal(np.all(False, axis=0), False) - assert_equal(np.max(3, axis=0), 3) - assert_equal(np.min(2.5, axis=0), 2.5) - - # Check scalar behaviour for ufuncs without an identity - assert_equal(np.power.reduce(3), 3) 
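# (Illustrative sketch, separate from the deleted test file; assumes plain
# NumPy: a scalar reduce passes the value through even for ufuncs with no
# identity, while an empty reduce needs an identity or an explicit initial=.)
import numpy as np
assert np.add.reduce([]) == 0                             # add has identity 0
assert np.maximum.reduce([], initial=-np.inf) == -np.inf  # initial= stands in
try:
    np.maximum.reduce([])                                 # no identity -> error
except ValueError:
    pass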
- - # Make sure that scalars are coming out from this operation - assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) - - # check if scalars/0-d arrays get cast - assert_(type(np.any(0, axis=0)) is np.bool_) - - # assert that 0-d arrays get wrapped - class MyArray(np.ndarray): - pass - a = np.array(1).view(MyArray) - assert_(type(np.any(a)) is MyArray) - - def test_casting_out_param(self): - # Test that it's possible to do casts on output - a = np.ones((200, 100), np.int64) - b = np.ones((200, 100), np.int64) - c = np.ones((200, 100), np.float64) - np.add(a, b, out=c) - assert_equal(c, 2) - - a = np.zeros(65536) - b = np.zeros(65536, dtype=np.float32) - np.subtract(a, 0, out=b) - assert_equal(b, 0) - - def test_where_param(self): - # Test that the where= ufunc parameter works with regular arrays - a = np.arange(7) - b = np.ones(7) - c = np.zeros(7) - np.add(a, b, out=c, where=(a % 2 == 1)) - assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) - - a = np.arange(4).reshape(2, 2) + 2 - np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) - assert_equal(a, [[2, 27], [16, 5]]) - # Broadcasting the where= parameter - np.subtract(a, 2, out=a, where=[True, False]) - assert_equal(a, [[0, 27], [14, 5]]) - - def test_where_param_buffer_output(self): - # This test is temporarily skipped because it requires - # adding masking features to the nditer to work properly - - # With casting on output - a = np.ones(10, np.int64) - b = np.ones(10, np.int64) - c = 1.5 * np.ones(10, np.float64) - np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) - assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) - - def test_where_param_alloc(self): - # With casting and allocated output - a = np.array([1], dtype=np.int64) - m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) - - # No casting and allocated output - a = np.array([1], dtype=np.float64) - m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) - - def check_identityless_reduction(self, a): - # np.minimum.reduce is an identityless reduction - - # Verify that it sees the zero at various positions - a[...] = 1 - a[1, 0, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) - assert_equal(np.minimum.reduce(a, axis=0), - [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 1, 1, 1], [0, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 1, 1], [0, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) - - a[...] = 1 - a[0, 1, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 0, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) - - a[...] 
= 1 - a[0, 0, 1] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 0, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[0, 1, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) - - def test_identityless_reduction_corder(self): - a = np.empty((2, 3, 4), order='C') - self.check_identityless_reduction(a) - - def test_identityless_reduction_forder(self): - a = np.empty((2, 3, 4), order='F') - self.check_identityless_reduction(a) - - def test_identityless_reduction_otherorder(self): - a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig(self): - a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig_unaligned(self): - a = np.empty((3*4*5*8 + 1,), dtype='i1') - a = a[1:].view(dtype='f8') - a.shape = (3, 4, 5) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_initial_reduction(self): - # np.minimum.reduce is an identityless reduction - - # For cases like np.maximum(np.abs(...), initial=0) - # More generally, a supremum over non-negative numbers. - assert_equal(np.maximum.reduce([], initial=0), 0) - - # For cases like reduction of an empty array over the reals. - assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) - assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) - - # Random tests - assert_equal(np.minimum.reduce([5], initial=4), 4) - assert_equal(np.maximum.reduce([4], initial=5), 5) - assert_equal(np.maximum.reduce([5], initial=4), 5) - assert_equal(np.minimum.reduce([4], initial=5), 4) - - # Check initial=None raises ValueError for both types of ufunc reductions - assert_raises(ValueError, np.minimum.reduce, [], initial=None) - assert_raises(ValueError, np.add.reduce, [], initial=None) - - # Check that np._NoValue gives default behavior. - assert_equal(np.add.reduce([], initial=np._NoValue), 0) - - # Check that initial kwarg behaves as intended for dtype=object - a = np.array([10], dtype=object) - res = np.add.reduce(a, initial=5) - assert_equal(res, 15) - - @pytest.mark.parametrize('axis', (0, 1, None)) - @pytest.mark.parametrize('where', (np.array([False, True, True]), - np.array([[True], [False], [True]]), - np.array([[True, False, False], - [False, True, False], - [False, True, True]]))) - def test_reduction_with_where(self, axis, where): - a = np.arange(9.).reshape(3, 3) - a_copy = a.copy() - a_check = np.zeros_like(a) - np.positive(a, out=a_check, where=where) - - res = np.add.reduce(a, axis=axis, where=where) - check = a_check.sum(axis) - assert_equal(res, check) - # Check we do not overwrite elements of a internally. 
- assert_array_equal(a, a_copy) - - @pytest.mark.parametrize(('axis', 'where'), - ((0, np.array([True, False, True])), - (1, [True, True, False]), - (None, True))) - @pytest.mark.parametrize('initial', (-np.inf, 5.)) - def test_reduction_with_where_and_initial(self, axis, where, initial): - a = np.arange(9.).reshape(3, 3) - a_copy = a.copy() - a_check = np.full(a.shape, -np.inf) - np.positive(a, out=a_check, where=where) - - res = np.maximum.reduce(a, axis=axis, where=where, initial=initial) - check = a_check.max(axis, initial=initial) - assert_equal(res, check) - - def test_reduction_where_initial_needed(self): - a = np.arange(9.).reshape(3, 3) - m = [False, True, False] - assert_raises(ValueError, np.maximum.reduce, a, where=m) - - def test_identityless_reduction_nonreorderable(self): - a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) - - res = np.divide.reduce(a, axis=0) - assert_equal(res, [8.0, 4.0, 8.0]) - - res = np.divide.reduce(a, axis=1) - assert_equal(res, [2.0, 8.0]) - - res = np.divide.reduce(a, axis=()) - assert_equal(res, a) - - assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) - - def test_reduce_zero_axis(self): - # If we have an n x m array and do a reduction with axis=1, then we are - # doing n reductions, and each reduction takes an m-element array. For - # a reduction operation without an identity: - # n > 0, m > 0: fine - # n = 0, m > 0: fine, doing 0 reductions of m-element arrays - # n > 0, m = 0: can't reduce a 0-element array, ValueError - # n = 0, m = 0: can't reduce a 0-element array, ValueError (for - # consistency with the above case) - # This test doesn't actually look at return values, it just checks to - # make sure that we get an error in exactly those cases where we - # expect one, and assumes the calculations themselves are done - # correctly. - - def ok(f, *args, **kwargs): - f(*args, **kwargs) - - def err(f, *args, **kwargs): - assert_raises(ValueError, f, *args, **kwargs) - - def t(expect, func, n, m): - expect(func, np.zeros((n, m)), axis=1) - expect(func, np.zeros((m, n)), axis=0) - expect(func, np.zeros((n // 2, n // 2, m)), axis=2) - expect(func, np.zeros((n // 2, m, n // 2)), axis=1) - expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) - expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) - expect(func, np.zeros((m // 3, m // 3, m // 3, - n // 2, n // 2)), - axis=(0, 1, 2)) - # Check what happens if the inner (resp. outer) dimensions are a - # mix of zero and non-zero: - expect(func, np.zeros((10, m, n)), axis=(0, 1)) - expect(func, np.zeros((10, n, m)), axis=(0, 2)) - expect(func, np.zeros((m, 10, n)), axis=0) - expect(func, np.zeros((10, m, n)), axis=1) - expect(func, np.zeros((10, n, m)), axis=2) - - # np.maximum is just an arbitrary ufunc with no reduction identity - assert_equal(np.maximum.identity, None) - t(ok, np.maximum.reduce, 30, 30) - t(ok, np.maximum.reduce, 0, 30) - t(err, np.maximum.reduce, 30, 0) - t(err, np.maximum.reduce, 0, 0) - err(np.maximum.reduce, []) - np.maximum.reduce(np.zeros((0, 0)), axis=()) - - # all of the combinations are fine for a reduction that has an - # identity - t(ok, np.add.reduce, 30, 30) - t(ok, np.add.reduce, 0, 30) - t(ok, np.add.reduce, 30, 0) - t(ok, np.add.reduce, 0, 0) - np.add.reduce([]) - np.add.reduce(np.zeros((0, 0)), axis=()) - - # OTOH, accumulate always makes sense for any combination of n and m, - # because it maps an m-element array to an m-element array. These - # tests are simpler because accumulate doesn't accept multiple axes.
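# (Illustrative sketch, separate from the deleted test file; assumes plain
# NumPy: accumulate maps an m-element axis to an m-element axis, so zero-length
# axes are always valid, as the loop below then exercises.)
import numpy as np
assert np.add.accumulate([1, 2, 3]).tolist() == [1, 3, 6]  # m elements in, m out
assert np.add.accumulate([]).shape == (0,)                 # zero-length is fine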
- for uf in (np.maximum, np.add): - uf.accumulate(np.zeros((30, 0)), axis=0) - uf.accumulate(np.zeros((0, 30)), axis=0) - uf.accumulate(np.zeros((30, 30)), axis=0) - uf.accumulate(np.zeros((0, 0)), axis=0) - - def test_safe_casting(self): - # In old versions of numpy, in-place operations used the 'unsafe' - # casting rules. In versions >= 1.10, 'same_kind' is the - # default and an exception is raised instead of a warning - # when 'same_kind' is not satisfied. - a = np.array([1, 2, 3], dtype=int) - # Non-in-place addition is fine - assert_array_equal(assert_no_warnings(np.add, a, 1.1), - [2.1, 3.1, 4.1]) - assert_raises(TypeError, np.add, a, 1.1, out=a) - - def add_inplace(a, b): - a += b - - assert_raises(TypeError, add_inplace, a, 1.1) - # Make sure that explicitly overriding the exception is allowed: - assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") - assert_array_equal(a, [2, 3, 4]) - - def test_ufunc_custom_out(self): - # Test ufunc with built-in input types and custom output type - - a = np.array([0, 1, 2], dtype='i8') - b = np.array([0, 1, 2], dtype='i8') - c = np.empty(3, dtype=_rational_tests.rational) - - # Output must be specified so numpy knows what - # ufunc signature to look for - result = _rational_tests.test_add(a, b, c) - target = np.array([0, 2, 4], dtype=_rational_tests.rational) - assert_equal(result, target) - - # no output type should raise TypeError - with assert_raises(TypeError): - _rational_tests.test_add(a, b) - - def test_operand_flags(self): - a = np.arange(16, dtype='l').reshape(4, 4) - b = np.arange(9, dtype='l').reshape(3, 3) - opflag_tests.inplace_add(a[:-1, :-1], b) - assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], - [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) - - a = np.array(0) - opflag_tests.inplace_add(a, 3) - assert_equal(a, 3) - opflag_tests.inplace_add(a, [3, 4]) - assert_equal(a, 10) - - def test_struct_ufunc(self): - import numpy.core._struct_ufunc_tests as struct_ufunc - - a = np.array([(1, 2, 3)], dtype='u8,u8,u8') - b = np.array([(1, 2, 3)], dtype='u8,u8,u8') - - result = struct_ufunc.add_triplet(a, b) - assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) - assert_raises(RuntimeError, struct_ufunc.register_fail) - - def test_custom_ufunc(self): - a = np.array( - [_rational_tests.rational(1, 2), - _rational_tests.rational(1, 3), - _rational_tests.rational(1, 4)], - dtype=_rational_tests.rational) - b = np.array( - [_rational_tests.rational(1, 2), - _rational_tests.rational(1, 3), - _rational_tests.rational(1, 4)], - dtype=_rational_tests.rational) - - result = _rational_tests.test_add_rationals(a, b) - expected = np.array( - [_rational_tests.rational(1), - _rational_tests.rational(2, 3), - _rational_tests.rational(1, 2)], - dtype=_rational_tests.rational) - assert_equal(result, expected) - - def test_custom_ufunc_forced_sig(self): - # gh-9351 - looking for a non-first userloop would previously hang - with assert_raises(TypeError): - np.multiply(_rational_tests.rational(1), 1, - signature=(_rational_tests.rational, int, None)) - - def test_custom_array_like(self): - - class MyThing(object): - __array_priority__ = 1000 - - rmul_count = 0 - getitem_count = 0 - - def __init__(self, shape): - self.shape = shape - - def __len__(self): - return self.shape[0] - - def __getitem__(self, i): - MyThing.getitem_count += 1 - if not isinstance(i, tuple): - i = (i,) - if len(i) > self.ndim: - raise IndexError("boo") - - return MyThing(self.shape[len(i):]) - - def __rmul__(self, other): - MyThing.rmul_count += 1 - return self -
- np.float64(5)*MyThing((3, 3)) - assert_(MyThing.rmul_count == 1, MyThing.rmul_count) - assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) - - def test_inplace_fancy_indexing(self): - - a = np.arange(10) - np.add.at(a, [2, 5, 2], 1) - assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) - - a = np.arange(10) - b = np.array([100, 100, 100]) - np.add.at(a, [2, 5, 2], b) - assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) - - a = np.arange(9).reshape(3, 3) - b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) - np.add.at(a, (slice(None), [1, 2, 1]), b) - assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) - assert_equal(a, - [[[0, 401, 202], - [3, 404, 205], - [6, 407, 208]], - - [[9, 410, 211], - [12, 413, 214], - [15, 416, 217]], - - [[18, 419, 220], - [21, 422, 223], - [24, 425, 226]]]) - - a = np.arange(9).reshape(3, 3) - b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) - np.add.at(a, ([1, 2, 1], slice(None)), b) - assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) - assert_equal(a, - [[[0, 1, 2], - [203, 404, 605], - [106, 207, 308]], - - [[9, 10, 11], - [212, 413, 614], - [115, 216, 317]], - - [[18, 19, 20], - [221, 422, 623], - [124, 225, 326]]]) - - a = np.arange(9).reshape(3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (0, [1, 2, 1]), b) - assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, ([1, 2, 1], 0, slice(None)), b) - assert_equal(a, - [[[0, 1, 2], - [3, 4, 5], - [6, 7, 8]], - - [[209, 410, 611], - [12, 13, 14], - [15, 16, 17]], - - [[118, 219, 320], - [21, 22, 23], - [24, 25, 26]]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), slice(None), slice(None)), b) - assert_equal(a, - [[[100, 201, 302], - [103, 204, 305], - [106, 207, 308]], - - [[109, 210, 311], - [112, 213, 314], - [115, 216, 317]], - - [[118, 219, 320], - [121, 222, 323], - [124, 225, 326]]]) - - a = np.arange(10) - np.negative.at(a, [2, 5, 2]) - assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) - - # Test 0-dim array - a = np.array(0) - np.add.at(a, (), 1) - assert_equal(a, 1) - - assert_raises(IndexError, np.add.at, a, 0, 1) - assert_raises(IndexError, np.add.at, a, [], 1) - - # Test mixed dtypes - a = np.arange(10) - np.power.at(a, [1, 2, 3, 2], 3.5) - assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) - - # Test boolean indexing and boolean ufuncs - a = np.arange(10) - index = a % 2 == 0 - np.equal.at(a, index, [0, 2, 4, 6, 8]) - assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) - - # Test unary operator - a = np.arange(10, dtype='u4') - np.invert.at(a, [2, 5, 2]) - assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) - - # Test empty subspace - orig = np.arange(4) - a = orig[:, None][:, 0:0] - np.add.at(a, [0, 1], 3) - assert_array_equal(orig, np.arange(4)) - - # Test with swapped byte order - index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) - values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) - np.add.at(values, index, 3) - assert_array_equal(values, [1, 8, 6, 4]) - - # Test exception thrown - values = np.array(['a', 1], dtype=object) - assert_raises(TypeError, np.add.at, values, [0, 1], 1) - 
assert_array_equal(values, np.array(['a', 1], dtype=object)) - - # Test multiple output ufuncs raise error, gh-5665 - assert_raises(ValueError, np.modf.at, np.arange(10), [1]) - - def test_reduce_arguments(self): - f = np.add.reduce - d = np.ones((5,2), dtype=int) - o = np.ones((2,), dtype=d.dtype) - r = o * 5 - assert_equal(f(d), r) - # a, axis=0, dtype=None, out=None, keepdims=False - assert_equal(f(d, axis=0), r) - assert_equal(f(d, 0), r) - assert_equal(f(d, 0, dtype=None), r) - assert_equal(f(d, 0, dtype='i'), r) - assert_equal(f(d, 0, 'i'), r) - assert_equal(f(d, 0, None), r) - assert_equal(f(d, 0, None, out=None), r) - assert_equal(f(d, 0, None, out=o), r) - assert_equal(f(d, 0, None, o), r) - assert_equal(f(d, 0, None, None), r) - assert_equal(f(d, 0, None, None, keepdims=False), r) - assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) - assert_equal(f(d, 0, None, None, False, 0), r) - assert_equal(f(d, 0, None, None, False, initial=0), r) - assert_equal(f(d, 0, None, None, False, 0, True), r) - assert_equal(f(d, 0, None, None, False, 0, where=True), r) - # multiple keywords - assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) - assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) - assert_equal(f(d, 0, None, out=None, keepdims=False), r) - assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0, - where=True), r) - - # too little - assert_raises(TypeError, f) - # too much - assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1) - # invalid axis - assert_raises(TypeError, f, d, "invalid") - assert_raises(TypeError, f, d, axis="invalid") - assert_raises(TypeError, f, d, axis="invalid", dtype=None, - keepdims=True) - # invalid dtype - assert_raises(TypeError, f, d, 0, "invalid") - assert_raises(TypeError, f, d, dtype="invalid") - assert_raises(TypeError, f, d, dtype="invalid", out=None) - # invalid out - assert_raises(TypeError, f, d, 0, None, "invalid") - assert_raises(TypeError, f, d, out="invalid") - assert_raises(TypeError, f, d, out="invalid", dtype=None) - # keepdims boolean, no invalid value - # assert_raises(TypeError, f, d, 0, None, None, "invalid") - # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) - # invalid mix - assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", - out=None) - - # invalid keyword - assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) - assert_raises(TypeError, f, d, invalid=0) - assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", - out=None) - assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, - out=None, invalid=0) - assert_raises(TypeError, f, d, axis=0, dtype=None, - out=None, invalid=0) - - def test_structured_equal(self): - # https://github.com/numpy/numpy/issues/4855 - - class MyA(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return getattr(ufunc, method)(*(input.view(np.ndarray) - for input in inputs), **kwargs) - a = np.arange(12.).reshape(4,3) - ra = a.view(dtype=('f8,f8,f8')).squeeze() - mra = ra.view(MyA) - - target = np.array([ True, False, False, False], dtype=bool) - assert_equal(np.all(target == (mra == ra[0])), True) - - def test_scalar_equal(self): - # Scalar comparisons should always work, without deprecation warnings, - # even when the ufunc fails. - a = np.array(0.) - b = np.array('a') - assert_(a != b) - assert_(b != a) - assert_(not (a == b)) - assert_(not (b == a)) - - def test_NotImplemented_not_returned(self): - # See gh-5964 and gh-2091.
Some of these functions are not operator - # related and were fixed for other reasons in the past. - binary_funcs = [ - np.power, np.add, np.subtract, np.multiply, np.divide, - np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or, - np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, - np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2, - np.logical_and, np.logical_or, np.logical_xor, np.maximum, - np.minimum, np.mod, - np.greater, np.greater_equal, np.less, np.less_equal, - np.equal, np.not_equal] - - a = np.array('1') - b = 1 - c = np.array([1., 2.]) - for f in binary_funcs: - assert_raises(TypeError, f, a, b) - assert_raises(TypeError, f, c, a) - - def test_reduce_noncontig_output(self): - # Check that reduction deals with non-contiguous output arrays - # appropriately. - # - # gh-8036 - - x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8) - x = x[4:6,1:11:6,1:5].transpose(1, 2, 0) - y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4) - y = y_base[::2,:] - - y_base_copy = y_base.copy() - - r0 = np.add.reduce(x, out=y.copy(), axis=2) - r1 = np.add.reduce(x, out=y, axis=2) - - # The results should match, and y_base shouldn't get clobbered - assert_equal(r0, r1) - assert_equal(y_base[1,:], y_base_copy[1,:]) - assert_equal(y_base[3,:], y_base_copy[3,:]) - - def test_no_doc_string(self): - # gh-9337 - assert_('\n' not in umt.inner1d_no_doc.__doc__) - - def test_invalid_args(self): - # gh-7961 - exc = pytest.raises(TypeError, np.sqrt, None) - # minimally check the exception text - assert exc.match('loop of ufunc does not support') - - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) - def test_nat_is_not_finite(self, nat): - try: - assert not np.isfinite(nat) - except TypeError: - pass # ok, just not implemented - - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) - def test_nat_is_nan(self, nat): - try: - assert np.isnan(nat) - except TypeError: - pass # ok, just not implemented - - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) - def test_nat_is_not_inf(self, nat): - try: - assert not np.isinf(nat) - except TypeError: - pass # ok, just not implemented - - -@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) - if isinstance(getattr(np, x), np.ufunc)]) -def test_ufunc_types(ufunc): - ''' - Check for all ufuncs that the correct type is returned. Avoid - object and boolean types since many operations are not defined - for them. - - Choose the shape so even dot and matmul will succeed - ''' - for typ in ufunc.types: - # types is a list of strings like ii->i - if 'O' in typ or '?'
in typ: - continue - inp, out = typ.split('->') - args = [np.ones((3, 3), t) for t in inp] - with warnings.catch_warnings(record=True): - warnings.filterwarnings("always") - res = ufunc(*args) - if isinstance(res, tuple): - outs = tuple(out) - assert len(res) == len(outs) - for r, t in zip(res, outs): - assert r.dtype == np.dtype(t) - else: - assert res.dtype == np.dtype(out) - -@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) - if isinstance(getattr(np, x), np.ufunc)]) -def test_ufunc_noncontiguous(ufunc): - ''' - Check that contiguous and non-contiguous calls to ufuncs - have the same results for values in range(1, 7) - ''' - for typ in ufunc.types: - # types is a list of strings like ii->i - if any(set('O?mM') & set(typ)): - # bool, object, datetime are too irregular for this simple test - continue - inp, out = typ.split('->') - args_c = [np.empty(6, t) for t in inp] - args_n = [np.empty(18, t)[::3] for t in inp] - for a in args_c: - a.flat = range(1,7) - for a in args_n: - a.flat = range(1,7) - with warnings.catch_warnings(record=True): - warnings.filterwarnings("always") - res_c = ufunc(*args_c) - res_n = ufunc(*args_n) - if len(out) == 1: - res_c = (res_c,) - res_n = (res_n,) - for c_ar, n_ar in zip(res_c, res_n): - dt = c_ar.dtype - if np.issubdtype(dt, np.floating): - # for floating point results allow a small tolerance in comparisons - # since different algorithms (libm vs. intrinsics) can be used - # for different input strides - res_eps = np.finfo(dt).eps - tol = 2*res_eps - assert_allclose(res_c, res_n, atol=tol, rtol=tol) - else: - assert_equal(c_ar, n_ar) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath.py deleted file mode 100644 index e892e81..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath.py +++ /dev/null @@ -1,3138 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import warnings -import fnmatch -import itertools -import pytest -from fractions import Fraction - -import numpy.core.umath as ncu -from numpy.core import _umath_tests as ncu_tests -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, - _gen_alignment_data, assert_array_almost_equal_nulp - ) - -def on_powerpc(): - """ True if we are running on a Power PC platform.""" - return platform.processor() == 'powerpc' or \ - platform.machine().startswith('ppc') - - -class _FilterInvalids(object): - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - -class TestConstants(object): - def test_pi(self): - assert_allclose(ncu.pi, 3.141592653589793, 1e-15) - - def test_e(self): - assert_allclose(ncu.e, 2.718281828459045, 1e-15) - - def test_euler_gamma(self): - assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) - - -class TestOut(object): - def test_out_subok(self): - for subok in (True, False): - a = np.array(0.5) - o = np.empty(()) - - r = np.add(a, 2, o, subok=subok) - assert_(r is o) - r = np.add(a, 2, out=o, subok=subok) - assert_(r is o) - r = np.add(a, 2, out=(o,), subok=subok) - assert_(r is o) - - d = np.array(5.7) - o1 = np.empty(()) - o2 = np.empty((), dtype=np.int32) - - r1, r2 = np.frexp(d, o1, None, subok=subok) - assert_(r1 is o1) - r1, r2 = np.frexp(d, None, o2,
subok=subok) - assert_(r2 is o2) - r1, r2 = np.frexp(d, o1, o2, subok=subok) - assert_(r1 is o1) - assert_(r2 is o2) - - r1, r2 = np.frexp(d, out=(o1, None), subok=subok) - assert_(r1 is o1) - r1, r2 = np.frexp(d, out=(None, o2), subok=subok) - assert_(r2 is o2) - r1, r2 = np.frexp(d, out=(o1, o2), subok=subok) - assert_(r1 is o1) - assert_(r2 is o2) - - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs. - r1, r2 = np.frexp(d, out=o1, subok=subok) - - assert_raises(ValueError, np.add, a, 2, o, o, subok=subok) - assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) - assert_raises(TypeError, np.add, a, 2, [], subok=subok) - assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) - assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) - o.flags.writeable = False - assert_raises(ValueError, np.add, a, 2, o, subok=subok) - assert_raises(ValueError, np.add, a, 2, out=o, subok=subok) - assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok) - - def test_out_wrap_subok(self): - class ArrayWrap(np.ndarray): - __array_priority__ = 10 - - def __new__(cls, arr): - return np.asarray(arr).view(cls).copy() - - def __array_wrap__(self, arr, context): - return arr.view(type(self)) - - for subok in (True, False): - a = ArrayWrap([0.5]) - - r = np.add(a, 2, subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: - assert_(type(r) == np.ndarray) - - r = np.add(a, 2, None, subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: - assert_(type(r) == np.ndarray) - - r = np.add(a, 2, out=None, subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: - assert_(type(r) == np.ndarray) - - r = np.add(a, 2, out=(None,), subok=subok) - if subok: - assert_(isinstance(r, ArrayWrap)) - else: - assert_(type(r) == np.ndarray) - - d = ArrayWrap([5.7]) - o1 = np.empty((1,)) - o2 = np.empty((1,), dtype=np.int32) - - r1, r2 = np.frexp(d, o1, subok=subok) - if subok: - assert_(isinstance(r2, ArrayWrap)) - else: - assert_(type(r2) == np.ndarray) - - r1, r2 = np.frexp(d, o1, None, subok=subok) - if subok: - assert_(isinstance(r2, ArrayWrap)) - else: - assert_(type(r2) == np.ndarray) - - r1, r2 = np.frexp(d, None, o2, subok=subok) - if subok: - assert_(isinstance(r1, ArrayWrap)) - else: - assert_(type(r1) == np.ndarray) - - r1, r2 = np.frexp(d, out=(o1, None), subok=subok) - if subok: - assert_(isinstance(r2, ArrayWrap)) - else: - assert_(type(r2) == np.ndarray) - - r1, r2 = np.frexp(d, out=(None, o2), subok=subok) - if subok: - assert_(isinstance(r1, ArrayWrap)) - else: - assert_(type(r1) == np.ndarray) - - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs. - r1, r2 = np.frexp(d, out=o1, subok=subok) - - -class TestComparisons(object): - def test_ignore_object_identity_in_equal(self): - # Check comparing identical objects whose comparison - # is not a simple boolean, e.g., arrays that are compared elementwise. - a = np.array([np.array([1, 2, 3]), None], dtype=object) - assert_raises(ValueError, np.equal, a, a) - - # Check error raised when comparing identical non-comparable objects. 
- class FunkyType(object): - def __eq__(self, other): - raise TypeError("I won't compare") - - a = np.array([FunkyType()]) - assert_raises(TypeError, np.equal, a, a) - - # Check identity doesn't override comparison mismatch. - a = np.array([np.nan], dtype=object) - assert_equal(np.equal(a, a), [False]) - - def test_ignore_object_identity_in_not_equal(self): - # Check comparing identical objects whose comparison - # is not a simple boolean, e.g., arrays that are compared elementwise. - a = np.array([np.array([1, 2, 3]), None], dtype=object) - assert_raises(ValueError, np.not_equal, a, a) - - # Check error raised when comparing identical non-comparable objects. - class FunkyType(object): - def __ne__(self, other): - raise TypeError("I won't compare") - - a = np.array([FunkyType()]) - assert_raises(TypeError, np.not_equal, a, a) - - # Check identity doesn't override comparison mismatch. - a = np.array([np.nan], dtype=object) - assert_equal(np.not_equal(a, a), [True]) - - -class TestAdd(object): - def test_reduce_alignment(self): - # gh-9876 - # make sure arrays with weird strides work with the optimizations in - # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a - # 4 byte offset, even though its itemsize is 8. - a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) - a['a'] = -1 - assert_equal(a['b'].sum(), 0) - - -class TestDivision(object): - def test_division_int(self): - # int division should follow Python - x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) - if 5 / 10 == 0.5: - assert_equal(x / 100, [0.05, 0.1, 0.9, 1, - -0.05, -0.1, -0.9, -1, -1.2]) - else: - assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) - - def test_division_complex(self): - # check that implementation is correct - msg = "Complex division implementation check" - x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128) - assert_almost_equal(x**2/x, x, err_msg=msg) - # check overflow, underflow - msg = "Complex division overflow/underflow check" - x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = x**2/x - assert_almost_equal(y/x, [1, 1], err_msg=msg) - - def test_zero_division_complex(self): - with np.errstate(invalid="ignore", divide="ignore"): - x = np.array([0.0], dtype=np.complex128) - y = 1.0/x - assert_(np.isinf(y)[0]) - y = complex(np.inf, np.nan)/x - assert_(np.isinf(y)[0]) - y = complex(np.nan, np.inf)/x - assert_(np.isinf(y)[0]) - y = complex(np.inf, np.inf)/x - assert_(np.isinf(y)[0]) - y = 0.0/x - assert_(np.isnan(y)[0]) - - def test_floor_division_complex(self): - # check that implementation is correct - msg = "Complex floor division implementation check" - x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) - y = np.array([0., -1., 0., 0.], dtype=np.complex128) - assert_equal(np.floor_divide(x**2, x), y, err_msg=msg) - # check overflow, underflow - msg = "Complex floor division overflow/underflow check" - x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = np.floor_divide(x**2, x) - assert_equal(y, [1.e+110, 0], err_msg=msg) - - def test_floor_division_signed_zero(self): - # Check that the sign bit is correctly set when dividing positive and - # negative zero by one. 
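# (Illustrative sketch, separate from the deleted test file; assumes IEEE-754
# floats: -0.0 == 0.0 compares equal, so np.signbit is the way to observe that
# floor division preserves the sign of a signed zero, as checked next.)
import numpy as np
assert np.signbit(np.array([0.0, -0.0]) // 1).tolist() == [False, True]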
- x = np.zeros(10) - assert_equal(np.signbit(x//1), 0) - assert_equal(np.signbit((-x)//1), 1) - -def floor_divide_and_remainder(x, y): - return (np.floor_divide(x, y), np.remainder(x, y)) - - -def _signs(dt): - if dt in np.typecodes['UnsignedInteger']: - return (+1,) - else: - return (+1, -1) - - -class TestRemainder(object): - - def test_remainder_basic(self): - dt = np.typecodes['AllInteger'] + np.typecodes['Float'] - for op in [floor_divide_and_remainder, np.divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1) - b = np.array(sg2*19, dtype=dt2) - div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_remainder_exact(self): - # test that float results are exact for small integers. This also - # holds for the same integers scaled by powers of two. - nlst = list(range(-127, 0)) - plst = list(range(1, 128)) - dividend = nlst + [0] + plst - divisor = nlst + plst - arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) - - a, b = np.array(arg, dtype=int).T - # convert exact integer results from Python to float so that - # signed zero can be used, it is checked. - tgtdiv, tgtrem = np.array(tgt, dtype=float).T - tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) - tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) - - for op in [floor_divide_and_remainder, np.divmod]: - for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) - fa = a.astype(dt) - fb = b.astype(dt) - div, rem = op(fa, fb) - assert_equal(div, tgtdiv, err_msg=msg) - assert_equal(rem, tgtrem, err_msg=msg) - - def test_float_remainder_roundoff(self): - # gh-6127 - dt = np.typecodes['Float'] - for op in [floor_divide_and_remainder, np.divmod]: - for dt1, dt2 in itertools.product(dt, dt): - for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): - fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' - msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1) - b = np.array(sg2*6e-8, dtype=dt2) - div, rem = op(a, b) - # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) - if sg2 == -1: - assert_(b < rem <= 0, msg) - else: - assert_(b > rem >= 0, msg) - - def test_float_remainder_corner_cases(self): - # Check remainder magnitude. - for dt in np.typecodes['Float']: - b = np.array(1.0, dtype=dt) - a = np.nextafter(np.array(0.0, dtype=dt), -b) - rem = np.remainder(a, b) - assert_(rem <= b, 'dt: %s' % dt) - rem = np.remainder(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) - - # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - for dt in np.typecodes['Float']: - fone = np.array(1.0, dtype=dt) - fzer = np.array(0.0, dtype=dt) - finf = np.array(np.inf, dtype=dt) - fnan = np.array(np.nan, dtype=dt) - rem = np.remainder(fone, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - # MSVC 2008 returns NaN here, so disable the check. 
- #rem = np.remainder(fone, finf) - #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) - rem = np.remainder(fone, fnan) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - rem = np.remainder(finf, fone) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - - -class TestCbrt(object): - def test_cbrt_scalar(self): - assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) - - def test_cbrt(self): - x = np.array([1., 2., -3., np.inf, -np.inf]) - assert_almost_equal(np.cbrt(x**3), x) - - assert_(np.isnan(np.cbrt(np.nan))) - assert_equal(np.cbrt(np.inf), np.inf) - assert_equal(np.cbrt(-np.inf), -np.inf) - - -class TestPower(object): - def test_power_float(self): - x = np.array([1., 2., 3.]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_equal(x**2, [1., 4., 9.]) - y = x.copy() - y **= 2 - assert_equal(y, [1., 4., 9.]) - assert_almost_equal(x**(-1), [1., 0.5, 1./3]) - assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) - - for out, inp, msg in _gen_alignment_data(dtype=np.float32, - type='unary', - max_size=11): - exp = [ncu.sqrt(i) for i in inp] - assert_almost_equal(inp**(0.5), exp, err_msg=msg) - np.sqrt(inp, out=out) - assert_equal(out, exp, err_msg=msg) - - for out, inp, msg in _gen_alignment_data(dtype=np.float64, - type='unary', - max_size=7): - exp = [ncu.sqrt(i) for i in inp] - assert_almost_equal(inp**(0.5), exp, err_msg=msg) - np.sqrt(inp, out=out) - assert_equal(out, exp, err_msg=msg) - - def test_power_complex(self): - x = np.array([1+2j, 2+3j, 3+4j]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) - assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) - assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) - assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) - assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) - assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, - (-117-44j)/15625]) - assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), - ncu.sqrt(3+4j)]) - norm = 1./((x**14)[0]) - assert_almost_equal(x**14 * norm, - [i * norm for i in [-76443+16124j, 23161315+58317492j, - 5583548873 + 2465133864j]]) - - # Ticket #836 - def assert_complex_equal(x, y): - assert_array_equal(x.real, y.real) - assert_array_equal(x.imag, y.imag) - - for z in [complex(0, np.inf), complex(1, np.inf)]: - z = np.array([z], dtype=np.complex_) - with np.errstate(invalid="ignore"): - assert_complex_equal(z**1, z) - assert_complex_equal(z**2, z*z) - assert_complex_equal(z**3, z*z*z) - - def test_power_zero(self): - # ticket #1271 - zero = np.array([0j]) - one = np.array([1+0j]) - cnan = np.array([complex(np.nan, np.nan)]) - # FIXME cinf not tested. 
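# (For context: a complex zero raised to a negative or complex power has
# no finite value, so the expectations further down use cnan; the inf-base
# cases would need analogous special-value reasoning, hence the FIXME
# above.)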
- #cinf = np.array([complex(np.inf, 0)]) - - def assert_complex_equal(x, y): - x, y = np.asarray(x), np.asarray(y) - assert_array_equal(x.real, y.real) - assert_array_equal(x.imag, y.imag) - - # positive powers - for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: - assert_complex_equal(np.power(zero, p), zero) - - # zero power - assert_complex_equal(np.power(zero, 0), one) - with np.errstate(invalid="ignore"): - assert_complex_equal(np.power(zero, 0+1j), cnan) - - # negative power - for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: - assert_complex_equal(np.power(zero, -p), cnan) - assert_complex_equal(np.power(zero, -1+0.2j), cnan) - - def test_fast_power(self): - x = np.array([1, 2, 3], np.int16) - res = x**2.0 - assert_((x**2.00001).dtype is res.dtype) - assert_array_equal(res, [1, 4, 9]) - # check the inplace operation on the casted copy doesn't mess with x - assert_(not np.may_share_memory(res, x)) - assert_array_equal(x, [1, 2, 3]) - - # Check that the fast path ignores 1-element not 0-d arrays - res = x ** np.array([[[2]]]) - assert_equal(res.shape, (1, 1, 3)) - - def test_integer_power(self): - a = np.array([15, 15], 'i8') - b = np.power(a, a) - assert_equal(b, [437893890380859375, 437893890380859375]) - - def test_integer_power_with_integer_zero_exponent(self): - dtypes = np.typecodes['Integer'] - for dt in dtypes: - arr = np.arange(-10, 10, dtype=dt) - assert_equal(np.power(arr, 0), np.ones_like(arr)) - - dtypes = np.typecodes['UnsignedInteger'] - for dt in dtypes: - arr = np.arange(10, dtype=dt) - assert_equal(np.power(arr, 0), np.ones_like(arr)) - - def test_integer_power_of_1(self): - dtypes = np.typecodes['AllInteger'] - for dt in dtypes: - arr = np.arange(10, dtype=dt) - assert_equal(np.power(1, arr), np.ones_like(arr)) - - def test_integer_power_of_zero(self): - dtypes = np.typecodes['AllInteger'] - for dt in dtypes: - arr = np.arange(1, 10, dtype=dt) - assert_equal(np.power(0, arr), np.zeros_like(arr)) - - def test_integer_to_negative_power(self): - dtypes = np.typecodes['Integer'] - for dt in dtypes: - a = np.array([0, 1, 2, 3], dtype=dt) - b = np.array([0, 1, 2, -3], dtype=dt) - one = np.array(1, dtype=dt) - minusone = np.array(-1, dtype=dt) - assert_raises(ValueError, np.power, a, b) - assert_raises(ValueError, np.power, a, minusone) - assert_raises(ValueError, np.power, one, b) - assert_raises(ValueError, np.power, one, minusone) - - -class TestFloat_power(object): - def test_type_conversion(self): - arg_type = '?bhilBHILefdgFDG' - res_type = 'ddddddddddddgDDG' - for dtin, dtout in zip(arg_type, res_type): - msg = "dtin: %s, dtout: %s" % (dtin, dtout) - arg = np.ones(1, dtype=dtin) - res = np.float_power(arg, arg) - assert_(res.dtype.name == np.dtype(dtout).name, msg) - - -class TestLog2(object): - def test_log2_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_almost_equal(np.log2(xf), yf) - - def test_log2_ints(self): - # a good log2 implementation should provide this, - # might fail on OS with bad libm - for i in range(1, 65): - v = np.log2(2.**i) - assert_equal(v, float(i), err_msg='at exponent %d' % i) - - def test_log2_special(self): - assert_equal(np.log2(1.), 0.) 
- assert_equal(np.log2(np.inf), np.inf) - assert_(np.isnan(np.log2(np.nan))) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.log2(-1.))) - assert_(np.isnan(np.log2(-np.inf))) - assert_equal(np.log2(0.), -np.inf) - assert_(w[0].category is RuntimeWarning) - assert_(w[1].category is RuntimeWarning) - assert_(w[2].category is RuntimeWarning) - - -class TestExp2(object): - def test_exp2_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_almost_equal(np.exp2(yf), xf) - - -class TestLogAddExp2(_FilterInvalids): - # Need test for intermediate precisions - def test_logaddexp2_values(self): - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): - xf = np.log2(np.array(x, dtype=dt)) - yf = np.log2(np.array(y, dtype=dt)) - zf = np.log2(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) - - def test_logaddexp2_range(self): - x = [1000000, -1000000, 1000200, -1000200] - y = [1000200, -1000200, 1000000, -1000000] - z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) - - def test_inf(self): - inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] - z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='raise'): - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_equal(np.logaddexp2(logxf, logyf), logzf) - - def test_nan(self): - assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) - assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) - assert_(np.isnan(np.logaddexp2(np.nan, 0))) - assert_(np.isnan(np.logaddexp2(0, np.nan))) - assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) - - -class TestLog(object): - def test_log_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - log2_ = 0.69314718055994530943 - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ - assert_almost_equal(np.log(xf), yf) - - -class TestExp(object): - def test_exp_values(self): - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g']: - log2_ = 0.69314718055994530943 - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ - assert_almost_equal(np.exp(yf), xf) - -class TestSpecialFloats(object): - def test_exp_values(self): - x = [np.nan, np.nan, np.inf, 0.] 
- y = [np.nan, -np.nan, np.inf, -np.inf] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.exp(yf), xf) - - with np.errstate(over='raise'): - assert_raises(FloatingPointError, np.exp, np.float32(100.)) - assert_raises(FloatingPointError, np.exp, np.float32(1E19)) - - def test_log_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] - y = [np.nan, -np.nan, np.inf, -np.inf, 0., -1.0] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.log(yf), xf) - - with np.errstate(divide='raise'): - assert_raises(FloatingPointError, np.log, np.float32(0.)) - - with np.errstate(invalid='raise'): - assert_raises(FloatingPointError, np.log, np.float32(-np.inf)) - assert_raises(FloatingPointError, np.log, np.float32(-1.0)) - - def test_sincos_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, np.nan, np.nan] - y = [np.nan, -np.nan, np.inf, -np.inf] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.sin(yf), xf) - assert_equal(np.cos(yf), xf) - - with np.errstate(invalid='raise'): - assert_raises(FloatingPointError, np.sin, np.float32(-np.inf)) - assert_raises(FloatingPointError, np.sin, np.float32(np.inf)) - assert_raises(FloatingPointError, np.cos, np.float32(-np.inf)) - assert_raises(FloatingPointError, np.cos, np.float32(np.inf)) - - def test_sqrt_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, np.inf, np.nan, 0.] - y = [np.nan, -np.nan, np.inf, -np.inf, 0.] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.sqrt(yf), xf) - - #with np.errstate(invalid='raise'): - # for dt in ['f', 'd', 'g']: - # assert_raises(FloatingPointError, np.sqrt, np.array(-100., dtype=dt)) - - def test_abs_values(self): - x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] - y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.abs(yf), xf) - - def test_square_values(self): - x = [np.nan, np.nan, np.inf, np.inf] - y = [np.nan, -np.nan, np.inf, -np.inf] - with np.errstate(all='ignore'): - for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.square(yf), xf) - - with np.errstate(over='raise'): - assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f')) - assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d')) - - def test_reciprocal_values(self): - with np.errstate(all='ignore'): - x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] - y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] 
- for dt in ['f', 'd', 'g']: - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_equal(np.reciprocal(yf), xf) - - with np.errstate(divide='raise'): - for dt in ['f', 'd', 'g']: - assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt)) - -# func : [maxulperror, low, high] -avx_ufuncs = {'sqrt' :[1, 0., 100.], - 'absolute' :[0, -100., 100.], - 'reciprocal' :[1, 1., 100.], - 'square' :[1, -100., 100.], - 'rint' :[0, -100., 100.], - 'floor' :[0, -100., 100.], - 'ceil' :[0, -100., 100.], - 'trunc' :[0, -100., 100.]} - -class TestAVXUfuncs(object): - def test_avx_based_ufunc(self): - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - np.random.seed(42) - for func, prop in avx_ufuncs.items(): - maxulperr = prop[0] - minval = prop[1] - maxval = prop[2] - # various array sizes to ensure masking in AVX is tested - for size in range(1,32): - myfunc = getattr(np, func) - x_f32 = np.float32(np.random.uniform(low=minval, high=maxval, - size=size)) - x_f64 = np.float64(x_f32) - x_f128 = np.longdouble(x_f32) - y_true128 = myfunc(x_f128) - if maxulperr == 0: - assert_equal(myfunc(x_f32), np.float32(y_true128)) - assert_equal(myfunc(x_f64), np.float64(y_true128)) - else: - assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128), - maxulp=maxulperr) - assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128), - maxulp=maxulperr) - # various strides to test gather instruction - if size > 1: - y_true32 = myfunc(x_f32) - y_true64 = myfunc(x_f64) - for jj in strides: - assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) - assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) - -class TestAVXFloat32Transcendental(object): - def test_exp_float32(self): - np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000)) - x_f64 = np.float64(x_f32) - assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) - - def test_log_float32(self): - np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000)) - x_f64 = np.float64(x_f32) - assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) - - def test_sincos_float32(self): - np.random.seed(42) - N = 1000000 - M = np.int_(N/20) - index = np.random.randint(low=0, high=N, size=M) - x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N)) - # test coverage for elements > 117435.992f for which glibc is used - x_f32[index] = np.float32(10E+10*np.random.rand(M)) - x_f64 = np.float64(x_f32) - assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) - assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) - - def test_strided_float32(self): - np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) - for ii in sizes: - x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii)) - exp_true = np.exp(x_f32) - log_true = np.log(x_f32) - sin_true = np.sin(x_f32) - cos_true = np.cos(x_f32) - for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.sin(x_f32[::jj]), sin_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.cos(x_f32[::jj]), cos_true[::jj], nulp=2) - -class TestLogAddExp(_FilterInvalids): - def test_logaddexp_values(self): - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): - xf = np.log(np.array(x, dtype=dt)) - yf = np.log(np.array(y, dtype=dt)) - zf = 
np.log(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) - - def test_logaddexp_range(self): - x = [1000000, -1000000, 1000200, -1000200] - y = [1000200, -1000200, 1000000, -1000000] - z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp(logxf, logyf), logzf) - - def test_inf(self): - inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] - z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='raise'): - for dt in ['f', 'd', 'g']: - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_equal(np.logaddexp(logxf, logyf), logzf) - - def test_nan(self): - assert_(np.isnan(np.logaddexp(np.nan, np.inf))) - assert_(np.isnan(np.logaddexp(np.inf, np.nan))) - assert_(np.isnan(np.logaddexp(np.nan, 0))) - assert_(np.isnan(np.logaddexp(0, np.nan))) - assert_(np.isnan(np.logaddexp(np.nan, np.nan))) - - def test_reduce(self): - assert_equal(np.logaddexp.identity, -np.inf) - assert_equal(np.logaddexp.reduce([]), -np.inf) - - -class TestLog1p(object): - def test_log1p(self): - assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) - assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) - - def test_special(self): - with np.errstate(invalid="ignore", divide="ignore"): - assert_equal(ncu.log1p(np.nan), np.nan) - assert_equal(ncu.log1p(np.inf), np.inf) - assert_equal(ncu.log1p(-1.), -np.inf) - assert_equal(ncu.log1p(-2.), np.nan) - assert_equal(ncu.log1p(-np.inf), np.nan) - - -class TestExpm1(object): - def test_expm1(self): - assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) - assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) - - def test_special(self): - assert_equal(ncu.expm1(np.inf), np.inf) - assert_equal(ncu.expm1(0.), 0.) - assert_equal(ncu.expm1(-0.), -0.) - assert_equal(ncu.expm1(np.inf), np.inf) - assert_equal(ncu.expm1(-np.inf), -1.) 
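An aside on why expm1 and log1p earn dedicated tests against their naive forms: for |x| far below 1, forming 1 + x rounds away most of x's significant digits, so log(1 + x) and exp(x) - 1 lose precision exactly where it matters, while the fused ufuncs compute it directly. A minimal standalone sketch (printed values are approximate):

    import numpy as np

    x = 1e-10
    print(np.log(1 + x))   # ~1.0000000827e-10, wrong past the 7th digit
    print(np.exp(x) - 1)   # ~1.0000000827e-10, same cancellation
    print(np.log1p(x))     # ~1.0000000000e-10, accurate
    print(np.expm1(x))     # ~1.0000000000e-10, accurate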
- - -class TestHypot(object): - def test_simple(self): - assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) - assert_almost_equal(ncu.hypot(0, 0), 0) - - def test_reduce(self): - assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) - assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) - assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) - assert_equal(ncu.hypot.reduce([]), 0.0) - - -def assert_hypot_isnan(x, y): - with np.errstate(invalid='ignore'): - assert_(np.isnan(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) - - -def assert_hypot_isinf(x, y): - with np.errstate(invalid='ignore'): - assert_(np.isinf(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) - - -class TestHypotSpecialValues(object): - def test_nan_outputs(self): - assert_hypot_isnan(np.nan, np.nan) - assert_hypot_isnan(np.nan, 1) - - def test_nan_outputs2(self): - assert_hypot_isinf(np.nan, np.inf) - assert_hypot_isinf(np.inf, np.nan) - assert_hypot_isinf(np.inf, 0) - assert_hypot_isinf(0, np.inf) - assert_hypot_isinf(np.inf, np.inf) - assert_hypot_isinf(np.inf, 23.0) - - def test_no_fpe(self): - assert_no_warnings(ncu.hypot, np.inf, 0) - - -def assert_arctan2_isnan(x, y): - assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_ispinf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_isninf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_ispzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_isnzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) - - -class TestArctan2SpecialValues(object): - def test_one_one(self): - # atan2(1, 1) returns pi/4. - assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) - assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) - - def test_zero_nzero(self): - # atan2(+-0, -0) returns +-pi. - assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) - - def test_zero_pzero(self): - # atan2(+-0, +0) returns +-0. - assert_arctan2_ispzero(np.PZERO, np.PZERO) - assert_arctan2_isnzero(np.NZERO, np.PZERO) - - def test_zero_negative(self): - # atan2(+-0, x) returns +-pi for x < 0. - assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) - - def test_zero_positive(self): - # atan2(+-0, x) returns +-0 for x > 0. - assert_arctan2_ispzero(np.PZERO, 1) - assert_arctan2_isnzero(np.NZERO, 1) - - def test_positive_zero(self): - # atan2(y, +-0) returns +pi/2 for y > 0. - assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) - - def test_negative_zero(self): - # atan2(y, +-0) returns -pi/2 for y < 0. - assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) - - def test_any_ninf(self): - # atan2(+-y, -infinity) returns +-pi for finite y > 0. 
- assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) - - def test_any_pinf(self): - # atan2(+-y, +infinity) returns +-0 for finite y > 0. - assert_arctan2_ispzero(1, np.inf) - assert_arctan2_isnzero(-1, np.inf) - - def test_inf_any(self): - # atan2(+-infinity, x) returns +-pi/2 for finite x. - assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) - - def test_inf_ninf(self): - # atan2(+-infinity, -infinity) returns +-3*pi/4. - assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) - - def test_inf_pinf(self): - # atan2(+-infinity, +infinity) returns +-pi/4. - assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) - - def test_nan_any(self): - # atan2(nan, x) returns nan for any x, including inf - assert_arctan2_isnan(np.nan, np.inf) - assert_arctan2_isnan(np.inf, np.nan) - assert_arctan2_isnan(np.nan, np.nan) - - -class TestLdexp(object): - def _check_ldexp(self, tp): - assert_almost_equal(ncu.ldexp(np.array(2., np.float32), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.float64), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), - np.array(3, tp)), 16.) - - def test_ldexp(self): - # The default Python int type should work - assert_almost_equal(ncu.ldexp(2., 3), 16.) - # The following int types should all be accepted - self._check_ldexp(np.int8) - self._check_ldexp(np.int16) - self._check_ldexp(np.int32) - self._check_ldexp('i') - self._check_ldexp('l') - - def test_ldexp_overflow(self): - # silence warning emitted on overflow - with np.errstate(over="ignore"): - imax = np.iinfo(np.dtype('l')).max - imin = np.iinfo(np.dtype('l')).min - assert_equal(ncu.ldexp(2., imax), np.inf) - assert_equal(ncu.ldexp(2., imin), 0) - - -class TestMaximum(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.maximum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.maximum.reduce([1, 2j]), 1) - assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.maximum(arg1, arg2), out) - - def test_object_nans(self): - # Multiple checks to give this a chance to - # fail if cmp is used instead of rich compare. - # Failure cannot be guaranteed. 
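# (How the object path behaves: for object arrays np.maximum defers to the
# elements' rich comparisons, and float('nan') answers False to every
# ordering query, so the non-nan operand should win consistently here; a
# legacy cmp-style three-way comparison would give no such guarantee.)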
- for i in range(1): - x = np.array(float('nan'), object) - y = 1.0 - z = np.array(float('nan'), object) - assert_(np.maximum(x, y) == 1.0) - assert_(np.maximum(z, y) == 1.0) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([nan, nan, nan], dtype=complex) - assert_equal(np.maximum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=object) - arg2 = arg1 + 1 - assert_equal(np.maximum(arg1, arg2), arg2) - - -class TestMinimum(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.minimum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.minimum.reduce([1, 2j]), 2j) - assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.minimum(arg1, arg2), out) - - def test_object_nans(self): - # Multiple checks to give this a chance to - # fail if cmp is used instead of rich compare. - # Failure cannot be guaranteed. - for i in range(1): - x = np.array(float('nan'), object) - y = 1.0 - z = np.array(float('nan'), object) - assert_(np.minimum(x, y) == 1.0) - assert_(np.minimum(z, y) == 1.0) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([nan, nan, nan], dtype=complex) - assert_equal(np.minimum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=object) - arg2 = arg1 + 1 - assert_equal(np.minimum(arg1, arg2), arg1) - - -class TestFmax(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmax.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 9) - assert_equal(func(tmp2), 9) - - def test_reduce_complex(self): - assert_equal(np.fmax.reduce([1, 2j]), 1) - assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmax(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([0, 0, nan], dtype=complex) - assert_equal(np.fmax(arg1, arg2), out) - - -class TestFmin(_FilterInvalids): - def test_reduce(self): - dflt = 
np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmin.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 1) - assert_equal(func(tmp2), 1) - - def test_reduce_complex(self): - assert_equal(np.fmin.reduce([1, 2j]), 2j) - assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmin(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=complex) - arg2 = np.array([cnan, 0, cnan], dtype=complex) - out = np.array([0, 0, nan], dtype=complex) - assert_equal(np.fmin(arg1, arg2), out) - - -class TestBool(object): - def test_exceptions(self): - a = np.ones(1, dtype=np.bool_) - assert_raises(TypeError, np.negative, a) - assert_raises(TypeError, np.positive, a) - assert_raises(TypeError, np.subtract, a, a) - - def test_truth_table_logical(self): - # 2, 3 and 4 serves as true values - input1 = [0, 0, 3, 2] - input2 = [0, 4, 0, 2] - - typecodes = (np.typecodes['AllFloat'] - + np.typecodes['AllInteger'] - + '?') # boolean - for dtype in map(np.dtype, typecodes): - arg1 = np.asarray(input1, dtype=dtype) - arg2 = np.asarray(input2, dtype=dtype) - - # OR - out = [False, True, True, True] - for func in (np.logical_or, np.maximum): - assert_equal(func(arg1, arg2).astype(bool), out) - # AND - out = [False, False, False, True] - for func in (np.logical_and, np.minimum): - assert_equal(func(arg1, arg2).astype(bool), out) - # XOR - out = [False, True, True, False] - for func in (np.logical_xor, np.not_equal): - assert_equal(func(arg1, arg2).astype(bool), out) - - def test_truth_table_bitwise(self): - arg1 = [False, False, True, True] - arg2 = [False, True, False, True] - - out = [False, True, True, True] - assert_equal(np.bitwise_or(arg1, arg2), out) - - out = [False, False, False, True] - assert_equal(np.bitwise_and(arg1, arg2), out) - - out = [False, True, True, False] - assert_equal(np.bitwise_xor(arg1, arg2), out) - - def test_reduce(self): - none = np.array([0, 0, 0, 0], bool) - some = np.array([1, 0, 1, 1], bool) - every = np.array([1, 1, 1, 1], bool) - empty = np.array([], bool) - - arrs = [none, some, every, empty] - - for arr in arrs: - assert_equal(np.logical_and.reduce(arr), all(arr)) - - for arr in arrs: - assert_equal(np.logical_or.reduce(arr), any(arr)) - - for arr in arrs: - assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) - - -class TestBitwiseUFuncs(object): - - bitwise_types = [np.dtype(c) for c in '?' 
+ 'bBhHiIlLqQ' + 'O'] - - def test_values(self): - for dt in self.bitwise_types: - zeros = np.array([0], dtype=dt) - ones = np.array([-1], dtype=dt) - msg = "dt = '%s'" % dt.char - - assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) - assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) - - assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) - assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) - assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) - - assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) - assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) - assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) - - assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) - assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) - assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) - - def test_types(self): - for dt in self.bitwise_types: - zeros = np.array([0], dtype=dt) - ones = np.array([-1], dtype=dt) - msg = "dt = '%s'" % dt.char - - assert_(np.bitwise_not(zeros).dtype == dt, msg) - assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) - assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) - assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) - - def test_identity(self): - assert_(np.bitwise_or.identity == 0, 'bitwise_or') - assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') - assert_(np.bitwise_and.identity == -1, 'bitwise_and') - - def test_reduction(self): - binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) - - for dt in self.bitwise_types: - zeros = np.array([0], dtype=dt) - ones = np.array([-1], dtype=dt) - for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) - assert_equal(f.reduce(zeros), zeros, err_msg=msg) - assert_equal(f.reduce(ones), ones, err_msg=msg) - - # Test empty reduction, no object dtype - for dt in self.bitwise_types[:-1]: - # No object array types - empty = np.array([], dtype=dt) - for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) - tgt = np.array(f.identity, dtype=dt) - res = f.reduce(empty) - assert_equal(res, tgt, err_msg=msg) - assert_(res.dtype == tgt.dtype, msg) - - # Empty object arrays use the identity. Note that the types may - # differ, the actual type used is determined by the assign_identity - # function and is not the same as the type returned by the identity - # method. 
-        for f in binary_funcs:
-            msg = "dt: '%s'" % (f,)
-            empty = np.array([], dtype=object)
-            tgt = f.identity
-            res = f.reduce(empty)
-            assert_equal(res, tgt, err_msg=msg)
-
-        # Non-empty object arrays do not use the identity
-        for f in binary_funcs:
-            msg = "dt: '%s'" % (f,)
-            btype = np.array([True], dtype=object)
-            assert_(type(f.reduce(btype)) is bool, msg)
-
-
-class TestInt(object):
-    def test_logical_not(self):
-        x = np.ones(10, dtype=np.int16)
-        o = np.ones(10 * 2, dtype=bool)
-        tgt = o.copy()
-        tgt[::2] = False
-        os = o[::2]
-        assert_array_equal(np.logical_not(x, out=os), False)
-        assert_array_equal(o, tgt)
-
-
-class TestFloatingPoint(object):
-    def test_floating_point(self):
-        assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
-
-
-class TestDegrees(object):
-    def test_degrees(self):
-        assert_almost_equal(ncu.degrees(np.pi), 180.0)
-        assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
-
-
-class TestRadians(object):
-    def test_radians(self):
-        assert_almost_equal(ncu.radians(180.0), np.pi)
-        assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
-
-
-class TestHeavside(object):
-    def test_heaviside(self):
-        x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
-        expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
-        expected1 = expectedhalf.copy()
-        expected1[0, 2] = 1
-
-        h = ncu.heaviside(x, 0.5)
-        assert_equal(h, expectedhalf)
-
-        h = ncu.heaviside(x, 1.0)
-        assert_equal(h, expected1)
-
-        x = x.astype(np.float32)
-
-        h = ncu.heaviside(x, np.float32(0.5))
-        assert_equal(h, expectedhalf.astype(np.float32))
-
-        h = ncu.heaviside(x, np.float32(1.0))
-        assert_equal(h, expected1.astype(np.float32))
-
-
-class TestSign(object):
-    def test_sign(self):
-        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
-        out = np.zeros(a.shape)
-        tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
-
-        with np.errstate(invalid='ignore'):
-            res = ncu.sign(a)
-            assert_equal(res, tgt)
-            res = ncu.sign(a, out)
-            assert_equal(res, tgt)
-            assert_equal(out, tgt)
-
-    def test_sign_dtype_object(self):
-        # In reference to github issue #6229
-
-        foo = np.array([-.1, 0, .1])
-        a = np.sign(foo.astype(object))
-        b = np.sign(foo)
-
-        assert_array_equal(a, b)
-
-    def test_sign_dtype_nan_object(self):
-        # In reference to github issue #6229
-        def test_nan():
-            foo = np.array([np.nan])
-            # FIXME: a not used
-            a = np.sign(foo.astype(object))
-
-        assert_raises(TypeError, test_nan)
-
-class TestMinMax(object):
-    def test_minmax_blocked(self):
-        # simd tests on max/min, test all alignments, slow but important
-        # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
-        for dt, sz in [(np.float32, 15), (np.float64, 7)]:
-            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
-                                                     max_size=sz):
-                for i in range(inp.size):
-                    inp[:] = np.arange(inp.size, dtype=dt)
-                    inp[i] = np.nan
-                    emsg = lambda: '%r\n%s' % (inp, msg)
-                    with suppress_warnings() as sup:
-                        sup.filter(RuntimeWarning,
-                                   "invalid value encountered in reduce")
-                        assert_(np.isnan(inp.max()), msg=emsg)
-                        assert_(np.isnan(inp.min()), msg=emsg)
-
-                    inp[i] = 1e10
-                    assert_equal(inp.max(), 1e10, err_msg=msg)
-                    inp[i] = -1e10
-                    assert_equal(inp.min(), -1e10, err_msg=msg)
-
-    def test_lower_align(self):
-        # check data that is not aligned to element size
-        # i.e. doubles are aligned to 4 bytes on i386
-        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
-        assert_equal(d.max(), d[0])
-        assert_equal(d.min(), d[0])
-
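# (Aside on the slice-and-view trick above: assuming the int8 backing
# allocation is itself at least 8-byte aligned, dropping four bytes from
# each end yields float64 data deliberately misaligned for 8-byte loads:
#     buf = np.zeros(23 * 8, dtype=np.int8)  # aligned backing store
#     d = buf[4:-4].view(np.float64)         # data pointer offset by 4
#     d.__array_interface__['data'][0] % 8   # -> 4, not 0
# so the blocked max/min loops must handle unaligned access too.)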
-    def test_reduce_reorder(self):
-        # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
-        # and put it before the call to an intrinsic function that causes
-        # invalid status to be set. Also make sure warnings are not emitted
-        for n in (2, 4, 8, 16, 32):
-            for dt in (np.float32, np.float16, np.complex64):
-                for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
-                    assert_equal(np.min(r), np.nan)
-
-    def test_minimize_no_warns(self):
-        a = np.minimum(np.nan, 1)
-        assert_equal(a, np.nan)
-
-
-class TestAbsoluteNegative(object):
-    def test_abs_neg_blocked(self):
-        # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
-        for dt, sz in [(np.float32, 11), (np.float64, 5)]:
-            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
-                                                     max_size=sz):
-                tgt = [ncu.absolute(i) for i in inp]
-                np.absolute(inp, out=out)
-                assert_equal(out, tgt, err_msg=msg)
-                assert_((out >= 0).all())
-
-                tgt = [-1*(i) for i in inp]
-                np.negative(inp, out=out)
-                assert_equal(out, tgt, err_msg=msg)
-
-                for v in [np.nan, -np.inf, np.inf]:
-                    for i in range(inp.size):
-                        d = np.arange(inp.size, dtype=dt)
-                        inp[:] = -d
-                        inp[i] = v
-                        d[i] = -v if v == -np.inf else v
-                        assert_array_equal(np.abs(inp), d, err_msg=msg)
-                        np.abs(inp, out=out)
-                        assert_array_equal(out, d, err_msg=msg)
-
-                        assert_array_equal(-inp, -1*inp, err_msg=msg)
-                        d = -1 * inp
-                        np.negative(inp, out=out)
-                        assert_array_equal(out, d, err_msg=msg)
-
-    def test_lower_align(self):
-        # check data that is not aligned to element size
-        # i.e. doubles are aligned to 4 bytes on i386
-        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
-        assert_equal(np.abs(d), d)
-        assert_equal(np.negative(d), -d)
-        np.negative(d, out=d)
-        np.negative(np.ones_like(d), out=d)
-        np.abs(d, out=d)
-        np.abs(np.ones_like(d), out=d)
-
-
-class TestPositive(object):
-    def test_valid(self):
-        valid_dtypes = [int, float, complex, object]
-        for dtype in valid_dtypes:
-            x = np.arange(5, dtype=dtype)
-            result = np.positive(x)
-            assert_equal(x, result, err_msg=str(dtype))
-
-    def test_invalid(self):
-        with assert_raises(TypeError):
-            np.positive(True)
-        with assert_raises(TypeError):
-            np.positive(np.datetime64('2000-01-01'))
-        with assert_raises(TypeError):
-            np.positive(np.array(['foo'], dtype=str))
-        with assert_raises(TypeError):
-            np.positive(np.array(['bar'], dtype=object))
-
-
-class TestSpecialMethods(object):
-    def test_wrap(self):
-
-        class with_wrap(object):
-            def __array__(self):
-                return np.zeros(1)
-
-            def __array_wrap__(self, arr, context):
-                r = with_wrap()
-                r.arr = arr
-                r.context = context
-                return r
-
-        a = with_wrap()
-        x = ncu.minimum(a, a)
-        assert_equal(x.arr, np.zeros(1))
-        func, args, i = x.context
-        assert_(func is ncu.minimum)
-        assert_equal(len(args), 2)
-        assert_equal(args[0], a)
-        assert_equal(args[1], a)
-        assert_equal(i, 0)
-
-    def test_wrap_and_prepare_out(self):
-        # Calling convention for out should not affect how special methods are
-        # called
-
-        class StoreArrayPrepareWrap(np.ndarray):
-            _wrap_args = None
-            _prepare_args = None
-            def __new__(cls):
-                return np.empty(()).view(cls)
-            def __array_wrap__(self, obj, context):
-                self._wrap_args = context[1]
-                return obj
-            def __array_prepare__(self, obj, context):
-                self._prepare_args = context[1]
-                return obj
-            @property
-            def args(self):
-                # We need to ensure these are fetched at the same time, before
-                # any other ufuncs are called by the assertions
-                return (self._prepare_args, self._wrap_args)
-            def __repr__(self):
-                return "a"  # for short test output
-
-        def do_test(f_call, f_expected):
-            a = StoreArrayPrepareWrap()
-            f_call(a)
-            p, w = a.args
-            expected = f_expected(a)
-            try:
assert_equal(p, expected) - assert_equal(w, expected) - except AssertionError as e: - # assert_equal produces truly useless error messages - raise AssertionError("\n".join([ - "Bad arguments passed in ufunc call", - " expected: {}".format(expected), - " __array_prepare__ got: {}".format(p), - " __array_wrap__ got: {}".format(w) - ])) - - # method not on the out argument - do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) - - # method on the out argument - do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) - - def test_wrap_with_iterable(self): - # test fix for bug #1026: - - class with_wrap(np.ndarray): - __array_priority__ = 10 - - def __new__(cls): - return np.asarray(1).view(cls).copy() - - def __array_wrap__(self, arr, context): - return arr.view(type(self)) - - a = with_wrap() - x = ncu.multiply(a, (1, 2, 3)) - assert_(isinstance(x, with_wrap)) - assert_array_equal(x, np.array((1, 2, 3))) - - def test_priority_with_scalar(self): - # test fix for bug #826: - - class A(np.ndarray): - __array_priority__ = 10 - - def __new__(cls): - return np.asarray(1.0, 'float64').view(cls).copy() - - a = A() - x = np.float64(1)*a - assert_(isinstance(x, A)) - assert_array_equal(x, np.array(1)) - - def test_old_wrap(self): - - class with_wrap(object): - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr): - r = with_wrap() - r.arr = arr - return r - - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x.arr, np.zeros(1)) - - def test_priority(self): - - class A(object): - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr, context): - r = type(self)() - r.arr = arr - r.context = context - return r - - class B(A): - __array_priority__ = 20. - - class C(A): - __array_priority__ = 40. - - x = np.zeros(1) - a = A() - b = B() - c = C() - f = ncu.minimum - assert_(type(f(x, x)) is np.ndarray) - assert_(type(f(x, a)) is A) - assert_(type(f(x, b)) is B) - assert_(type(f(x, c)) is C) - assert_(type(f(a, x)) is A) - assert_(type(f(b, x)) is B) - assert_(type(f(c, x)) is C) - - assert_(type(f(a, a)) is A) - assert_(type(f(a, b)) is B) - assert_(type(f(b, a)) is B) - assert_(type(f(b, b)) is B) - assert_(type(f(b, c)) is C) - assert_(type(f(c, b)) is C) - assert_(type(f(c, c)) is C) - - assert_(type(ncu.exp(a) is A)) - assert_(type(ncu.exp(b) is B)) - assert_(type(ncu.exp(c) is C)) - - def test_failing_wrap(self): - - class A(object): - def __array__(self): - return np.zeros(2) - - def __array_wrap__(self, arr, context): - raise RuntimeError - - a = A() - assert_raises(RuntimeError, ncu.maximum, a, a) - assert_raises(RuntimeError, ncu.maximum.reduce, a) - - def test_failing_out_wrap(self): - - singleton = np.array([1.0]) - - class Ok(np.ndarray): - def __array_wrap__(self, obj): - return singleton - - class Bad(np.ndarray): - def __array_wrap__(self, obj): - raise RuntimeError - - ok = np.empty(1).view(Ok) - bad = np.empty(1).view(Bad) - # double-free (segfault) of "ok" if "bad" raises an exception - for i in range(10): - assert_raises(RuntimeError, ncu.frexp, 1, ok, bad) - - def test_none_wrap(self): - # Tests that issue #8507 is resolved. 
Previously, this would segfault - - class A(object): - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr, context=None): - return None - - a = A() - assert_equal(ncu.maximum(a, a), None) - - def test_default_prepare(self): - - class with_wrap(object): - __array_priority__ = 10 - - def __array__(self): - return np.zeros(1) - - def __array_wrap__(self, arr, context): - return arr - - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x, np.zeros(1)) - assert_equal(type(x), np.ndarray) - - def test_prepare(self): - - class with_prepare(np.ndarray): - __array_priority__ = 10 - - def __array_prepare__(self, arr, context): - # make sure we can return a new - return np.array(arr).view(type=with_prepare) - - a = np.array(1).view(type=with_prepare) - x = np.add(a, a) - assert_equal(x, np.array(2)) - assert_equal(type(x), with_prepare) - - def test_prepare_out(self): - - class with_prepare(np.ndarray): - __array_priority__ = 10 - - def __array_prepare__(self, arr, context): - return np.array(arr).view(type=with_prepare) - - a = np.array([1]).view(type=with_prepare) - x = np.add(a, a, a) - # Returned array is new, because of the strange - # __array_prepare__ above - assert_(not np.shares_memory(x, a)) - assert_equal(x, np.array([2])) - assert_equal(type(x), with_prepare) - - def test_failing_prepare(self): - - class A(object): - def __array__(self): - return np.zeros(1) - - def __array_prepare__(self, arr, context=None): - raise RuntimeError - - a = A() - assert_raises(RuntimeError, ncu.maximum, a, a) - - def test_array_with_context(self): - - class A(object): - def __array__(self, dtype=None, context=None): - func, args, i = context - self.func = func - self.args = args - self.i = i - return np.zeros(1) - - class B(object): - def __array__(self, dtype=None): - return np.zeros(1, dtype) - - class C(object): - def __array__(self): - return np.zeros(1) - - a = A() - ncu.maximum(np.zeros(1), a) - assert_(a.func is ncu.maximum) - assert_equal(a.args[0], 0) - assert_(a.args[1] is a) - assert_(a.i == 1) - assert_equal(ncu.maximum(a, B()), 0) - assert_equal(ncu.maximum(a, C()), 0) - - def test_ufunc_override(self): - # check override works even with instance with high priority. - class A(object): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return self, func, method, inputs, kwargs - - class MyNDArray(np.ndarray): - __array_priority__ = 100 - - a = A() - b = np.array([1]).view(MyNDArray) - res0 = np.multiply(a, b) - res1 = np.multiply(b, b, out=a) - - # self - assert_equal(res0[0], a) - assert_equal(res1[0], a) - assert_equal(res0[1], np.multiply) - assert_equal(res1[1], np.multiply) - assert_equal(res0[2], '__call__') - assert_equal(res1[2], '__call__') - assert_equal(res0[3], (a, b)) - assert_equal(res1[3], (b, b)) - assert_equal(res0[4], {}) - assert_equal(res1[4], {'out': (a,)}) - - def test_ufunc_override_mro(self): - - # Some multi arg functions for testing. - def tres_mul(a, b, c): - return a * b * c - - def quatro_mul(a, b, c, d): - return a * b * c * d - - # Make these into ufuncs. 
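# (np.frompyfunc(func, nin, nout) wraps an arbitrary Python callable as a
# genuine ufunc with object-dtype results, so the ternary and quaternary
# ufuncs built below go through the same __array_ufunc__ dispatch as the
# builtin binary ones.)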
- three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) - four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) - - class A(object): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return "A" - - class ASub(A): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return "ASub" - - class B(object): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - return "B" - - class C(object): - def __init__(self): - self.count = 0 - - def __array_ufunc__(self, func, method, *inputs, **kwargs): - self.count += 1 - return NotImplemented - - class CSub(C): - def __array_ufunc__(self, func, method, *inputs, **kwargs): - self.count += 1 - return NotImplemented - - a = A() - a_sub = ASub() - b = B() - c = C() - - # Standard - res = np.multiply(a, a_sub) - assert_equal(res, "ASub") - res = np.multiply(a_sub, b) - assert_equal(res, "ASub") - - # With 1 NotImplemented - res = np.multiply(c, a) - assert_equal(res, "A") - assert_equal(c.count, 1) - # Check our counter works, so we can trust tests below. - res = np.multiply(c, a) - assert_equal(c.count, 2) - - # Both NotImplemented. - c = C() - c_sub = CSub() - assert_raises(TypeError, np.multiply, c, c_sub) - assert_equal(c.count, 1) - assert_equal(c_sub.count, 1) - c.count = c_sub.count = 0 - assert_raises(TypeError, np.multiply, c_sub, c) - assert_equal(c.count, 1) - assert_equal(c_sub.count, 1) - c.count = 0 - assert_raises(TypeError, np.multiply, c, c) - assert_equal(c.count, 1) - c.count = 0 - assert_raises(TypeError, np.multiply, 2, c) - assert_equal(c.count, 1) - - # Ternary testing. - assert_equal(three_mul_ufunc(a, 1, 2), "A") - assert_equal(three_mul_ufunc(1, a, 2), "A") - assert_equal(three_mul_ufunc(1, 2, a), "A") - - assert_equal(three_mul_ufunc(a, a, 6), "A") - assert_equal(three_mul_ufunc(a, 2, a), "A") - assert_equal(three_mul_ufunc(a, 2, b), "A") - assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") - assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") - c.count = 0 - assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") - assert_equal(c.count, 1) - c.count = 0 - assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") - assert_equal(c.count, 0) - - c.count = 0 - assert_equal(three_mul_ufunc(a, b, c), "A") - assert_equal(c.count, 0) - c_sub.count = 0 - assert_equal(three_mul_ufunc(a, b, c_sub), "A") - assert_equal(c_sub.count, 0) - assert_equal(three_mul_ufunc(1, 2, b), "B") - - assert_raises(TypeError, three_mul_ufunc, 1, 2, c) - assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) - assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) - - # Quaternary testing. 
- assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") - assert_equal(four_mul_ufunc(1, a, 2, 3), "A") - assert_equal(four_mul_ufunc(1, 1, a, 3), "A") - assert_equal(four_mul_ufunc(1, 1, 2, a), "A") - - assert_equal(four_mul_ufunc(a, b, 2, 3), "A") - assert_equal(four_mul_ufunc(1, a, 2, b), "A") - assert_equal(four_mul_ufunc(b, 1, a, 3), "B") - assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") - assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") - - c = C() - c_sub = CSub() - assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) - assert_equal(c.count, 1) - c.count = 0 - assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) - assert_equal(c_sub.count, 1) - assert_equal(c.count, 1) - c2 = C() - c.count = c_sub.count = 0 - assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) - assert_equal(c_sub.count, 1) - assert_equal(c.count, 1) - assert_equal(c2.count, 0) - c.count = c2.count = c_sub.count = 0 - assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) - assert_equal(c_sub.count, 1) - assert_equal(c.count, 0) - assert_equal(c2.count, 1) - - def test_ufunc_override_methods(self): - - class A(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return self, ufunc, method, inputs, kwargs - - # __call__ - a = A() - res = np.multiply.__call__(1, a, foo='bar', answer=42) - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], '__call__') - assert_equal(res[3], (1, a)) - assert_equal(res[4], {'foo': 'bar', 'answer': 42}) - - # __call__, wrong args - assert_raises(TypeError, np.multiply, a) - assert_raises(TypeError, np.multiply, a, a, a, a) - assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') - assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) - - # reduce, positional args - res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduce') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'keepdims': 'keep0', - 'axis': 'axis0'}) - - # reduce, kwargs - res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', - keepdims='keep0', initial='init0', - where='where0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduce') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'keepdims': 'keep0', - 'axis': 'axis0', - 'initial': 'init0', - 'where': 'where0'}) - - # reduce, output equal to None removed, but not other explicit ones, - # even if they are at their default value. - res = np.multiply.reduce(a, 0, None, None, False) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) - res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) - assert_equal(res[4], {'axis': 0, 'keepdims': True}) - res = np.multiply.reduce(a, None, out=(None,), dtype=None) - assert_equal(res[4], {'axis': None, 'dtype': None}) - res = np.multiply.reduce(a, 0, None, None, False, 2, True) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, - 'initial': 2, 'where': True}) - # np._NoValue ignored for initial - res = np.multiply.reduce(a, 0, None, None, False, - np._NoValue, True) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, - 'where': True}) - # None kept for initial, True for where. 
- res = np.multiply.reduce(a, 0, None, None, False, None, True) - assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, - 'initial': None, 'where': True}) - - # reduce, wrong args - assert_raises(ValueError, np.multiply.reduce, a, out=()) - assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) - assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') - - # accumulate, pos args - res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'accumulate') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # accumulate, kwargs - res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', - out='out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'accumulate') - assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # accumulate, output equal to None removed. - res = np.multiply.accumulate(a, 0, None, None) - assert_equal(res[4], {'axis': 0, 'dtype': None}) - res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') - assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) - res = np.multiply.accumulate(a, None, out=(None,), dtype=None) - assert_equal(res[4], {'axis': None, 'dtype': None}) - - # accumulate, wrong args - assert_raises(ValueError, np.multiply.accumulate, a, out=()) - assert_raises(ValueError, np.multiply.accumulate, a, - out=('out0', 'out1')) - assert_raises(TypeError, np.multiply.accumulate, a, - 'axis0', axis='axis0') - - # reduceat, pos args - res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduceat') - assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # reduceat, kwargs - res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', - out='out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduceat') - assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', - 'out': ('out0',), - 'axis': 'axis0'}) - - # reduceat, output equal to None removed. 
- res = np.multiply.reduceat(a, [4, 2], 0, None, None) - assert_equal(res[4], {'axis': 0, 'dtype': None}) - res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt') - assert_equal(res[4], {'axis': None, 'dtype': 'dt'}) - res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,)) - assert_equal(res[4], {'axis': None, 'dtype': None}) - - # reduceat, wrong args - assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) - assert_raises(ValueError, np.multiply.reduce, a, [4, 2], - out=('out0', 'out1')) - assert_raises(TypeError, np.multiply.reduce, a, [4, 2], - 'axis0', axis='axis0') - - # outer - res = np.multiply.outer(a, 42) - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'outer') - assert_equal(res[3], (a, 42)) - assert_equal(res[4], {}) - - # outer, wrong args - assert_raises(TypeError, np.multiply.outer, a) - assert_raises(TypeError, np.multiply.outer, a, a, a, a) - assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a') - - # at - res = np.multiply.at(a, [4, 2], 'b0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'at') - assert_equal(res[3], (a, [4, 2], 'b0')) - - # at, wrong args - assert_raises(TypeError, np.multiply.at, a) - assert_raises(TypeError, np.multiply.at, a, a, a, a) - - def test_ufunc_override_out(self): - - class A(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return kwargs - - class B(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return kwargs - - a = A() - b = B() - res0 = np.multiply(a, b, 'out_arg') - res1 = np.multiply(a, b, out='out_arg') - res2 = np.multiply(2, b, 'out_arg') - res3 = np.multiply(3, b, out='out_arg') - res4 = np.multiply(a, 4, 'out_arg') - res5 = np.multiply(a, 5, out='out_arg') - - assert_equal(res0['out'][0], 'out_arg') - assert_equal(res1['out'][0], 'out_arg') - assert_equal(res2['out'][0], 'out_arg') - assert_equal(res3['out'][0], 'out_arg') - assert_equal(res4['out'][0], 'out_arg') - assert_equal(res5['out'][0], 'out_arg') - - # ufuncs with multiple output modf and frexp. - res6 = np.modf(a, 'out0', 'out1') - res7 = np.frexp(a, 'out0', 'out1') - assert_equal(res6['out'][0], 'out0') - assert_equal(res6['out'][1], 'out1') - assert_equal(res7['out'][0], 'out0') - assert_equal(res7['out'][1], 'out1') - - # While we're at it, check that default output is never passed on. - assert_(np.sin(a, None) == {}) - assert_(np.sin(a, out=None) == {}) - assert_(np.sin(a, out=(None,)) == {}) - assert_(np.modf(a, None) == {}) - assert_(np.modf(a, None, None) == {}) - assert_(np.modf(a, out=(None, None)) == {}) - with assert_raises(TypeError): - # Out argument must be tuple, since there are multiple outputs. - np.modf(a, out=None) - - # don't give positional and output argument, or too many arguments. - # wrong number of arguments in the tuple is an error too. 
- assert_raises(TypeError, np.multiply, a, b, 'one', out='two') - assert_raises(TypeError, np.multiply, a, b, 'one', 'two') - assert_raises(ValueError, np.multiply, a, b, out=('one', 'two')) - assert_raises(ValueError, np.multiply, a, out=()) - assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three')) - assert_raises(TypeError, np.modf, a, 'one', 'two', 'three') - assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three')) - assert_raises(ValueError, np.modf, a, out=('one',)) - - def test_ufunc_override_exception(self): - - class A(object): - def __array_ufunc__(self, *a, **kwargs): - raise ValueError("oops") - - a = A() - assert_raises(ValueError, np.negative, 1, out=a) - assert_raises(ValueError, np.negative, a) - assert_raises(ValueError, np.divide, 1., a) - - def test_ufunc_override_not_implemented(self): - - class A(object): - def __array_ufunc__(self, *args, **kwargs): - return NotImplemented - - msg = ("operand type(s) all returned NotImplemented from " - "__array_ufunc__(, '__call__', <*>): 'A'") - with assert_raises_regex(TypeError, fnmatch.translate(msg)): - np.negative(A()) - - msg = ("operand type(s) all returned NotImplemented from " - "__array_ufunc__(, '__call__', <*>, , " - "out=(1,)): 'A', 'object', 'int'") - with assert_raises_regex(TypeError, fnmatch.translate(msg)): - np.add(A(), object(), out=1) - - def test_ufunc_override_disabled(self): - - class OptOut(object): - __array_ufunc__ = None - - opt_out = OptOut() - - # ufuncs always raise - msg = "operand 'OptOut' does not support ufuncs" - with assert_raises_regex(TypeError, msg): - np.add(opt_out, 1) - with assert_raises_regex(TypeError, msg): - np.add(1, opt_out) - with assert_raises_regex(TypeError, msg): - np.negative(opt_out) - - # opt-outs still hold even when other arguments have pathological - # __array_ufunc__ implementations - - class GreedyArray(object): - def __array_ufunc__(self, *args, **kwargs): - return self - - greedy = GreedyArray() - assert_(np.negative(greedy) is greedy) - with assert_raises_regex(TypeError, msg): - np.add(greedy, opt_out) - with assert_raises_regex(TypeError, msg): - np.add(greedy, 1, out=opt_out) - - def test_gufunc_override(self): - # gufunc are just ufunc instances, but follow a different path, - # so check __array_ufunc__ overrides them properly. - class A(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return self, ufunc, method, inputs, kwargs - - inner1d = ncu_tests.inner1d - a = A() - res = inner1d(a, a) - assert_equal(res[0], a) - assert_equal(res[1], inner1d) - assert_equal(res[2], '__call__') - assert_equal(res[3], (a, a)) - assert_equal(res[4], {}) - - res = inner1d(1, 1, out=a) - assert_equal(res[0], a) - assert_equal(res[1], inner1d) - assert_equal(res[2], '__call__') - assert_equal(res[3], (1, 1)) - assert_equal(res[4], {'out': (a,)}) - - # wrong number of arguments in the tuple is an error too. - assert_raises(TypeError, inner1d, a, out='two') - assert_raises(TypeError, inner1d, a, a, 'one', out='two') - assert_raises(TypeError, inner1d, a, a, 'one', 'two') - assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) - assert_raises(ValueError, inner1d, a, a, out=()) - - def test_ufunc_override_with_super(self): - # NOTE: this class is given as an example in doc/subclassing.py; - # if you make any changes here, do update it there too. 
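Setting `__array_ufunc__ = None`, as `OptOut` does above, is the supported way for a class to refuse ufunc dispatch outright: every ufunc call involving such an operand raises `TypeError` with the message the test matches. A self-contained sketch, reusing the `OptOut` name from the deleted test:

    import numpy as np

    class OptOut(object):
        __array_ufunc__ = None  # opt out of ufunc dispatch entirely

    try:
        np.add(OptOut(), 1)
    except TypeError as exc:
        print(exc)  # operand 'OptOut' does not support ufuncs

The full subclassing pattern that the note above refers to continues below.
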
- class A(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - args = [] - in_no = [] - for i, input_ in enumerate(inputs): - if isinstance(input_, A): - in_no.append(i) - args.append(input_.view(np.ndarray)) - else: - args.append(input_) - - outputs = kwargs.pop('out', None) - out_no = [] - if outputs: - out_args = [] - for j, output in enumerate(outputs): - if isinstance(output, A): - out_no.append(j) - out_args.append(output.view(np.ndarray)) - else: - out_args.append(output) - kwargs['out'] = tuple(out_args) - else: - outputs = (None,) * ufunc.nout - - info = {} - if in_no: - info['inputs'] = in_no - if out_no: - info['outputs'] = out_no - - results = super(A, self).__array_ufunc__(ufunc, method, - *args, **kwargs) - if results is NotImplemented: - return NotImplemented - - if method == 'at': - if isinstance(inputs[0], A): - inputs[0].info = info - return - - if ufunc.nout == 1: - results = (results,) - - results = tuple((np.asarray(result).view(A) - if output is None else output) - for result, output in zip(results, outputs)) - if results and isinstance(results[0], A): - results[0].info = info - - return results[0] if len(results) == 1 else results - - class B(object): - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - if any(isinstance(input_, A) for input_ in inputs): - return "A!" - else: - return NotImplemented - - d = np.arange(5.) - # 1 input, 1 output - a = np.arange(5.).view(A) - b = np.sin(a) - check = np.sin(d) - assert_(np.all(check == b)) - assert_equal(b.info, {'inputs': [0]}) - b = np.sin(d, out=(a,)) - assert_(np.all(check == b)) - assert_equal(b.info, {'outputs': [0]}) - assert_(b is a) - a = np.arange(5.).view(A) - b = np.sin(a, out=a) - assert_(np.all(check == b)) - assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) - - # 1 input, 2 outputs - a = np.arange(5.).view(A) - b1, b2 = np.modf(a) - assert_equal(b1.info, {'inputs': [0]}) - b1, b2 = np.modf(d, out=(None, a)) - assert_(b2 is a) - assert_equal(b1.info, {'outputs': [1]}) - a = np.arange(5.).view(A) - b = np.arange(5.).view(A) - c1, c2 = np.modf(a, out=(a, b)) - assert_(c1 is a) - assert_(c2 is b) - assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) - - # 2 input, 1 output - a = np.arange(5.).view(A) - b = np.arange(5.).view(A) - c = np.add(a, b, out=a) - assert_(c is a) - assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) - # some tests with a non-ndarray subclass - a = np.arange(5.) - b = B() - assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) - assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) - assert_raises(TypeError, np.add, a, b) - a = a.view(A) - assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) - assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") - assert_(np.add(a, b) == "A!") - # regression check for gh-9102 -- tests ufunc.reduce implicitly. 
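The `class A` pattern above (unwrap the subclass inputs, delegate to `super().__array_ufunc__`, re-wrap the outputs) is the canonical way to cooperate with other overrides, and the gh-9102 reduction checks that follow exercise it. A pared-down variant of the same idea, with the illustrative name `Logged` and handling only single-output calls, might look like:

    import numpy as np

    class Logged(np.ndarray):
        # Unwrap any Logged inputs, defer the real work to ndarray's
        # implementation, then re-wrap the plain-ndarray result.
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            unwrapped = tuple(x.view(np.ndarray) if isinstance(x, Logged)
                              else x for x in inputs)
            result = super(Logged, self).__array_ufunc__(
                ufunc, method, *unwrapped, **kwargs)
            if result is NotImplemented:
                return NotImplemented
            print(ufunc.__name__, method)
            return np.asarray(result).view(Logged)

    x = np.arange(3.).view(Logged)
    y = np.sin(x)   # prints: sin __call__

Passing `NotImplemented` through unchanged is what lets another operand's override, like `class B` above, take its turn.
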
-        d = np.array([[1, 2, 3], [1, 2, 3]])
-        a = d.view(A)
-        c = a.any()
-        check = d.any()
-        assert_equal(c, check)
-        assert_equal(c.info, {'inputs': [0]})
-        c = a.max()
-        check = d.max()
-        assert_equal(c, check)
-        assert_equal(c.info, {'inputs': [0]})
-        b = np.array(0).view(A)
-        c = a.max(out=b)
-        assert_equal(c, check)
-        assert_(c is b)
-        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
-        check = a.max(axis=0)
-        b = np.zeros_like(check).view(A)
-        c = a.max(axis=0, out=b)
-        assert_equal(c, check)
-        assert_(c is b)
-        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
-        # simple explicit tests of reduce, accumulate, reduceat
-        check = np.add.reduce(d, axis=1)
-        c = np.add.reduce(a, axis=1)
-        assert_equal(c, check)
-        assert_equal(c.info, {'inputs': [0]})
-        b = np.zeros_like(c)
-        c = np.add.reduce(a, 1, None, b)
-        assert_equal(c, check)
-        assert_(c is b)
-        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
-        check = np.add.accumulate(d, axis=0)
-        c = np.add.accumulate(a, axis=0)
-        assert_equal(c, check)
-        assert_equal(c.info, {'inputs': [0]})
-        b = np.zeros_like(c)
-        c = np.add.accumulate(a, 0, None, b)
-        assert_equal(c, check)
-        assert_(c is b)
-        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
-        indices = [0, 2, 1]
-        check = np.add.reduceat(d, indices, axis=1)
-        c = np.add.reduceat(a, indices, axis=1)
-        assert_equal(c, check)
-        assert_equal(c.info, {'inputs': [0]})
-        b = np.zeros_like(c)
-        c = np.add.reduceat(a, indices, 1, None, b)
-        assert_equal(c, check)
-        assert_(c is b)
-        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
-        # and a few tests for at
-        d = np.array([[1, 2, 3], [1, 2, 3]])
-        check = d.copy()
-        a = d.copy().view(A)
-        np.add.at(check, ([0, 1], [0, 2]), 1.)
-        np.add.at(a, ([0, 1], [0, 2]), 1.)
-        assert_equal(a, check)
-        assert_equal(a.info, {'inputs': [0]})
-        b = np.array(1.).view(A)
-        a = d.copy().view(A)
-        np.add.at(a, ([0, 1], [0, 2]), b)
-        assert_equal(a, check)
-        assert_equal(a.info, {'inputs': [0, 2]})
-
-
-class TestChoose(object):
-    def test_mixed(self):
-        c = np.array([True, True])
-        a = np.array([True, True])
-        assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
-
-
-class TestRationalFunctions(object):
-    def test_lcm(self):
-        self._test_lcm_inner(np.int16)
-        self._test_lcm_inner(np.uint16)
-
-    def test_lcm_object(self):
-        self._test_lcm_inner(np.object_)
-
-    def test_gcd(self):
-        self._test_gcd_inner(np.int16)
-        self._test_gcd_inner(np.uint16)
-
-    def test_gcd_object(self):
-        self._test_gcd_inner(np.object_)
-
-    def _test_lcm_inner(self, dtype):
-        # basic use
-        a = np.array([12, 120], dtype=dtype)
-        b = np.array([20, 200], dtype=dtype)
-        assert_equal(np.lcm(a, b), [60, 600])
-
-        if not issubclass(dtype, np.unsignedinteger):
-            # negatives are ignored
-            a = np.array([12, -12, 12, -12], dtype=dtype)
-            b = np.array([20, 20, -20, -20], dtype=dtype)
-            assert_equal(np.lcm(a, b), [60]*4)
-
-        # reduce
-        a = np.array([3, 12, 20], dtype=dtype)
-        assert_equal(np.lcm.reduce(a), 60)
-
-        # broadcasting, and a test including 0
-        a = np.arange(6).astype(dtype)
-        b = 20
-        assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
-
-    def _test_gcd_inner(self, dtype):
-        # basic use
-        a = np.array([12, 120], dtype=dtype)
-        b = np.array([20, 200], dtype=dtype)
-        assert_equal(np.gcd(a, b), [4, 40])
-
-        if not issubclass(dtype, np.unsignedinteger):
-            # negatives are ignored
-            a = np.array([12, -12, 12, -12], dtype=dtype)
-            b = np.array([20, 20, -20, -20], dtype=dtype)
-            assert_equal(np.gcd(a, b), [4]*4)
-
-        # reduce
-        a = np.array([15, 25, 35], dtype=dtype)
-        assert_equal(np.gcd.reduce(a), 5)
-
-        # broadcasting, and a test
including 0 - a = np.arange(6).astype(dtype) - b = 20 - assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) - - def test_lcm_overflow(self): - # verify that we don't overflow when a*b does overflow - big = np.int32(np.iinfo(np.int32).max // 11) - a = 2*big - b = 5*big - assert_equal(np.lcm(a, b), 10*big) - - def test_gcd_overflow(self): - for dtype in (np.int32, np.int64): - # verify that we don't overflow when taking abs(x) - # not relevant for lcm, where the result is unrepresentable anyway - a = dtype(np.iinfo(dtype).min) # negative power of two - q = -(a // 4) - assert_equal(np.gcd(a, q*3), q) - assert_equal(np.gcd(a, -q*3), q) - - def test_decimal(self): - from decimal import Decimal - a = np.array([1, 1, -1, -1]) * Decimal('0.20') - b = np.array([1, -1, 1, -1]) * Decimal('0.12') - - assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) - assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) - - def test_float(self): - # not well-defined on float due to rounding errors - assert_raises(TypeError, np.gcd, 0.3, 0.4) - assert_raises(TypeError, np.lcm, 0.3, 0.4) - - def test_builtin_long(self): - # sanity check that array coercion is alright for builtin longs - assert_equal(np.array(2**200).item(), 2**200) - - # expressed as prime factors - a = np.array(2**100 * 3**5) - b = np.array([2**100 * 5**7, 2**50 * 3**10]) - assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) - assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) - - assert_equal(np.gcd(2**100, 3**100), 1) - - -class TestRoundingFunctions(object): - - def test_object_direct(self): - """ test direct implementation of these magic methods """ - class C: - def __floor__(self): - return 1 - def __ceil__(self): - return 2 - def __trunc__(self): - return 3 - - arr = np.array([C(), C()]) - assert_equal(np.floor(arr), [1, 1]) - assert_equal(np.ceil(arr), [2, 2]) - assert_equal(np.trunc(arr), [3, 3]) - - def test_object_indirect(self): - """ test implementations via __float__ """ - class C: - def __float__(self): - return -2.5 - - arr = np.array([C(), C()]) - assert_equal(np.floor(arr), [-3, -3]) - assert_equal(np.ceil(arr), [-2, -2]) - with pytest.raises(TypeError): - np.trunc(arr) # consistent with math.trunc - - def test_fraction(self): - f = Fraction(-4, 3) - assert_equal(np.floor(f), -2) - assert_equal(np.ceil(f), -1) - assert_equal(np.trunc(f), -1) - - -class TestComplexFunctions(object): - funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, - np.arctanh, np.sin, np.cos, np.tan, np.exp, - np.exp2, np.log, np.sqrt, np.log10, np.log2, - np.log1p] - - def test_it(self): - for f in self.funcs: - if f is np.arccosh: - x = 1.5 - else: - x = .5 - fr = f(x) - fz = f(complex(x)) - assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) - assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) - - def test_precisions_consistent(self): - z = 1 + 1j - for f in self.funcs: - fcf = f(np.csingle(z)) - fcd = f(np.cdouble(z)) - fcl = f(np.clongdouble(z)) - assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f) - assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f) - - def test_branch_cuts(self): - # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) - - _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) - _check_branch_cut(np.arccos, [ -2, 2], [1j, 
1j], 1, -1, True) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) - - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) - _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) - _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) - - # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) - _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) - - _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) - _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) - - def test_branch_cuts_complex64(self): - # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) - - _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) - - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) - _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - - # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) - - _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) - - def test_against_cmath(self): - import cmath - - points = [-1-1j, -1+1j, +1-1j, +1+1j] - name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', - 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} - atol = 4*np.finfo(complex).eps - for func in self.funcs: - fname = func.__name__.split('.')[-1] - cname = name_map.get(fname, fname) - try: - cfunc = getattr(cmath, cname) - except AttributeError: - continue - for p in points: - a = complex(func(np.complex_(p))) - b = cfunc(p) - assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b)) - - @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex]) - def test_loss_of_precision(self, dtype): - """Check loss of precision in complex arc* functions""" - - # Check against known-good functions - - info = np.finfo(dtype) - real_dtype = dtype(0.).real.dtype - eps = info.eps - - def check(x, rtol): - x = x.astype(real_dtype) - - z = x.astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsinh')) - - z = (1j*x).astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsin')) - - z = x.astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) - assert_(np.all(d < 
rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
-                                       'arctanh'))
-
-            z = (1j*x).astype(dtype)
-            d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
-            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
-                                       'arctan'))
-
-        # The switchover was chosen as 1e-3; hence there can be up to
-        # ~eps/1e-3 of relative cancellation error before it
-
-        x_series = np.logspace(-20, -3.001, 200)
-        x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
-
-        if dtype is np.longcomplex:
-            # It's not guaranteed that the system-provided arc functions
-            # are accurate down to a few epsilons. (Eg. on Linux 64-bit)
-            # So, give more leeway for long complex tests here:
-            # Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19.
-            check(x_series, 50.0*eps)
-        else:
-            check(x_series, 2.1*eps)
-        check(x_basic, 2.0*eps/1e-3)
-
-        # Check a few points
-
-        z = np.array([1e-5*(1+1j)], dtype=dtype)
-        p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
-        d = np.absolute(1-np.arctanh(z)/p)
-        assert_(np.all(d < 1e-15))
-
-        p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
-        d = np.absolute(1-np.arcsinh(z)/p)
-        assert_(np.all(d < 1e-15))
-
-        p = 9.999999999333333333e-6j + 1.000000000066666666e-5
-        d = np.absolute(1-np.arctan(z)/p)
-        assert_(np.all(d < 1e-15))
-
-        p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
-        d = np.absolute(1-np.arcsin(z)/p)
-        assert_(np.all(d < 1e-15))
-
-        # Check continuity across switchover points
-
-        def check(func, z0, d=1):
-            z0 = np.asarray(z0, dtype=dtype)
-            zp = z0 + abs(z0) * d * eps * 2
-            zm = z0 - abs(z0) * d * eps * 2
-            assert_(np.all(zp != zm), (zp, zm))
-
-            # NB: the cancellation error at the switchover is at least eps
-            good = (abs(func(zp) - func(zm)) < 2*eps)
-            assert_(np.all(good), (func, z0[~good]))
-
-        for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan):
-            pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3)
-                   if rp != 0 or ip != 0]
-            check(func, pts, 1)
-            check(func, pts, 1j)
-            check(func, pts, 1+1j)
-
-
-class TestAttributes(object):
-    def test_attributes(self):
-        add = ncu.add
-        assert_equal(add.__name__, 'add')
-        assert_(add.ntypes >= 18)  # don't fail if types added
-        assert_('ii->i' in add.types)
-        assert_equal(add.nin, 2)
-        assert_equal(add.nout, 1)
-        assert_equal(add.identity, 0)
-
-    def test_doc(self):
-        # don't bother checking the long list of kwargs, which are likely to
-        # change
-        assert_(ncu.add.__doc__.startswith(
-            "add(x1, x2, /, out=None, *, where=True"))
-        assert_(ncu.frexp.__doc__.startswith(
-            "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
-
-
-class TestSubclass(object):
-
-    def test_subclass_op(self):
-
-        class simple(np.ndarray):
-            def __new__(subtype, shape):
-                self = np.ndarray.__new__(subtype, shape, dtype=object)
-                self.fill(0)
-                return self
-
-        a = simple((3, 4))
-        assert_equal(a+a, a)
-
-def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
-                      dtype=complex):
-    """
-    Check for a branch cut in a function.
-
-    Assert that `x0` lies on a branch cut of function `f` and `f` is
-    continuous from the direction `dx`.
- - Parameters - ---------- - f : func - Function to check - x0 : array-like - Point on branch cut - dx : array-like - Direction to check continuity in - re_sign, im_sign : {1, -1} - Change of sign of the real or imaginary part expected - sig_zero_ok : bool - Whether to check if the branch cut respects signed zero (if applicable) - dtype : dtype - Dtype to check (should be complex) - - """ - x0 = np.atleast_1d(x0).astype(dtype) - dx = np.atleast_1d(dx).astype(dtype) - - if np.dtype(dtype).char == 'F': - scale = np.finfo(dtype).eps * 1e2 - atol = np.float32(1e-2) - else: - scale = np.finfo(dtype).eps * 1e3 - atol = 1e-4 - - y0 = f(x0) - yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) - ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) - - assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) - assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) - - if sig_zero_ok: - # check that signed zeros also work as a displacement - jr = (x0.real == 0) & (dx.real != 0) - ji = (x0.imag == 0) & (dx.imag != 0) - if np.any(jr): - x = x0[jr] - x.real = np.NZERO - ym = f(x) - assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym)) - assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym)) - - if np.any(ji): - x = x0[ji] - x.imag = np.NZERO - ym = f(x) - assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym)) - assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym)) - -def test_copysign(): - assert_(np.copysign(1, -1) == -1) - with np.errstate(divide="ignore"): - assert_(1 / np.copysign(0, -1) < 0) - assert_(1 / np.copysign(0, 1) > 0) - assert_(np.signbit(np.copysign(np.nan, -1))) - assert_(not np.signbit(np.copysign(np.nan, 1))) - -def _test_nextafter(t): - one = t(1) - two = t(2) - zero = t(0) - eps = np.finfo(t).eps - assert_(np.nextafter(one, two) - one == eps) - assert_(np.nextafter(one, zero) - one < 0) - assert_(np.isnan(np.nextafter(np.nan, one))) - assert_(np.isnan(np.nextafter(one, np.nan))) - assert_(np.nextafter(one, one) == one) - -def test_nextafter(): - return _test_nextafter(np.float64) - - -def test_nextafterf(): - return _test_nextafter(np.float32) - - -@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double") -@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), - reason="IBM double double") -def test_nextafterl(): - return _test_nextafter(np.longdouble) - - -def test_nextafter_0(): - for t, direction in itertools.product(np.sctypes['float'], (1, -1)): - tiny = np.finfo(t).tiny - assert_(0. 
< direction * np.nextafter(t(0), t(direction)) < tiny) - assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) - -def _test_spacing(t): - one = t(1) - eps = np.finfo(t).eps - nan = t(np.nan) - inf = t(np.inf) - with np.errstate(invalid='ignore'): - assert_(np.spacing(one) == eps) - assert_(np.isnan(np.spacing(nan))) - assert_(np.isnan(np.spacing(inf))) - assert_(np.isnan(np.spacing(-inf))) - assert_(np.spacing(t(1e30)) != 0) - -def test_spacing(): - return _test_spacing(np.float64) - -def test_spacingf(): - return _test_spacing(np.float32) - - -@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), - reason="long double is same as double") -@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), - reason="IBM double double") -def test_spacingl(): - return _test_spacing(np.longdouble) - -def test_spacing_gfortran(): - # Reference from this fortran file, built with gfortran 4.3.3 on linux - # 32bits: - # PROGRAM test_spacing - # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) - # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) - # - # WRITE(*,*) spacing(0.00001_DBL) - # WRITE(*,*) spacing(1.0_DBL) - # WRITE(*,*) spacing(1000._DBL) - # WRITE(*,*) spacing(10500._DBL) - # - # WRITE(*,*) spacing(0.00001_SGL) - # WRITE(*,*) spacing(1.0_SGL) - # WRITE(*,*) spacing(1000._SGL) - # WRITE(*,*) spacing(10500._SGL) - # END PROGRAM - ref = {np.float64: [1.69406589450860068E-021, - 2.22044604925031308E-016, - 1.13686837721616030E-013, - 1.81898940354585648E-012], - np.float32: [9.09494702E-13, - 1.19209290E-07, - 6.10351563E-05, - 9.76562500E-04]} - - for dt, dec_ in zip([np.float32, np.float64], (10, 20)): - x = np.array([1e-5, 1, 1000, 10500], dtype=dt) - assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) - -def test_nextafter_vs_spacing(): - # XXX: spacing does not handle long double yet - for t in [np.float32, np.float64]: - for _f in [1, 1e-5, 1000]: - f = t(_f) - f1 = t(_f + 1) - assert_(np.nextafter(f, f1) - f == np.spacing(f)) - -def test_pos_nan(): - """Check np.nan is a positive nan.""" - assert_(np.signbit(np.nan) == 0) - -def test_reduceat(): - """Test bug in reduceat when structured arrays are not copied.""" - db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) - a = np.empty([100], dtype=db) - a['name'] = 'Simple' - a['time'] = 10 - a['value'] = 100 - indx = [0, 7, 15, 25] - - h2 = [] - val1 = indx[0] - for val2 in indx[1:]: - h2.append(np.add.reduce(a['value'][val1:val2])) - val1 = val2 - h2.append(np.add.reduce(a['value'][val1:])) - h2 = np.array(h2) - - # test buffered -- this should work - h1 = np.add.reduceat(a['value'], indx) - assert_array_almost_equal(h1, h2) - - # This is when the error occurs. 
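For reference, before the unbuffered re-run below: `np.add.reduceat(x, indx)` computes one reduction per slice `x[indx[i]:indx[i+1]]`, with the last slice running to the end of the array, which is exactly what the `h2` loop above assembles by hand. A standalone example:

    import numpy as np

    x = np.arange(8, dtype=np.float32)
    indx = [0, 3, 6]
    # sums of x[0:3], x[3:6] and x[6:]:
    print(np.add.reduceat(x, indx))  # [ 3. 12. 13.]
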
- # test no buffer - np.setbufsize(32) - h1 = np.add.reduceat(a['value'], indx) - np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) - assert_array_almost_equal(h1, h2) - -def test_reduceat_empty(): - """Reduceat should work with empty arrays""" - indices = np.array([], 'i4') - x = np.array([], 'f8') - result = np.add.reduceat(x, indices) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (0,)) - # Another case with a slightly different zero-sized shape - x = np.ones((5, 2)) - result = np.add.reduceat(x, [], axis=0) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (0, 2)) - result = np.add.reduceat(x, [], axis=1) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (5, 0)) - -def test_complex_nan_comparisons(): - nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] - fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), - complex(1, 1), complex(-1, -1), complex(0, 0)] - - with np.errstate(invalid='ignore'): - for x in nans + fins: - x = np.array([x]) - for y in nans + fins: - y = np.array([y]) - - if np.isfinite(x) and np.isfinite(y): - continue - - assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) - assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) - assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) - assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) - assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) - - -def test_rint_big_int(): - # np.rint bug for large integer values on Windows 32-bit and MKL - # https://github.com/numpy/numpy/issues/6685 - val = 4607998452777363968 - # This is exactly representable in floating point - assert_equal(val, int(float(val))) - # Rint should not change the value - assert_equal(val, np.rint(val)) - - -def test_signaling_nan_exceptions(): - with assert_no_warnings(): - a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') - np.isnan(a) - -@pytest.mark.parametrize("arr", [ - np.arange(2), - np.matrix([0, 1]), - np.matrix([[0, 1], [2, 5]]), - ]) -def test_outer_subclass_preserve(arr): - # for gh-8661 - class foo(np.ndarray): pass - actual = np.multiply.outer(arr.view(foo), arr.view(foo)) - assert actual.__class__.__name__ == 'foo' diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_accuracy.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_accuracy.py deleted file mode 100644 index fec1807..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_accuracy.py +++ /dev/null @@ -1,54 +0,0 @@ -import numpy as np -import platform -from os import path -import sys -import pytest -from ctypes import * -from numpy.testing import assert_array_max_ulp - -runtest = sys.platform.startswith('linux') and (platform.machine() == 'x86_64') -platform_skip = pytest.mark.skipif(not runtest, - reason=""" - stick to x86_64 and linux platforms. - test seems to fail on some of ARM and power - architectures. 
- """) - -# convert string to hex function taken from: -# https://stackoverflow.com/questions/1592158/convert-hex-to-float # -def convert(s): - i = int(s, 16) # convert from hex to a Python int - cp = pointer(c_int(i)) # make this into a c integer - fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer - return fp.contents.value # dereference the pointer, get the float - -str_to_float = np.vectorize(convert) -files = ['umath-validation-set-exp', - 'umath-validation-set-log', - 'umath-validation-set-sin', - 'umath-validation-set-cos'] - -class TestAccuracy(object): - @pytest.mark.xfail(reason="Fails for MacPython/numpy-wheels builds") - def test_validate_transcendentals(self): - with np.errstate(all='ignore'): - for filename in files: - data_dir = path.join(path.dirname(__file__), 'data') - filepath = path.join(data_dir, filename) - with open(filepath) as fid: - file_without_comments = (r for r in fid if not r[0] in ('$', '#')) - data = np.genfromtxt(file_without_comments, - dtype=('|S39','|S39','|S39',int), - names=('type','input','output','ulperr'), - delimiter=',', - skip_header=1) - npfunc = getattr(np, filename.split('-')[3]) - for datatype in np.unique(data['type']): - data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str)), dtype=eval(datatype)) - perm = np.random.permutation(len(inval)) - inval = inval[perm] - outval = outval[perm] - maxulperr = data_subset['ulperr'].max() - assert_array_max_ulp(npfunc(inval), outval, maxulperr) diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_complex.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_complex.py deleted file mode 100644 index 1f5b407..0000000 --- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_umath_complex.py +++ /dev/null @@ -1,544 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import platform -import pytest - -import numpy as np -# import the c-extension module directly since _arg is not exported via umath -import numpy.core._multiarray_umath as ncu -from numpy.testing import ( - assert_raises, assert_equal, assert_array_equal, assert_almost_equal - ) - -# TODO: branch cuts (use Pauli code) -# TODO: conj 'symmetry' -# TODO: FPU exceptions - -# At least on Windows the results of many complex functions are not conforming -# to the C99 standard. See ticket 1574. -# Ditto for Solaris (ticket 1642) and OS X on PowerPC. -#FIXME: this will probably change when we require full C99 campatibility -with np.errstate(all='ignore'): - functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) - or (np.log(complex(np.NZERO, 0)).imag != np.pi)) -# TODO: replace with a check on whether platform-provided C99 funcs are used -xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) - -# TODO This can be xfail when the generator functions are got rid of. 
-platform_skip = pytest.mark.skipif(xfail_complex_tests, - reason="Inadequate C99 complex support") - - - -class TestCexp(object): - def test_simple(self): - check = check_complex_value - f = np.exp - - check(f, 1, 0, np.exp(1), 0, False) - check(f, 0, 1, np.cos(1), np.sin(1), False) - - ref = np.exp(1) * complex(np.cos(1), np.sin(1)) - check(f, 1, 1, ref.real, ref.imag, False) - - @platform_skip - def test_special_values(self): - # C99: Section G 6.3.1 - - check = check_complex_value - f = np.exp - - # cexp(+-0 + 0i) is 1 + 0i - check(f, np.PZERO, 0, 1, 0, False) - check(f, np.NZERO, 0, 1, 0, False) - - # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU - # exception - check(f, 1, np.inf, np.nan, np.nan) - check(f, -1, np.inf, np.nan, np.nan) - check(f, 0, np.inf, np.nan, np.nan) - - # cexp(inf + 0i) is inf + 0i - check(f, np.inf, 0, np.inf, 0) - - # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y - check(f, -np.inf, 1, np.PZERO, np.PZERO) - check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) - - # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y - check(f, np.inf, 1, np.inf, np.inf) - check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) - - # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) - def _check_ninf_inf(dummy): - msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(-np.inf, np.inf))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform % (z.real, z.imag)) - - _check_ninf_inf(None) - - # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. - def _check_inf_inf(dummy): - msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(np.inf, np.inf))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - - _check_inf_inf(None) - - # cexp(-inf + nan i) is +-0 +- 0i - def _check_ninf_nan(dummy): - msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(-np.inf, np.nan))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform % (z.real, z.imag)) - - _check_ninf_nan(None) - - # cexp(inf + nan i) is +-inf + nan - def _check_inf_nan(dummy): - msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" - with np.errstate(invalid='ignore'): - z = f(np.array(complex(np.inf, np.nan))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - - _check_inf_nan(None) - - # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU - # ex) - check(f, np.nan, 1, np.nan, np.nan) - check(f, np.nan, -1, np.nan, np.nan) - - check(f, np.nan, np.inf, np.nan, np.nan) - check(f, np.nan, -np.inf, np.nan, np.nan) - - # cexp(nan + nani) is nan + nani - check(f, np.nan, np.nan, np.nan, np.nan) - - # TODO This can be xfail when the generator functions are got rid of. 
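The special values exercised above come from C99 Annex G and can be reproduced directly on a platform with conforming complex support, which is what `platform_skip` guards. On such a platform one would expect, for example:

    import numpy as np

    with np.errstate(invalid='ignore'):
        print(np.exp(complex(0.0, 0.0)))      # (1+0j):   cexp(+0 + 0i) = 1 + 0i
        print(np.exp(complex(np.inf, 0.0)))   # (inf+0j): cexp(inf + 0i) = inf + 0i
        print(np.exp(complex(-np.inf, 1.0)))  # ~0j:      +0 * (cos 1 + i sin 1)
        print(np.exp(complex(1.0, np.inf)))   # (nan+nanj)
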
- @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") - def test_special_values2(self): - # XXX: most implementations get it wrong here (including glibc <= 2.10) - # cexp(nan + 0i) is nan + 0i - check = check_complex_value - f = np.exp - - check(f, np.nan, 0, np.nan, 0) - -class TestClog(object): - def test_simple(self): - x = np.array([1+0j, 1+2j]) - y_r = np.log(np.abs(x)) + 1j * np.angle(x) - y = np.log(x) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - @platform_skip - @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") - def test_special_values(self): - xl = [] - yl = [] - - # From C99 std (Sec 6.3.2) - # XXX: check exceptions raised - # --- raise for invalid fails. - - # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' - # floating-point exception. - with np.errstate(divide='raise'): - x = np.array([np.NZERO], dtype=complex) - y = complex(-np.inf, np.pi) - assert_raises(FloatingPointError, np.log, x) - with np.errstate(divide='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' - # floating-point exception. - with np.errstate(divide='raise'): - x = np.array([0], dtype=complex) - y = complex(-np.inf, 0) - assert_raises(FloatingPointError, np.log, x) - with np.errstate(divide='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(x + i inf returns +inf + i pi /2, for finite x. - x = np.array([complex(1, np.inf)], dtype=complex) - y = complex(np.inf, 0.5 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-1, np.inf)], dtype=complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(x + iNaN) returns NaN + iNaN and optionally raises the - # 'invalid' floating- point exception, for finite x. - with np.errstate(invalid='raise'): - x = np.array([complex(1., np.nan)], dtype=complex) - y = complex(np.nan, np.nan) - #assert_raises(FloatingPointError, np.log, x) - with np.errstate(invalid='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - with np.errstate(invalid='raise'): - x = np.array([np.inf + 1j * np.nan], dtype=complex) - #assert_raises(FloatingPointError, np.log, x) - with np.errstate(invalid='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. - x = np.array([-np.inf + 1j], dtype=complex) - y = complex(np.inf, np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. - x = np.array([np.inf + 1j], dtype=complex) - y = complex(np.inf, 0) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(- inf + i inf) returns +inf + i3pi /4. - x = np.array([complex(-np.inf, np.inf)], dtype=complex) - y = complex(np.inf, 0.75 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + i inf) returns +inf + ipi /4. - x = np.array([complex(np.inf, np.inf)], dtype=complex) - y = complex(np.inf, 0.25 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+/- inf + iNaN) returns +inf + iNaN. 
- x = np.array([complex(np.inf, np.nan)], dtype=complex) - y = complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-np.inf, np.nan)], dtype=complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iy) returns NaN + iNaN and optionally raises the - # 'invalid' floating-point exception, for finite y. - x = np.array([complex(np.nan, 1)], dtype=complex) - y = complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + i inf) returns +inf + iNaN. - x = np.array([complex(np.nan, np.inf)], dtype=complex) - y = complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iNaN) returns NaN + iNaN. - x = np.array([complex(np.nan, np.nan)], dtype=complex) - y = complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(conj(z)) = conj(clog(z)). - xa = np.array(xl, dtype=complex) - ya = np.array(yl, dtype=complex) - with np.errstate(divide='ignore'): - for i in range(len(xa)): - assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) - - -class TestCsqrt(object): - - def test_simple(self): - # sqrt(1) - check_complex_value(np.sqrt, 1, 0, 1, 0) - - # sqrt(1i) - rres = 0.5*np.sqrt(2) - ires = rres - check_complex_value(np.sqrt, 0, 1, rres, ires, False) - - # sqrt(-1) - check_complex_value(np.sqrt, -1, 0, 0, 1) - - def test_simple_conjugate(self): - ref = np.conj(np.sqrt(complex(1, 1))) - - def f(z): - return np.sqrt(np.conj(z)) - - check_complex_value(f, 1, 1, ref.real, ref.imag, False) - - #def test_branch_cut(self): - # _check_branch_cut(f, -1, 0, 1, -1) - - @platform_skip - def test_special_values(self): - # C99: Sec G 6.4.2 - - check = check_complex_value - f = np.sqrt - - # csqrt(+-0 + 0i) is 0 + 0i - check(f, np.PZERO, 0, 0, 0) - check(f, np.NZERO, 0, 0, 0) - - # csqrt(x + infi) is inf + infi for any x (including NaN) - check(f, 1, np.inf, np.inf, np.inf) - check(f, -1, np.inf, np.inf, np.inf) - - check(f, np.PZERO, np.inf, np.inf, np.inf) - check(f, np.NZERO, np.inf, np.inf, np.inf) - check(f, np.inf, np.inf, np.inf, np.inf) - check(f, -np.inf, np.inf, np.inf, np.inf) - check(f, -np.nan, np.inf, np.inf, np.inf) - - # csqrt(x + nani) is nan + nani for any finite x - check(f, 1, np.nan, np.nan, np.nan) - check(f, -1, np.nan, np.nan, np.nan) - check(f, 0, np.nan, np.nan, np.nan) - - # csqrt(-inf + yi) is +0 + infi for any finite y > 0 - check(f, -np.inf, 1, np.PZERO, np.inf) - - # csqrt(inf + yi) is +inf + 0i for any finite y > 0 - check(f, np.inf, 1, np.inf, np.PZERO) - - # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) - def _check_ninf_nan(dummy): - msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" - z = np.sqrt(np.array(complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. 
- with np.errstate(invalid='ignore'): - if not (np.isnan(z.real) and np.isinf(z.imag)): - raise AssertionError(msgform % (z.real, z.imag)) - - _check_ninf_nan(None) - - # csqrt(+inf + nani) is inf + nani - check(f, np.inf, np.nan, np.inf, np.nan) - - # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x - # + nani) - check(f, np.nan, 0, np.nan, np.nan) - check(f, np.nan, 1, np.nan, np.nan) - check(f, np.nan, np.nan, np.nan, np.nan) - - # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch - # cuts first) - -class TestCpow(object): - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - y_r = x ** 2 - y = np.power(x, 2) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - def test_scalar(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = list(range(len(x))) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy complex scalars - n_r = [x[i] ** y[i] for i in lx] - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - - def test_array(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = list(range(len(x))) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy arrays - n_r = x ** y - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - -class TestCabs(object): - def setup(self): - self.olderr = np.seterr(invalid='ignore') - - def teardown(self): - np.seterr(**self.olderr) - - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) - y = np.abs(x) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - def test_fabs(self): - # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) - x = np.array([1+0j], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(1, np.NZERO)], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(np.inf, np.NZERO)], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(np.nan, np.NZERO)], dtype=complex) - assert_array_equal(np.abs(x), np.real(x)) - - def test_cabs_inf_nan(self): - x, y = [], [] - - # cabs(+-nan + nani) returns nan - x.append(np.nan) - y.append(np.nan) - check_real_value(np.abs, np.nan, np.nan, np.nan) - - x.append(np.nan) - y.append(-np.nan) - check_real_value(np.abs, -np.nan, np.nan, np.nan) - - # According to C99 standard, if exactly one of the real/part is inf and - # the other nan, then cabs should return inf - x.append(np.inf) - y.append(np.nan) - check_real_value(np.abs, np.inf, np.nan, np.inf) - - x.append(-np.inf) - y.append(np.nan) - check_real_value(np.abs, -np.inf, np.nan, np.inf) - - # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) - def f(a): - return np.abs(np.conj(a)) - - def g(a, b): - return np.abs(complex(a, b)) - - xa = np.array(x, dtype=complex) - for i in range(len(xa)): - ref = g(x[i], y[i]) - check_real_value(f, x[i], y[i], ref) - -class TestCarg(object): - def 
test_simple(self):
-        check_real_value(ncu._arg, 1, 0, 0, False)
-        check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
-
-        check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
-        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
-
-    # TODO This can be xfail when the generator functions are got rid of.
-    @pytest.mark.skip(
-        reason="Complex arithmetic with signed zero fails on most platforms")
-    def test_zero(self):
-        # carg(-0 +- 0i) returns +- pi
-        check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
-        check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
-
-        # carg(+0 +- 0i) returns +- 0
-        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
-        check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
-
-        # carg(x +- 0i) returns +- 0 for x > 0
-        check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
-        check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
-
-        # carg(x +- 0i) returns +- pi for x < 0
-        check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
-        check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
-
-        # carg(+- 0 + yi) returns pi/2 for y > 0
-        check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
-        check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
-
-        # carg(+- 0 + yi) returns -pi/2 for y < 0
-        check_real_value(ncu._arg, np.PZERO, -1, -0.5 * np.pi, False)
-        check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
-
-    #def test_branch_cuts(self):
-    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
-
-    def test_special_values(self):
-        # carg(-np.inf +- yi) returns +-pi for finite y > 0
-        check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
-        check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
-
-        # carg(np.inf +- yi) returns +-0 for finite y > 0
-        check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
-        check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
-
-        # carg(x +- np.infi) returns +-pi/2 for finite x
-        check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
-        check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
-
-        # carg(-np.inf +- np.infi) returns +-3pi/4
-        check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
-        check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
-
-        # carg(np.inf +- np.infi) returns +-pi/4
-        check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
-        check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
-
-        # carg(x + yi) returns np.nan if x or y is nan
-        check_real_value(ncu._arg, np.nan, 0, np.nan, False)
-        check_real_value(ncu._arg, 0, np.nan, np.nan, False)
-
-        check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
-        check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
-
-
-def check_real_value(f, x1, y1, x, exact=True):
-    z1 = np.array([complex(x1, y1)])
-    if exact:
-        assert_equal(f(z1), x)
-    else:
-        assert_almost_equal(f(z1), x)
-
-
-def check_complex_value(f, x1, y1, x2, y2, exact=True):
-    z1 = np.array([complex(x1, y1)])
-    z2 = complex(x2, y2)
-    with np.errstate(invalid='ignore'):
-        if exact:
-            assert_equal(f(z1), z2)
-        else:
-            assert_almost_equal(f(z1), z2)
diff --git a/venv/lib/python3.7/site-packages/numpy/core/tests/test_unicode.py b/venv/lib/python3.7/site-packages/numpy/core/tests/test_unicode.py
deleted file mode 100644
index 2ffd880..0000000
--- a/venv/lib/python3.7/site-packages/numpy/core/tests/test_unicode.py
+++ /dev/null
@@ -1,396 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-
-import numpy as np
-from numpy.compat import unicode
-from numpy.testing import assert_, assert_equal,
assert_array_equal - -# Guess the UCS length for this python interpreter -if sys.version_info[:2] >= (3, 3): - # Python 3.3 uses a flexible string representation - ucs4 = False - - def buffer_length(arr): - if isinstance(arr, unicode): - arr = str(arr) - if not arr: - charmax = 0 - else: - charmax = max([ord(c) for c in arr]) - if charmax < 256: - size = 1 - elif charmax < 65536: - size = 2 - else: - size = 4 - return size * len(arr) - v = memoryview(arr) - if v.shape is None: - return len(v) * v.itemsize - else: - return np.prod(v.shape) * v.itemsize -else: - if len(buffer(u'u')) == 4: - ucs4 = True - else: - ucs4 = False - - def buffer_length(arr): - if isinstance(arr, np.ndarray): - return len(arr.data) - return len(buffer(arr)) - -# In both cases below we need to make sure that the byte swapped value (as -# UCS4) is still a valid unicode: -# Value that can be represented in UCS2 interpreters -ucs2_value = u'\u0900' -# Value that cannot be represented in UCS2 interpreters (but can in UCS4) -ucs4_value = u'\U00100900' - - -def test_string_cast(): - str_arr = np.array(["1234", "1234\0\0"], dtype='S') - uni_arr1 = str_arr.astype('>U') - uni_arr2 = str_arr.astype('>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP - -Our result type, an ndarray that must be of type double, be 1-dimensional -and is C-contiguous in memory: - ->>> array_1d_double = np.ctypeslib.ndpointer( -... dtype=np.double, -... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP - -Our C-function typically takes an array and updates its values -in-place. For example:: - - void foo_func(double* x, int length) - { - int i; - for (i = 0; i < length; i++) { - x[i] = i*i; - } - } - -We wrap it using: - ->>> _lib.foo_func.restype = None #doctest: +SKIP ->>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP - -Then, we're ready to call ``foo_func``: - ->>> out = np.empty(15, dtype=np.double) ->>> _lib.foo_func(out, len(out)) #doctest: +SKIP - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['load_library', 'ndpointer', 'ctypes_load_library', - 'c_intp', 'as_ctypes', 'as_array'] - -import os -from numpy import ( - integer, ndarray, dtype as _dtype, deprecate, array, frombuffer -) -from numpy.core.multiarray import _flagdict, flagsobj - -try: - import ctypes -except ImportError: - ctypes = None - -if ctypes is None: - def _dummy(*args, **kwds): - """ - Dummy object that raises an ImportError if ctypes is not available. - - Raises - ------ - ImportError - If ctypes is not available. - - """ - raise ImportError("ctypes is not available.") - ctypes_load_library = _dummy - load_library = _dummy - as_ctypes = _dummy - as_array = _dummy - from numpy import intp as c_intp - _ndptr_base = object -else: - import numpy.core._internal as nic - c_intp = nic._getintp_ctype() - del nic - _ndptr_base = ctypes.c_void_p - - # Adapted from Albert Strasheim - def load_library(libname, loader_path): - """ - It is possible to load a library using - >>> lib = ctypes.cdll[] # doctest: +SKIP - - But there are cross-platform considerations, such as library file extensions, - plus the fact Windows will just load the first library it finds with that name. - NumPy supplies the load_library function as a convenience. - - Parameters - ---------- - libname : str - Name of the library, which can have 'lib' as a prefix, - but without an extension. - loader_path : str - Where the library can be found. 
- - Returns - ------- - ctypes.cdll[libpath] : library object - A ctypes library object - - Raises - ------ - OSError - If there is no library with the expected extension, or the - library is defective and cannot be loaded. - """ - if ctypes.__version__ < '1.0.1': - import warnings - warnings.warn("All features of ctypes interface may not work " - "with ctypes < 1.0.1", stacklevel=2) - - ext = os.path.splitext(libname)[1] - if not ext: - # Try to load library with platform-specific name, otherwise - # default to libname.[so|pyd]. Sometimes, these files are built - # erroneously on non-linux platforms. - from numpy.distutils.misc_util import get_shared_lib_extension - so_ext = get_shared_lib_extension() - libname_ext = [libname + so_ext] - # mac, windows and linux >= py3.2 shared library and loadable - # module have different extensions so try both - so_ext2 = get_shared_lib_extension(is_python_ext=True) - if not so_ext2 == so_ext: - libname_ext.insert(0, libname + so_ext2) - else: - libname_ext = [libname] - - loader_path = os.path.abspath(loader_path) - if not os.path.isdir(loader_path): - libdir = os.path.dirname(loader_path) - else: - libdir = loader_path - - for ln in libname_ext: - libpath = os.path.join(libdir, ln) - if os.path.exists(libpath): - try: - return ctypes.cdll[libpath] - except OSError: - ## defective lib file - raise - ## if no successful return in the libname_ext loop: - raise OSError("no file with expected extension") - - ctypes_load_library = deprecate(load_library, 'ctypes_load_library', - 'load_library') - -def _num_fromflags(flaglist): - num = 0 - for val in flaglist: - num += _flagdict[val] - return num - -_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY'] -def _flags_fromnum(num): - res = [] - for key in _flagnames: - value = _flagdict[key] - if (num & value): - res.append(key) - return res - - -class _ndptr(_ndptr_base): - @classmethod - def from_param(cls, obj): - if not isinstance(obj, ndarray): - raise TypeError("argument must be an ndarray") - if cls._dtype_ is not None \ - and obj.dtype != cls._dtype_: - raise TypeError("array must have data type %s" % cls._dtype_) - if cls._ndim_ is not None \ - and obj.ndim != cls._ndim_: - raise TypeError("array must have %d dimension(s)" % cls._ndim_) - if cls._shape_ is not None \ - and obj.shape != cls._shape_: - raise TypeError("array must have shape %s" % str(cls._shape_)) - if cls._flags_ is not None \ - and ((obj.flags.num & cls._flags_) != cls._flags_): - raise TypeError("array must have flags %s" % - _flags_fromnum(cls._flags_)) - return obj.ctypes - - -class _concrete_ndptr(_ndptr): - """ - Like _ndptr, but with `_shape_` and `_dtype_` specified. - - Notably, this means the pointer has enough information to reconstruct - the array, which is not generally true. - """ - def _check_retval_(self): - """ - This method is called when this class is used as the .restype - attribute for a shared-library function, to automatically wrap the - pointer into an array. - """ - return self.contents - - @property - def contents(self): - """ - Get an ndarray viewing the data pointed to by this pointer. 
-
-        This mirrors the `contents` attribute of a normal ctypes pointer
-        """
-        full_dtype = _dtype((self._dtype_, self._shape_))
-        full_ctype = ctypes.c_char * full_dtype.itemsize
-        buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
-        return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
-
-
-# Factory for an array-checking class with from_param defined for
-# use with ctypes argtypes mechanism
-_pointer_type_cache = {}
-def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
-    """
-    Array-checking restype/argtypes.
-
-    An ndpointer instance is used to describe an ndarray in restypes
-    and argtypes specifications.  This approach is more flexible than
-    using, for example, ``POINTER(c_double)``, since several restrictions
-    can be specified, which are verified upon calling the ctypes function.
-    These include data type, number of dimensions, shape and flags.  If a
-    given array does not satisfy the specified restrictions,
-    a ``TypeError`` is raised.
-
-    Parameters
-    ----------
-    dtype : data-type, optional
-        Array data-type.
-    ndim : int, optional
-        Number of array dimensions.
-    shape : tuple of ints, optional
-        Array shape.
-    flags : str or tuple of str
-        Array flags; may be one or more of:
-
-          - C_CONTIGUOUS / C / CONTIGUOUS
-          - F_CONTIGUOUS / F / FORTRAN
-          - OWNDATA / O
-          - WRITEABLE / W
-          - ALIGNED / A
-          - WRITEBACKIFCOPY / X
-          - UPDATEIFCOPY / U
-
-    Returns
-    -------
-    klass : ndpointer type object
-        A type object, which is an ``_ndptr`` instance containing
-        dtype, ndim, shape and flags information.
-
-    Raises
-    ------
-    TypeError
-        If a given array does not satisfy the specified restrictions.
-
-    Examples
-    --------
-    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
-    ...                                                  ndim=1,
-    ...                                                  flags='C_CONTIGUOUS')]
-    ...     #doctest: +SKIP
-    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
-    ...
#doctest: +SKIP - - """ - - # normalize dtype to an Optional[dtype] - if dtype is not None: - dtype = _dtype(dtype) - - # normalize flags to an Optional[int] - num = None - if flags is not None: - if isinstance(flags, str): - flags = flags.split(',') - elif isinstance(flags, (int, integer)): - num = flags - flags = _flags_fromnum(num) - elif isinstance(flags, flagsobj): - num = flags.num - flags = _flags_fromnum(num) - if num is None: - try: - flags = [x.strip().upper() for x in flags] - except Exception: - raise TypeError("invalid flags specification") - num = _num_fromflags(flags) - - # normalize shape to an Optional[tuple] - if shape is not None: - try: - shape = tuple(shape) - except TypeError: - # single integer -> 1-tuple - shape = (shape,) - - cache_key = (dtype, ndim, shape, num) - - try: - return _pointer_type_cache[cache_key] - except KeyError: - pass - - # produce a name for the new type - if dtype is None: - name = 'any' - elif dtype.names is not None: - name = str(id(dtype)) - else: - name = dtype.str - if ndim is not None: - name += "_%dd" % ndim - if shape is not None: - name += "_"+"x".join(str(x) for x in shape) - if flags is not None: - name += "_"+"_".join(flags) - - if dtype is not None and shape is not None: - base = _concrete_ndptr - else: - base = _ndptr - - klass = type("ndpointer_%s"%name, (base,), - {"_dtype_": dtype, - "_shape_" : shape, - "_ndim_" : ndim, - "_flags_" : num}) - _pointer_type_cache[cache_key] = klass - return klass - - -if ctypes is not None: - def _ctype_ndarray(element_type, shape): - """ Create an ndarray of the given element type and shape """ - for dim in shape[::-1]: - element_type = dim * element_type - # prevent the type name include np.ctypeslib - element_type.__module__ = None - return element_type - - - def _get_scalar_type_map(): - """ - Return a dictionary mapping native endian scalar dtype to ctypes types - """ - ct = ctypes - simple_types = [ - ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, - ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, - ct.c_float, ct.c_double, - ct.c_bool, - ] - return {_dtype(ctype): ctype for ctype in simple_types} - - - _scalar_type_map = _get_scalar_type_map() - - - def _ctype_from_dtype_scalar(dtype): - # swapping twice ensure that `=` is promoted to <, >, or | - dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') - dtype_native = dtype.newbyteorder('=') - try: - ctype = _scalar_type_map[dtype_native] - except KeyError: - raise NotImplementedError( - "Converting {!r} to a ctypes type".format(dtype) - ) - - if dtype_with_endian.byteorder == '>': - ctype = ctype.__ctype_be__ - elif dtype_with_endian.byteorder == '<': - ctype = ctype.__ctype_le__ - - return ctype - - - def _ctype_from_dtype_subarray(dtype): - element_dtype, shape = dtype.subdtype - ctype = _ctype_from_dtype(element_dtype) - return _ctype_ndarray(ctype, shape) - - - def _ctype_from_dtype_structured(dtype): - # extract offsets of each field - field_data = [] - for name in dtype.names: - field_dtype, offset = dtype.fields[name][:2] - field_data.append((offset, name, _ctype_from_dtype(field_dtype))) - - # ctypes doesn't care about field order - field_data = sorted(field_data, key=lambda f: f[0]) - - if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data): - # union, if multiple fields all at address 0 - size = 0 - _fields_ = [] - for offset, name, ctype in field_data: - _fields_.append((name, ctype)) - size = max(size, ctypes.sizeof(ctype)) - - # pad to the right size - if 
dtype.itemsize != size: - _fields_.append(('', ctypes.c_char * dtype.itemsize)) - - # we inserted manual padding, so always `_pack_` - return type('union', (ctypes.Union,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) - else: - last_offset = 0 - _fields_ = [] - for offset, name, ctype in field_data: - padding = offset - last_offset - if padding < 0: - raise NotImplementedError("Overlapping fields") - if padding > 0: - _fields_.append(('', ctypes.c_char * padding)) - - _fields_.append((name, ctype)) - last_offset = offset + ctypes.sizeof(ctype) - - - padding = dtype.itemsize - last_offset - if padding > 0: - _fields_.append(('', ctypes.c_char * padding)) - - # we inserted manual padding, so always `_pack_` - return type('struct', (ctypes.Structure,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) - - - def _ctype_from_dtype(dtype): - if dtype.fields is not None: - return _ctype_from_dtype_structured(dtype) - elif dtype.subdtype is not None: - return _ctype_from_dtype_subarray(dtype) - else: - return _ctype_from_dtype_scalar(dtype) - - - def as_ctypes_type(dtype): - r""" - Convert a dtype into a ctypes type. - - Parameters - ---------- - dtype : dtype - The dtype to convert - - Returns - ------- - ctype - A ctype scalar, union, array, or struct - - Raises - ------ - NotImplementedError - If the conversion is not possible - - Notes - ----- - This function does not losslessly round-trip in either direction. - - ``np.dtype(as_ctypes_type(dt))`` will: - - - insert padding fields - - reorder fields to be sorted by offset - - discard field titles - - ``as_ctypes_type(np.dtype(ctype))`` will: - - - discard the class names of `ctypes.Structure`\ s and - `ctypes.Union`\ s - - convert single-element `ctypes.Union`\ s into single-element - `ctypes.Structure`\ s - - insert padding fields - - """ - return _ctype_from_dtype(_dtype(dtype)) - - - def as_array(obj, shape=None): - """ - Create a numpy array from a ctypes array or POINTER. - - The numpy array shares the memory with the ctypes object. - - The shape parameter must be given if converting from a ctypes POINTER. - The shape parameter is ignored if converting from a ctypes array - """ - if isinstance(obj, ctypes._Pointer): - # convert pointers to an array of the desired shape - if shape is None: - raise TypeError( - 'as_array() requires a shape argument when called on a ' - 'pointer') - p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) - obj = ctypes.cast(obj, p_arr_type).contents - - return array(obj, copy=False) - - - def as_ctypes(obj): - """Create and return a ctypes object from a numpy array. 
Actually - anything that exposes the __array_interface__ is accepted.""" - ai = obj.__array_interface__ - if ai["strides"]: - raise TypeError("strided arrays not supported") - if ai["version"] != 3: - raise TypeError("only __array_interface__ version 3 supported") - addr, readonly = ai["data"] - if readonly: - raise TypeError("readonly arrays unsupported") - - # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows - # dtype.itemsize (gh-14214) - ctype_scalar = as_ctypes_type(ai["typestr"]) - result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) - result = result_type.from_address(addr) - result.__keep = obj - return result diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/__config__.py b/venv/lib/python3.7/site-packages/numpy/distutils/__config__.py deleted file mode 100644 index db2e454..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/__config__.py +++ /dev/null @@ -1,40 +0,0 @@ -# This file is generated by numpy's setup.py -# It contains system_info results at the time of building this package. -__all__ = ["get_info","show"] - - -import os -import sys - -extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - -if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - if sys.version_info >= (3, 8): - os.add_dll_directory(extra_dll_dir) - else: - os.environ.setdefault('PATH', '') - os.environ['PATH'] += os.pathsep + extra_dll_dir - -blas_mkl_info={} -blis_info={} -openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_mkl_info={} -openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} -lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} - -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/__init__.py deleted file mode 100644 index 8dbb63b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -An enhanced distutils, providing support for Fortran compilers, for BLAS, -LAPACK and other common libraries for numerical computing, and more. - -Public submodules are:: - - misc_util - system_info - cpu_info - log - exec_command - -For details, please see the *Packaging* and *NumPy Distutils User Guide* -sections of the NumPy Reference Guide. - -For configuring the preference for and location of libraries like BLAS and -LAPACK, and for setting include paths and similar build options, please see -``site.cfg.example`` in the root of the NumPy repository or sdist. - -""" - -from __future__ import division, absolute_import, print_function - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -from . 
import ccompiler -from . import unixccompiler - -from .npy_pkg_config import * - -# If numpy is installed, add distutils.test() -try: - from . import __config__ - # Normally numpy is installed if the above import works, but an interrupted - # in-place build could also have left a __config__.py. In that case the - # next import may still fail, so keep it inside the try block. - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester -except ImportError: - pass - - -def customized_fcompiler(plat=None, compiler=None): - from numpy.distutils.fcompiler import new_fcompiler - c = new_fcompiler(plat=plat, compiler=compiler) - c.customize() - return c - -def customized_ccompiler(plat=None, compiler=None, verbose=1): - c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) - c.customize('') - return c diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/_shell_utils.py b/venv/lib/python3.7/site-packages/numpy/distutils/_shell_utils.py deleted file mode 100644 index 82abd5f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/_shell_utils.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Helper functions for interacting with the shell, and consuming shell-style -parameters provided in config files. -""" -import os -import shlex -import subprocess -try: - from shlex import quote -except ImportError: - from pipes import quote - -__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] - - -class CommandLineParser: - """ - An object that knows how to split and join command-line arguments. - - It must be true that ``argv == split(join(argv))`` for all ``argv``. - The reverse neednt be true - `join(split(cmd))` may result in the addition - or removal of unnecessary escaping. - """ - @staticmethod - def join(argv): - """ Join a list of arguments into a command line string """ - raise NotImplementedError - - @staticmethod - def split(cmd): - """ Split a command line string into a list of arguments """ - raise NotImplementedError - - -class WindowsParser: - """ - The parsing behavior used by `subprocess.call("string")` on Windows, which - matches the Microsoft C/C++ runtime. - - Note that this is _not_ the behavior of cmd. - """ - @staticmethod - def join(argv): - # note that list2cmdline is specific to the windows syntax - return subprocess.list2cmdline(argv) - - @staticmethod - def split(cmd): - import ctypes # guarded import for systems without ctypes - try: - ctypes.windll - except AttributeError: - raise NotImplementedError - - # Windows has special parsing rules for the executable (no quotes), - # that we do not care about - insert a dummy element - if not cmd: - return [] - cmd = 'dummy ' + cmd - - CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW - CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) - CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) - - nargs = ctypes.c_int() - lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) - args = [lpargs[i] for i in range(nargs.value)] - assert not ctypes.windll.kernel32.LocalFree(lpargs) - - # strip the element we inserted - assert args[0] == "dummy" - return args[1:] - - -class PosixParser: - """ - The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. 
-    """
-    @staticmethod
-    def join(argv):
-        return ' '.join(quote(arg) for arg in argv)
-
-    @staticmethod
-    def split(cmd):
-        return shlex.split(cmd, posix=True)
-
-
-if os.name == 'nt':
-    NativeParser = WindowsParser
-elif os.name == 'posix':
-    NativeParser = PosixParser
diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/ccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/ccompiler.py
deleted file mode 100644
index 6438790..0000000
--- a/venv/lib/python3.7/site-packages/numpy/distutils/ccompiler.py
+++ /dev/null
@@ -1,805 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import os
-import re
-import sys
-import types
-import shlex
-import time
-import subprocess
-from copy import copy
-from distutils import ccompiler
-from distutils.ccompiler import *
-from distutils.errors import DistutilsExecError, DistutilsModuleError, \
-                             DistutilsPlatformError, CompileError
-from distutils.sysconfig import customize_compiler
-from distutils.version import LooseVersion
-
-from numpy.distutils import log
-from numpy.distutils.compat import get_exception
-from numpy.distutils.exec_command import (
-    filepath_from_subprocess_output, forward_bytes_to_stdout
-)
-from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
-                                      get_num_build_jobs, \
-                                      _commandline_dep_string
-
-# globals for parallel build management
-try:
-    import threading
-except ImportError:
-    import dummy_threading as threading
-_job_semaphore = None
-_global_lock = threading.Lock()
-_processing_files = set()
-
-
-def _needs_build(obj, cc_args, extra_postargs, pp_opts):
-    """
-    Check if an object needs to be rebuilt based on its dependencies.
-
-    Parameters
-    ----------
-    obj : str
-        object file
-
-    Returns
-    -------
-    bool
-    """
-    # defined in unixccompiler.py
-    dep_file = obj + '.d'
-    if not os.path.exists(dep_file):
-        return True
-
-    # dep_file is a makefile containing 'object: dependencies'
-    # formatted like posix shell (spaces escaped, \ line continuations)
-    # the last line contains the compiler commandline arguments as some
-    # projects may compile an extension multiple times with different
-    # arguments
-    with open(dep_file, "r") as f:
-        lines = f.readlines()
-
-    cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
-    last_cmdline = lines[-1]
-    if last_cmdline != cmdline:
-        return True
-
-    contents = ''.join(lines[:-1])
-    deps = [x for x in shlex.split(contents, posix=True)
-            if x != "\n" and not x.endswith(":")]
-
-    try:
-        t_obj = os.stat(obj).st_mtime
-
-        # check if any of the dependencies is newer than the object
-        # the dependencies include the source used to create the object
-        for f in deps:
-            if os.stat(f).st_mtime > t_obj:
-                return True
-    except OSError:
-        # no object counts as newer (shouldn't happen if dep_file exists)
-        return True
-
-    return False
-
-
-def replace_method(klass, method_name, func):
-    if sys.version_info[0] < 3:
-        m = types.MethodType(func, None, klass)
-    else:
-        # Py3k does not have unbound method anymore, MethodType does not work
-        m = lambda self, *args, **kw: func(self, *args, **kw)
-    setattr(klass, method_name, m)
-
-
-######################################################################
-## Method that subclasses may redefine. But don't call this method,
-## it is private to the CCompiler class and may return unexpected
-## results if used elsewhere. So, you have been warned.
-
-def CCompiler_find_executables(self):
-    """
-    Does nothing here, but is called by the get_version method and can be
-    overridden by subclasses.
In particular it is redefined in the `FCompiler` - class where more documentation can be found. - - """ - pass - - -replace_method(CCompiler, 'find_executables', CCompiler_find_executables) - - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. - display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - try: - if self.verbose: - subprocess.check_output(cmd) - else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError: - # OSError doesn't have the same hooks for the exception - # output, but exec_command() historically would use an - # empty string for EnvironmentError (base class for - # OSError) - o = b'' - # status previously used by exec_command() for parent - # of OSError - s = 127 - else: - # use a convenience return here so that any kind of - # caught exception will execute the default code after the - # try / except block, which handles various exceptions - return None - - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - - if self.verbose: - forward_bytes_to_stdout(o) - - if re.search(b'Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % - (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. - strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. - output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. - - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. 
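            # (Illustrative note, not part of the original file: for a
            #  hypothetical src_name of '../../pkg/mod.c', base is
            #  '../../pkg/mod' at this point. rfind('..')+2 points just past
            #  the last '..', the leading '../..' run is resolved via
            #  os.path.abspath, and only the basename of that resolved
            #  directory is kept, so the object file becomes
            #  '<resolved-dir-name>/pkg/mod.o' under output_dir instead of
            #  escaping the build tree.)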
- i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. - depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. - - """ - # This method is effective only with Python >=2.3 distutils. - # Any changes here should be applied also to fcompiler.compile - # method to support pre Python 2.3 distutils. - global _job_semaphore - - jobs = get_num_build_jobs() - - # setup semaphore to not exceed number of compile jobs when parallelized at - # extension level (python >= 3.5) - with _global_lock: - if _job_semaphore is None: - _job_semaphore = threading.Semaphore(jobs) - - if not sources: - return [] - # FIXME:RELATIVE_IMPORT - if sys.version_info[0] < 3: - from .fcompiler import FCompiler, is_f_file, has_f90_header - else: - from numpy.distutils.fcompiler import (FCompiler, is_f_file, - has_f90_header) - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - def single_compile(args): - obj, (src, ext) = args - if not _needs_build(obj, cc_args, extra_postargs, pp_opts): - return - - # check if we are currently already processing the same object - # happens when using the same source in multiple extensions - while True: - # need explicit lock as there is no atomic check and add with GIL - with _global_lock: - # file not being worked on, start working - if obj not in _processing_files: - _processing_files.add(obj) - break - # wait for the processing to end - time.sleep(0.1) - - try: - # retrieve slot from our #job semaphore and build - with _job_semaphore: - self._compile(obj, src, ext, 
cc_args, extra_postargs, pp_opts) - finally: - # register being done processing - with _global_lock: - _processing_files.remove(obj) - - - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - f77_objects, other_objects = [], [] - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - if is_f_file(src) and not has_f90_header(src): - f77_objects.append((obj, (src, ext))) - else: - other_objects.append((obj, (src, ext))) - - # f77 objects can be built in parallel - build_items = f77_objects - # build f90 modules serial, module files are generated during - # compilation and may be used by files later in the list so the - # ordering is important - for o in other_objects: - single_compile(o) - else: - build_items = build.items() - - if len(build) > 1 and jobs > 1: - # build parallel - import multiprocessing.pool - pool = multiprocessing.pool.ThreadPool(jobs) - pool.map(single_compile, build_items) - pool.close() - else: - # build serial - for o in build_items: - single_compile(o) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from `distutils.cmd.Command`. - ignore : sequence of str, optional - List of `CCompiler` commands (without ``'set_'``) that should not be - altered. Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - fmt = '%-' + repr(mx+1) + 's = %s' - lines = [fmt % prop for prop in props] - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. 
- - """ - if 0: - for attrname in ['include_dirs', 'define', 'undef', - 'libraries', 'library_dirs', - 'rpath', 'link_objects']: - attr = getattr(self, attrname, None) - if not attr: - continue - log.info("compiler '%s' is set to %s" % (attrname, attr)) - try: - self.get_version() - except Exception: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls `distutils.sysconfig.customize_compiler` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. - try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - if not hasattr(self, 'compiler_cxx'): - log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) - - - # check if compiler supports gcc style automatic dependencies - # run on every extension so skip for known good compilers - if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or - 'g++' in self.compiler[0] or - 'clang' in self.compiler[0]): - self._auto_depends = True - elif os.name == 'posix': - import tempfile - import shutil - tmpdir = tempfile.mkdtemp() - try: - fn = os.path.join(tmpdir, "file.c") - with open(fn, "w") as f: - f.write("int a;\n") - self.compile([fn], output_dir=tmpdir, - extra_preargs=['-MMD', '-MF', fn + '.d']) - self._auto_depends = True - except CompileError: - self._auto_depends = False - finally: - shutil.rmtree(tmpdir) - - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. 
- Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. - - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a `CCompiler` class. `matcher` takes a single parameter, - a version string. - - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. - - Returns - ------- - version : str or None - Version string, in the format of `distutils.version.LooseVersion`. - - """ - if not force and hasattr(self, 'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - try: - output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - output = exc.output - status = exc.returncode - except OSError: - # match the historical returns for a parent - # exception class caught by exec_command() - status = 127 - output = b'' - else: - # output isn't actually a filepath but we do this - # for now to match previous distutils behavior - output = filepath_from_subprocess_output(output) - status = 0 - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a `CCompiler` instance. 
- - """ - if self.compiler_type in ('msvc', 'intelw', 'intelemw'): - return self - - cxx = copy(self) - cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] - if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', - "Intel C Compiler for 32-bit applications on Windows") -compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', - "Intel C Compiler for 64-bit applications on Windows") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc'), - ('nt', 'intelw'), - ('nt', 'intelemw')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=None, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if verbose is None: - verbose = log.get_threshold() <= log.INFO - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError: - msg = str(get_exception()) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError: - msg = str(get_exception()) - raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ - module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + - "in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - compiler.verbose = verbose - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - # the version of this function provided by CPython allows the following - # to return lists, which are unpacked automatically: - # - compiler.runtime_library_dir_option - # our version extends the behavior to: - # - compiler.library_dir_option - # - compiler.library_option - # - compiler.find_library_file - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.' + _cc + 'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/__init__.py deleted file mode 100644 index 76a2600..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands. - -""" -from __future__ import division, absolute_import, print_function - -def test_na_writable_attributes_deletion(): - a = np.NA(2) - attr = ['payload', 'dtype'] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ #'build_py', - 'clean', - 'install_clib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command', globals(), locals(), distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'install_lib', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/autodist.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/autodist.py deleted file mode 100644 index 9c98b84..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/autodist.py +++ /dev/null @@ -1,122 +0,0 @@ -"""This module implements additional tests ala autoconf which can be useful. 
- -""" -from __future__ import division, absolute_import, print_function - -import textwrap - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - #ifndef __cplusplus - static %(inline)s int static_func (void) - { - return 0; - } - %(inline)s int nostatic_func (void) - { - return 0; - } - #endif""") - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - - -def check_restrict(cmd): - """Return the restrict identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - static int static_func (char * %(restrict)s a) - { - return 0; - } - """) - - for kw in ['restrict', '__restrict__', '__restrict']: - st = cmd.try_compile(body % {'restrict': kw}, None, None) - if st: - return kw - - return '' - - -def check_compiler_gcc4(cmd): - """Return True if the C compiler is GCC 4.x.""" - cmd._check_compiler() - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) || (__GNUC__ < 4) - #error gcc >= 4 required - #endif - return 0; - } - """) - return cmd.try_compile(body, None, None) - - -def check_gcc_function_attribute(cmd, attribute, name): - """Return True if the given function attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s %s(void*); - - int - main() - { - return 0; - } - """) % (attribute, name) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, - include): - """Return True if the given function attribute is supported with - intrinsics.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #include<%s> - int %s %s(void) - { - %s; - return 0; - } - - int - main() - { - return 0; - } - """) % (include, attribute, name, code) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_variable_attribute(cmd, attribute): - """Return True if the given variable attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s foo; - - int - main() - { - return 0; - } - """) % (attribute, ) - return cmd.try_compile(body, None, None) != 0 diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/bdist_rpm.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 3e52a50..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. 
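        # (Illustrative note, not part of the original file: if the build
        #  was invoked as 'python setup_alt.py bdist_rpm', where
        #  'setup_alt.py' is a hypothetical name, every literal 'setup.py'
        #  in the generated spec file is rewritten to 'setup_alt.py' by the
        #  loop below; when argv[0] really is 'setup.py', the spec file is
        #  returned unchanged.)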
- setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build.py deleted file mode 100644 index 5a9da12..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - self.warn_error = False - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_clib.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_clib.py deleted file mode 100644 index 13edf07..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,333 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. 
-""" -from __future__ import division, absolute_import, print_function - -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import filter_sources, has_f_sources,\ - has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ - get_numpy_include_dirs - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] -# - - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ] - - boolean_options = old_build_clib.boolean_options + ['inplace', 'warn-error'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - self.parallel = None - self.warn_error = None - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError: - raise ValueError("--parallel/-j argument must be an integer") - old_build_clib.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ) - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. 
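        # (Illustrative note, not part of the original file: 'build_src' is
        #  the numpy.distutils command that generates any still-missing
        #  sources, e.g. by running f2py on .pyf files or expanding
        #  .c.src/.f.src templates, so the filenames listed in each
        #  build_info['sources'] actually exist before compilation starts.)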
- self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: - languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c') == 'f90' - - # save source type information so that build_ext can use it. 
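        # (Illustrative note, not part of the original file: for a
        #  hypothetical sources list ['foo.c', 'bar.cpp', 'baz.f'] the block
        #  below records build_info['source_languages'] = ['c', 'c++', 'f77'];
        #  a Fortran 90 module source, or an explicit language='f90' in
        #  build_info, records 'f90' instead of 'f77'. build_ext later reads
        #  this list to pick a matching linker.)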
- source_languages = [] - if c_sources: - source_languages.append('c') - if cxx_sources: - source_languages.append('c++') - if requiref90: - source_languages.append('f90') - elif f_sources: - source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - if not (self.force or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script ' - 'for fortran compiler: %s' - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources" - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get( - 'extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get( - 'extra_f90_compile_args') or [] - - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - extra_postargs = build_info.get('extra_compiler_args') or [] - - include_dirs.extend(get_numpy_include_dirs()) - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = os.path.dirname(lib_file) - if requiref90: - self.mkpath(module_build_dir) - - if compiler.compiler_type == 'msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - objects = [] - if c_sources: - log.info("compiling C sources") - objects = compiler.compile(c_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile(cxx_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self._f_compiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) 
== os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - if f_objects and not fcompiler.can_ccompiler_link(compiler): - # Default linker cannot link Fortran object files, and results - # need to be wrapped later. Instead of creating a real static - # library, just keep track of the object files. - listfn = os.path.join(self.build_clib, - lib_name + '.fobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) - - listfn = os.path.join(self.build_clib, - lib_name + '.cobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in objects)) - - # create empty "library" file for dependency tracking - lib_fname = os.path.join(self.build_clib, - lib_name + compiler.static_lib_extension) - with open(lib_fname, 'wb') as f: - pass - else: - # assume that default linker is suitable for - # linking Fortran object files - objects.extend(f_objects) - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo.get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_ext.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_ext.py deleted file mode 100644 index cd9b1c6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,611 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
- -""" -from __future__ import division, absolute_import, print_function - -import os -import subprocess -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.system_info import combine_paths, system_info -from numpy.distutils.misc_util import filter_sources, has_f_sources, \ - has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence, get_build_architecture, \ - msvc_version -from numpy.distutils.command.config_compiler import show_fortran_compilers - - - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = old_build_ext.boolean_options + ['warn-error'] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - self.parallel = None - self.warn_error = None - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError: - raise ValueError("--parallel/-j argument must be an integer") - - # Ensure that self.include_dirs and self.distribution.include_dirs - # refer to the same list object. finalize_options will modify - # self.include_dirs, but self.distribution.include_dirs is used - # during the actual build. - # self.include_dirs is None unless paths are specified with - # --include-dirs. - # The include paths will be passed to the compiler in the order: - # numpy paths, --include-dirs paths, Python include path. - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - incl_dirs = self.include_dirs or [] - if self.distribution.include_dirs is None: - self.distribution.include_dirs = [] - self.include_dirs = self.distribution.include_dirs - self.include_dirs.extend(incl_dirs) - - old_build_ext.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ) - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. - self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj( - 'build_clib') - else: - build_clib = self.distribution.get_command_obj( - 'build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. 
Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - self.compiler.show_customization() - - # Setup directory for storing generated extra DLL files on Windows - self.extra_dll_dir = os.path.join(self.build_temp, '.libs') - if not os.path.isdir(self.extra_dll_dir): - os.makedirs(self.extra_dll_dir) - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,' - ' overwriting build_info\n%s... \nwith\n%s...' - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. - all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - # reset language attribute for choosing proper linker - if 'c++' in ext_languages: - ext_language = 'c++' - elif 'f90' in ext_languages: - ext_language = 'f90' - elif 'f77' in ext_languages: - ext_language = 'f77' - else: - ext_language = 'c' # default - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name, l, ext_language)) - ext.language = ext_language - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler=self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' % - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Copy over any extra DLL files - # FIXME: In the case where there are more than two packages, - # we blindly assume that both packages need all of the libraries, - # resulting in a larger wheel than is required. This should be fixed, - # but it's so rare that I won't bother to handle it. - pkg_roots = { - self.get_ext_fullname(ext.name).split('.')[0] - for ext in self.extensions - } - for pkg_root in pkg_roots: - shared_lib_dir = os.path.join(pkg_root, '.libs') - if not self.inplace: - shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) - for fn in os.listdir(self.extra_dll_dir): - if not os.path.isdir(shared_lib_dir): - os.makedirs(shared_lib_dir) - if not fn.lower().endswith('.dll'): - continue - runtime_lib = os.path.join(self.extra_dll_dir, fn) - copy_file(runtime_lib, shared_lib_dir) - - def swig_sources(self, sources, extensions=None): - # Do nothing. Swig sources have been handled in build_src command. 
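# By this point build_src has already replaced any .i interface files
# in ext.sources with the generated wrapper sources, so there is
# nothing left to translate here. A hedged sketch of the resulting
# flow (the 'spam' module name and build path are illustrative):
#
#   ext = Extension('spam', sources=['spam.i'])
#   # after build_src: ext.sources == ['build/src.<plat>/spam_wrap.c']
#   # swig_sources() then simply returns that list unchanged.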
- return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - if not (self.force or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - if self.compiler.compiler_type == 'msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. - extra_args.append('/Zm1000') - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. - if ext.language == 'f90': - fcompiler = self._f90_compiler - elif ext.language == 'f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( - ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( - ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language == 'c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends': ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - c_objects = [] - if c_sources: - log.info("compiling C sources") - c_objects = self.compiler.compile(c_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile(cxx_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - 
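# Some Fortran compilers cannot be told where to write the .mod files
# they emit (fcompiler.module_dir_switch is None), so the code below
# snapshots the .mod files already present in the working directory
# and, after compiling, moves any newly created ones into
# module_build_dir. A standalone sketch of the same idea (the
# compile_f90 call is a hypothetical stand-in):
#
#   before = set(glob('*.mod'))
#   compile_f90(fmodule_sources)
#   for mod in set(glob('*.mod')) - before:
#       shutil.move(mod, module_build_dir)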
module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if f_objects and not fcompiler.can_ccompiler_link(self.compiler): - unlinkable_fobjects = f_objects - objects = c_objects - else: - unlinkable_fobjects = [] - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. - if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran( - fcompiler, libraries, library_dirs) - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language == 'c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if fcompiler is not None: - objects, libraries = self._process_unlinkable_fobjects( - objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects) - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=ext.language) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib( - objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) - - def _process_unlinkable_fobjects(self, objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects): - libraries = list(libraries) - objects = list(objects) - unlinkable_fobjects = list(unlinkable_fobjects) - - # Expand possible fake static libraries to objects - for lib in list(libraries): - for libdir in library_dirs: - fake_lib = os.path.join(libdir, lib + '.fobjects') - if os.path.isfile(fake_lib): - # Replace fake static library - libraries.remove(lib) - with open(fake_lib, 'r') as f: - unlinkable_fobjects.extend(f.read().splitlines()) - - # Expand C objects - c_lib = os.path.join(libdir, 
lib + '.cobjects') - with open(c_lib, 'r') as f: - objects.extend(f.read().splitlines()) - - # Wrap unlinkable objects to a linkable one - if unlinkable_fobjects: - fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects] - wrapped = fcompiler.wrap_unlinkable_objects( - fobjects, output_dir=self.build_temp, - extra_dll_dir=self.extra_dll_dir) - objects.extend(wrapped) - - return objects, libraries - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: - return - - for libname in c_libraries: - if libname.startswith('msvc'): - continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: - continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True - break - if fileexists: - continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - try: - dir = subprocess.check_output(['cygpath', '-w', dir]) - except (OSError, subprocess.CalledProcessError): - pass - else: - dir = filepath_from_subprocess_output(dir) - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs(self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_py.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_py.py deleted file mode 100644 index 54dcde4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = 
old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_scripts.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_scripts.py deleted file mode 100644 index c8b25fc..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,51 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. - -""" -from __future__ import division, absolute_import, print_function - -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_src.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/build_src.py deleted file mode 100644 index 3e0522c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,775 +0,0 @@ -""" Build swig and f2py sources. 
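build_src runs before build_ext and turns generated or templated inputs
into plain sources: .src templates are expanded, .pyf interface files
are passed through f2py and .i files through SWIG, so build_ext only
ever sees ordinary C/C++/Fortran files (illustrative mapping:
spam.pyf -> build/src.<plat>/spammodule.c).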
-""" -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import ( - fortran_ext_match, appendpath, is_string, is_sequence, get_cmd - ) -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurrence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - with open(source, 'r') as fs: - with open(target, 'w') as ft: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory alongside your pure Python modules"), - ('verbose-cfg', None, - "change logging level from WARN to INFO which will show all " + - "compiler output") - ] - - boolean_options = ['force', 'inplace', 'verbose-cfg'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - self.verbose_cfg = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - 
self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - template, install_dir, subst_dict = info - template_dir = os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. 
Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - if self.inplace == 1: - top_prefix = '.' - build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - 
func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - self.mkpath(build_dir) - - if self.verbose_cfg: - new_level = log.INFO - else: - new_level = log.WARN - old_level = log.set_threshold(new_level) - - for func in func_sources: - source = func(extension, build_dir) - if not source: - continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - log.set_threshold(old_level) - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - with open(target_file, 'w') as fid: - fid.write(outstr) - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." % (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - """Pyrex not supported; this remains for Cython support (see below)""" - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - """Pyrex is not supported, but some projects monkeypatch this method. - - That allows compiling Cython code, see gh-6955. - This method will remain here for compatibility reasons. 
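        A hedged sketch of such a monkeypatch (assumes Cython is
        installed; the replacement function name is illustrative):

            import Cython.Compiler.Main
            from numpy.distutils.command import build_src

            def generate_a_cython_source(self, base, ext_name, source,
                                         extension):
                # writes <base>.c next to the .pyx input
                Cython.Compiler.Main.compile(
                    source, full_module_name=extension.name)
                return base + '.c'

            build_src.build_src.generate_a_pyrex_source = \
                generate_a_cython_source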
- """ - return [] - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' 
- depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - import numpy.f2py - numpy.f2py.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - import numpy.f2py - numpy.f2py.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - build_dir = os.path.join(self.build_src, target_dir) - target_c = os.path.join(build_dir, 'fortranobject.c') - target_h = os.path.join(build_dir, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if build_dir not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." % (build_dir)) - extension.include_dirs.append(build_dir) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' \ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' 
\ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search -_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search - -def get_swig_target(source): - with open(source, 'r') as f: - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - return result - -def get_swig_modulename(source): - with open(source, 'r') as f: - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' - r'__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - with open(source) as f: - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -########################################## diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/config.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/config.py deleted file mode 100644 index b9f2fa7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/config.py +++ /dev/null @@ -1,513 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
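# A hedged sketch of what the added Fortran support enables once a
# config command object is at hand (dist is an assumed Distribution
# instance; the one-line F77 program is the smallest valid one):
#
#   cfg = dist.get_command_obj('config')
#   cfg.try_compile('      end\n', lang='f77')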
-# Pearu Peterson -from __future__ import division, absolute_import, print_function - -import os, signal -import warnings -import sys -import subprocess -import textwrap - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import (check_gcc_function_attribute, - check_gcc_function_attribute_with_intrinsics, - check_gcc_variable_attribute, - check_inline, - check_restrict, - check_compiler_gcc4) -from numpy.distutils.compat import get_exception - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and (self.compiler.compiler_type in - ('msvc', 'intelw', 'intelemw')): - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an IOError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print an helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except IOError: - e = get_exception() - msg = textwrap.dedent("""\ - Could not initialize compiler instance: do you have Visual Studio - installed? If you are trying to build with MinGW, please use "python setup.py - build -c mingw32" instead. If you have Visual Studio installed, check it is - correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2, - VS 2010 for >= 3.3). - - Original exception was: %s, and the Compiler class was %s - ============================================================================""") \ - % (e, self.compiler.__class__.__name__) - print(textwrap.dedent("""\ - ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. 
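            # Concretely, msvc9compiler.get_build_version() maps the
            # running compiler to its MSVC major version (10.0 == VS
            # 2010); from that version on the linker no longer writes
            # the .manifest file unless /MANIFEST is passed explicitly,
            # which is what the block below ensures.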
- from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError): - str(get_exception()) - self.compiler = save_compiler - raise CompileError - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - src, obj = self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - # _compile in unixcompiler.py sometimes creates .d dependency files. - # Clean them up. - self.temp_files.append(obj + '.d') - return src, obj - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - try: - d = subprocess.check_output(['cygpath', - '-w', d]) - except (OSError, subprocess.CalledProcessError): - pass - else: - d = filepath_from_subprocess_output(d) - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", - [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #ifndef %s - (void) %s; - #endif - ; - return 0; - }""") % (symbol, symbol) - - return 
self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #if %s - #else - #error false or undefined macro - #endif - ; - return 0; - }""") % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - int main(void) { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; - } - """) % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; - } - """) - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. 
- # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. - body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - library_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionary, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. - body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_restrict(self): - """Return the restrict keyword recognized by the compiler, empty string - otherwise.""" - return check_restrict(self) - - def check_compiler_gcc4(self): - """Return True if the C compiler is gcc >= 4.""" - return check_compiler_gcc4(self) - - def check_gcc_function_attribute(self, attribute, name): - return check_gcc_function_attribute(self, attribute, name) - - def check_gcc_function_attribute_with_intrinsics(self, attribute, name, - code, include): - return check_gcc_function_attribute_with_intrinsics(self, attribute, - name, code, include) - - def check_gcc_variable_attribute(self, attribute): - return check_gcc_variable_attribute(self, attribute) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - # 2008-11-16, RemoveMe - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" - "Usage of get_output is deprecated: please do not \n" - "use it anymore, and avoid configuration checks \n" - "involving running executable on the target machine.\n" - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning, stacklevel=2) - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except Exception: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - try: - # specify cwd arg for consistency with - # historic usage pattern of exec_command() - # also, note that exe appears to be a string, - # which exec_command() handled, but we now - # use a list for check_output() -- this assumes - # that exe is always a single command - output = subprocess.check_output([exe], cwd='.') - except subprocess.CalledProcessError as exc: - exitstatus = exc.returncode - output = '' - except OSError: - # preserve the EnvironmentError exit status - # used historically in exec_command() - exitstatus = 127 - output = '' - else: - output = filepath_from_subprocess_output(output) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout(object): - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/config_compiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/config_compiler.py deleted file mode 100644 index bf17006..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,128 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=None): - # Using cache to prevent infinite recursion. - if _cache: - return - elif _cache is None: - _cache = [] - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. 
- """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. 
- return diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/develop.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/develop.py deleted file mode 100644 index 1410ab2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/develop.py +++ /dev/null @@ -1,17 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. - -""" -from __future__ import division, absolute_import, print_function - -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/egg_info.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/egg_info.py deleted file mode 100644 index 18673ec..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - if 'sdist' in sys.argv: - import warnings - import textwrap - msg = textwrap.dedent(""" - `build_src` is being run, this may lead to missing - files in your sdist! You want to use distutils.sdist - instead of the setuptools version: - - from distutils.command.sdist import sdist - cmdclass={'sdist': sdist}" - - See numpy's setup.py or gh-7131 for details.""") - warnings.warn(msg, UserWarning, stacklevel=2) - - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. - self.run_command("build_src") - _egg_info.run(self) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install.py deleted file mode 100644 index c74ae94..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? 
Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. - # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - with open(self.record, 'r') as f: - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_clib.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install_clib.py deleted file mode 100644 index 6a73f7e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - if not build_clib_cmd.build_clib: - # can happen if the user specified `--skip-build` - build_clib_cmd.finalize_options() - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_data.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install_data.py deleted file mode 100644 index 996cf7e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys 
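
[Aside, not part of the deleted file] The name-to-filename association that install_clib above asks the compiler for is plain distutils behaviour. A minimal sketch, assuming a POSIX host; the library name 'npymath' is only an example:

    from distutils.ccompiler import new_compiler

    compiler = new_compiler()                      # platform default compiler
    print(compiler.library_filename('npymath'))    # e.g. 'libnpymath.a' on POSIX
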
-have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_headers.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/install_headers.py deleted file mode 100644 index f3f58aa..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... - if header[0] == 'numpy.core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/command/sdist.py b/venv/lib/python3.7/site-packages/numpy/distutils/command/sdist.py deleted file mode 100644 index bfaab1c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/compat.py b/venv/lib/python3.7/site-packages/numpy/distutils/compat.py deleted file mode 100644 index 9a81cd3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/compat.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Small modules to cope with python 2 vs 3 incompatibilities inside -numpy.distutils - -""" -from __future__ import division, absolute_import, print_function - -import sys - -def get_exception(): - return sys.exc_info()[1] diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/conv_template.py b/venv/lib/python3.7/site-packages/numpy/distutils/conv_template.py deleted file mode 100644 index 3bcb7b8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/conv_template.py +++ /dev/null @@ 
-1,337 +0,0 @@ -#!/usr/bin/env python -""" -takes templated file .xxx.src and produces .xxx file where .xxx is -.i or .c or .h, using the following template rules - -/**begin repeat -- on a line by itself marks the start of a repeated code - segment -/**end repeat**/ -- on a line by itself marks it's end - -After the /**begin repeat and before the */, all the named templates are placed -these should all have the same number of replacements - -Repeat blocks can be nested, with each nested block labeled with its depth, -i.e. -/**begin repeat1 - *.... - */ -/**end repeat1**/ - -When using nested loops, you can optionally exclude particular -combinations of the variables using (inside the comment portion of the inner loop): - - :exclude: var1=value1, var2=value2, ... - -This will exclude the pattern where var1 is value1 and var2 is value2 when -the result is being generated. - - -In the main body each replace will use one entry from the list of named replacements - - Note that all #..# forms in a block must have the same number of - comma-separated entries. - -Example: - - An input file containing - - /**begin repeat - * #a = 1,2,3# - * #b = 1,2,3# - */ - - /**begin repeat1 - * #c = ted, jim# - */ - @a@, @b@, @c@ - /**end repeat1**/ - - /**end repeat**/ - - produces - - line 1 "template.c.src" - - /* - ********************************************************************* - ** This file was autogenerated from a template DO NOT EDIT!!** - ** Changes should be made to the original source (.src) file ** - ********************************************************************* - */ - - #line 9 - 1, 1, ted - - #line 9 - 1, 1, jim - - #line 9 - 2, 2, ted - - #line 9 - 2, 2, jim - - #line 9 - 3, 3, ted - - #line 9 - 3, 3, jim - -""" -from __future__ import division, absolute_import, print_function - - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -from numpy.distutils.compat import get_exception - -# names for replacement that are already global. -global_names = {} - -# header placed at the front of head processed file -header =\ -""" -/* - ***************************************************************************** - ** This file was autogenerated from a template DO NOT EDIT!!!! ** - ** Changes should be made to the original source (.src) file ** - ***************************************************************************** - */ - -""" -# Parse string for repeat loops -def parse_structure(astr, level): - """ - The returned line number is from the beginning of the string, starting - at zero. Returns an empty list if no loops found. - - """ - if level == 0 : - loopbeg = "/**begin repeat" - loopend = "/**end repeat**/" - else : - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level - - ind = 0 - line = 0 - spanlist = [] - while True: - start = astr.find(loopbeg, ind) - if start == -1: - break - start2 = astr.find("*/", start) - start2 = astr.find("\n", start2) - fini1 = astr.find(loopend, start2) - fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) - ind = fini2 - spanlist.sort() - return spanlist - - -def paren_repl(obj): - torep = obj.group(1) - numrep = obj.group(2) - return ','.join([torep]*int(numrep)) - -parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)") -plainrep = re.compile(r"([^*]+)\*(\d+)") -def parse_values(astr): - # replaces all occurrences of '(a,b,c)*4' in astr - # with 'a,b,c,a,b,c,a,b,c,a,b,c'. 
Empty braces generate - # empty values, i.e., ()*4 yields ',,,'. The result is - # split at ',' and a list of values returned. - astr = parenrep.sub(paren_repl, astr) - # replaces occurrences of xxx*3 with xxx, xxx, xxx - astr = ','.join([plainrep.sub(paren_repl, x.strip()) - for x in astr.split(',')]) - return astr.split(',') - - -stripast = re.compile(r"\n\s*\*?") -named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") -exclude_vars_re = re.compile(r"(\w*)=(\w*)") -exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : - """Find all named replacements in the header - - Returns a list of dictionaries, one for each loop iteration, - where each key is a name to be substituted and the corresponding - value is the replacement string. - - Also return a list of exclusions. The exclusions are dictionaries - of key value pairs. There can be more than one exclusion. - [{'var1':'value1', 'var2', 'value2'[,...]}, ...] - - """ - # Strip out '\n' and leading '*', if any, in continuation lines. - # This should not affect code previous to this change as - # continuation lines were not allowed. - loophead = stripast.sub("", loophead) - # parse out the names and lists of values - names = [] - reps = named_re.findall(loophead) - nsub = None - for rep in reps: - name = rep[0] - vals = parse_values(rep[1]) - size = len(vals) - if nsub is None : - nsub = size - elif nsub != size : - msg = "Mismatch in number of values, %d != %d\n%s = %s" - raise ValueError(msg % (nsub, size, name, vals)) - names.append((name, vals)) - - - # Find any exclude variables - excludes = [] - - for obj in exclude_re.finditer(loophead): - span = obj.span() - # find next newline - endline = loophead.find('\n', span[1]) - substr = loophead[span[1]:endline] - ex_names = exclude_vars_re.findall(substr) - excludes.append(dict(ex_names)) - - # generate list of dictionaries, one for each template iteration - dlist = [] - if nsub is None : - raise ValueError("No substitution variables found") - for i in range(nsub): - tmp = {name: vals[i] for name, vals in names} - dlist.append(tmp) - return dlist - -replace_re = re.compile(r"@([\w]+)@") -def parse_string(astr, env, level, line) : - lineno = "#line %d\n" % line - - # local function for string replacement, uses env - def replace(match): - name = match.group(1) - try : - val = env[name] - except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) - raise ValueError(msg) - return val - - code = [lineno] - struct = parse_structure(astr, level) - if struct : - # recurse over inner loops - oldend = 0 - newlevel = level + 1 - for sub in struct: - pref = astr[oldend:sub[0]] - head = astr[sub[0]:sub[1]] - text = astr[sub[1]:sub[2]] - oldend = sub[3] - newline = line + sub[4] - code.append(replace_re.sub(replace, pref)) - try : - envlist = parse_loop_header(head) - except ValueError: - e = get_exception() - msg = "line %d: %s" % (newline, e) - raise ValueError(msg) - for newenv in envlist : - newenv.update(env) - newcode = parse_string(text, newenv, newlevel, newline) - code.extend(newcode) - suff = astr[oldend:] - code.append(replace_re.sub(replace, suff)) - else : - # replace keys - code.append(replace_re.sub(replace, astr)) - code.append('\n') - return ''.join(code) - -def process_str(astr): - code = [header] - code.extend(parse_string(astr, global_names, 0, 1)) - return ''.join(code) - - -include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" - r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: -
lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - print('Including file', fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - sourcefile = os.path.normcase(source).replace("\\", "\\\\") - try: - code = process_str(''.join(lines)) - except ValueError: - e = get_exception() - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) - return '#line 1 "%s"\n%s' % (sourcefile, code) - - -def unique_key(adict): - # this obtains a unique key given a dictionary - # currently it works by appending together n of the letters of the - # current keys and increasing n until a unique key is found - # -- not particularly quick - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = "".join([x[:n] for x in allkeys]) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - try: - writestr = process_str(allstr) - except ValueError: - e = get_exception() - raise ValueError("In %s loop at %s" % (file, e)) - - outfile.write(writestr) - -if __name__ == "__main__": - main() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/core.py b/venv/lib/python3.7/site-packages/numpy/distutils/core.py deleted file mode 100644 index 70cc37c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/core.py +++ /dev/null @@ -1,217 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from distutils.core import * - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension -from numpy.distutils.numpy_distribution import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import get_data_files, is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. 
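
[Aside, not part of the deleted file] A hedged sketch of how a project's setup.py could extend the numpy_cmdclass table above; setup() below merges any user-supplied cmdclass over these defaults, so overriding a single entry is enough. The subclass and project names are illustrative:

    from numpy.distutils.core import setup
    from numpy.distutils.command.sdist import sdist

    class verbose_sdist(sdist):
        def run(self):
            print('running sdist with numpy.distutils extras')
            sdist.run(self)

    setup(name='example', version='0.1', cmdclass={'sdist': verbose_sdist})
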
- from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=None): - """ Return True if command line does not contain any - help or display requests. - """ - if _cache: - return _cache[0] - elif _cache is None: - _cache = [] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs its own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help requests on the command line.
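
[Aside, not part of the deleted file] A minimal sketch of the `configuration` callable that the branch above consumes; this is the conventional numpy.distutils entry point, with package and source names that are purely illustrative:

    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('mypkg', parent_package, top_path)
        config.add_extension('flib', sources=['flib.f90'])  # a Fortran extension
        return config

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(configuration=configuration)
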
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem[0]: - warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,), - stacklevel=2) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,), - stacklevel=2) - break - libraries.append((lib_name, build_info)) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/cpuinfo.py b/venv/lib/python3.7/site-packages/numpy/distutils/cpuinfo.py deleted file mode 100644 index bc97283..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/env python -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['cpu'] - -import sys, re, types -import os - -if sys.version_info[0] >= 3: - from subprocess import getstatusoutput -else: - from commands import getstatusoutput - -import warnings -import platform - -from numpy.distutils.compat import get_exception - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, "" - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase(object): - """Holds CPU information and provides methods for requiring - the availability of various CPU features. - """ - - def _try_call(self, func): - try: - return func() - except Exception: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile(r'(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning, stacklevel=2) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return 
self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return (self.is_Intel() - and (self.info[0]['cpu family'] == '6' - or self.info[0]['cpu family'] == '15') - and (self.has_sse3() and not self.has_ssse3()) - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) - - def _is_Core2(self): - return (self.is_64bit() and self.is_Intel() and - re.match(r'.*?Core\(TM\)2\b', - self.info[0]['model name']) is not None) - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', 
self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except Exception: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return 
self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW', self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1', self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250', self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2', self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30', self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4', self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5', self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60', self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80', self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra', self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def _is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! 
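
[Aside, not part of the deleted file] A hedged illustration of what the prgx pattern in the registry walk below extracts from a typical Identifier value; the sample string is made up, but has the documented shape:

    import re

    prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
                      r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
    ident = 'x86 Family 6 Model 15 Stepping 11'
    m = prgx.search(ident)
    print(m.group('FML'), m.group('MDL'), m.group('STP'))   # -> 6 15 11
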
- if sys.version_info[0] >= 3: - import winreg - else: - import _winreg as winreg - - prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)" - r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE) - chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while True: - try: - proc=winreg.EnumKey(chnd, pnum) - except winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd, proc) - pidx=0 - while True: - try: - name, value, vtpe=winreg.EnumValue(phnd, pidx) - except winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except Exception: - print(sys.exc_info()[1], '(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0, 1, 2, 3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6, 7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.)
would - require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3, 5, 6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7, 8, 9, 10, 11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6, 15]) - elif self.is_AMD(): - return self.info[0]['Family'] in [5, 6, 15] - else: - return False - - def _has_sse(self): - if self.is_Intel(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [7, 8, 9, 10, 11]) - or self.info[0]['Family']==15) - elif self.is_AMD(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [6, 7, 8, 10]) - or self.info[0]['Family']==15) - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_PentiumIV() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
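
[Aside, not part of the deleted file] A hedged usage sketch for the module as a whole: public is_XXX()/has_XXX() calls resolve through CPUInfoBase.__getattr__ to the matching _is_XXX/_has_XXX probe, wrapped in _try_call so a failed probe yields None rather than raising. Using the `cpu` singleton created just below:

    from numpy.distutils.cpuinfo import cpu

    print(cpu.getNCPUs())    # processor count, 1 if it cannot be determined
    print(cpu.is_Intel())    # True/False, or None if the probe failed
    print(cpu.is_blaa())     # unknown probe -> None, not AttributeError
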
-else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print('CPU information:'), -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print('%s=%s' %(name[1:],r)) -# else: -# print(name[1:]), -# print() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/exec_command.py b/venv/lib/python3.7/site-packages/numpy/distutils/exec_command.py deleted file mode 100644 index 712f226..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/exec_command.py +++ /dev/null @@ -1,330 +0,0 @@ -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Successfully tested on: - -======== ============ ================================================= -os.name sys.platform comments -======== ============ ================================================= -posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 -posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 -posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 -posix darwin Darwin 7.2.0, Python 2.3 -nt win32 Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 -nt win32 Windows 98, Python 2.1.1. Idle 0.8 -nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! - Comment: also `cmd /c echo` will not work - but redefining environment variables do work. -posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) -nt win32 Windows XP, Python 2.3.3 -======== ============ ================================================= - -Known bugs: - -* Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import subprocess -import locale -import warnings - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def filepath_from_subprocess_output(output): - """ - Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. - - Inherited from `exec_command`, and possibly incorrect. - """ - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - output = output.decode(mylocale, errors='replace') - output = output.replace('\r\n', '\n') - # Another historical oddity - if output[-1:] == '\n': - output = output[:-1] - # stdio uses bytes in python 2, so to avoid issues, we simply - # remove all non-ascii characters - if sys.version_info < (3, 0): - output = output.encode('ascii', errors='replace') - return output - - -def forward_bytes_to_stdout(val): - """ - Forward bytes from a subprocess call to the console, without attempting to - decode them. 
- - The assumption is that the subprocess call already returned bytes in - a suitable encoding. - """ - if sys.version_info.major < 3: - # python 2 has binary output anyway - sys.stdout.write(val) - elif hasattr(sys.stdout, 'buffer'): - # use the underlying binary output if there is one - sys.stdout.buffer.write(val) - elif hasattr(sys.stdout, 'encoding'): - # round-trip the encoding if necessary - sys.stdout.write(val.decode(sys.stdout.encoding)) - else: - # make a best-guess at the encoding - sys.stdout.write(val.decode('utf8', errors='replace')) - - -def temp_file_name(): - # 2019-01-30, 1.17 - warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' - 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. - - Symbolic links are not followed. - """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {name: os.environ.get(name) for name in names} - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def exec_command(command, execute_in='', use_shell=None, use_tee=None, - _with_python = 1, **env ): - """ - Return (status,output) of executed command. - - .. deprecated:: 1.17 - Use subprocess.Popen instead - - Parameters - ---------- - command : str - A concatenated string of executable and arguments. - execute_in : str - Before running command ``cd execute_in`` and after ``cd -``. - use_shell : {bool, None}, optional - If True, execute ``sh -c command``. Default None (True) - use_tee : {bool, None}, optional - If True use tee. Default None (True) - - - Returns - ------- - res : str - Both stdout and stderr messages. - - Notes - ----- - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. 
- - """ - # 2019-01-30, 1.17 - warnings.warn('exec_command is deprecated since NumPy v1.17, use ' - 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) - log.debug('exec_command(%r,%s)' % (command, - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - st = _exec_command(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - - -def _exec_command(command, use_shell=None, use_tee = None, **env): - """ - Internal workhorse for exec_command(). - """ - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - - if os.name == 'posix' and use_shell: - # On POSIX, subprocess always uses /bin/sh, override - sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - command = [sh, '-c', ' '.join(command)] - else: - command = [sh, '-c', command] - use_shell = False - - elif os.name == 'nt' and is_sequence(command): - # On Windows, join the string for CreateProcess() ourselves as - # subprocess does it a bit differently - command = ' '.join(_quote_arg(arg) for arg in command) - - # Inherit environment by default - env = env or None - try: - # universal_newlines is set to False so that communicate() - # will return bytes. We need to decode the output ourselves - # so that Python will not raise a UnicodeDecodeError when - # it encounters an invalid character; rather, we simply replace it - proc = subprocess.Popen(command, shell=use_shell, env=env, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=False) - except EnvironmentError: - # Return 127, as os.spawn*() and /bin/sh do - return 127, '' - - text, err = proc.communicate() - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - text = text.decode(mylocale, errors='replace') - text = text.replace('\r\n', '\n') - # Another historical oddity - if text[-1:] == '\n': - text = text[:-1] - - # stdio uses bytes in python 2, so to avoid issues, we simply - # remove all non-ascii characters - if sys.version_info < (3, 0): - text = text.encode('ascii', errors='replace') - - if use_tee and text: - print(text) - return proc.returncode, text - - -def _quote_arg(arg): - """ - Quote the argument for safe use in a shell command line. - """ - # If there is a quote in the string, assume relevants parts of the - # string are already quoted (e.g. 
'-I"C:\\Program Files\\..."') - if '"' not in arg and ' ' in arg: - return '"%s"' % arg - return arg - -############################################################ diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/extension.py b/venv/lib/python3.7/site-packages/numpy/distutils/extension.py deleted file mode 100644 index 872bd53..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/extension.py +++ /dev/null @@ -1,109 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import re -from distutils.extension import Extension as old_Extension - -if sys.version_info[0] >= 3: - basestring = str - - -cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - - -class Extension(old_Extension): - """ - Parameters - ---------- - name : str - Extension name. - sources : list of str - List of source file locations relative to the top directory of - the package. - extra_compile_args : list of str - Extra command line arguments to pass to the compiler. - extra_f77_compile_args : list of str - Extra command line arguments to pass to the fortran77 compiler. - extra_f90_compile_args : list of str - Extra command line arguments to pass to the fortran90 compiler. - """ - def __init__( - self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None,): - - old_Extension.__init__( - self, name, [], - include_dirs=include_dirs, - define_macros=define_macros, - undef_macros=undef_macros, - library_dirs=library_dirs, - libraries=libraries, - runtime_library_dirs=runtime_library_dirs, - extra_objects=extra_objects, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - export_symbols=export_symbols) - - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
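
[Aside, not part of the deleted file] A hedged sketch of constructing the Extension defined above; the module and file names are illustrative:

    from numpy.distutils.extension import Extension

    ext = Extension('mypkg.flib',
                    sources=['mypkg/flib.pyf', 'mypkg/flib.f90'],
                    extra_f90_compile_args=['-fopenmp'])
    print(ext.has_f2py_sources())   # True: '.pyf' matches fortran_pyf_ext_re
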
- if isinstance(self.swig_opts, basestring): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning, stacklevel=2) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False - - def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False - -# class Extension diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 3723470..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,1032 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -import types - -from numpy.compat import open_latin1 - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.exec_command import find_executable -from numpy.distutils.compat import get_exception -from numpy.distutils import _shell_utils - -from .environment import EnvironmentConfig - -__metaclass__ = type - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. 
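The suffix regexes at the top of the deleted extension.py are what drive the `has_cxx_sources()`/`has_f2py_sources()` checks shown above. A quick standalone demonstration (the file names are hypothetical):

```python
import re

# The same suffix checks the deleted Extension class uses to decide
# whether C++ handling or f2py (.pyf / Fortran) handling is needed.
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match

sources = ['wrapper.pyf', 'core.f90', 'helpers.cc', 'setup.py']  # hypothetical
print([s for s in sources if fortran_pyf_ext_re(s)])  # ['wrapper.pyf', 'core.f90']
print([s for s in sources if cxx_ext_re(s)])          # ['helpers.cc']
```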
- All methods, except update_executables() and find_executables(), - may call the get_version() method. - - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration description is - # (, , , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropriate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool, False), - noarch = (None, None, 'noarch', str2bool, False), - debug = (None, None, 'debug', str2bool, False), - verbose = (None, None, 'verbose', str2bool, False), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), - version_cmd = ('exe.version_cmd', None, None, None, False), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), - archiver = (None, 'AR', 'ar', None, False), - ranlib = (None, 'RANLIB', 'ranlib', None, False), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), - fix = ('flags.fix', None, None, flaglist, False), - opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), - opt_f77 = ('flags.opt_f77', None, None, flaglist, False), - opt_f90 = ('flags.opt_f90', None, None, flaglist, False), - arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), - arch_f77 = ('flags.arch_f77', None, None, flaglist, False), - arch_f90 = ('flags.arch_f90', None, None, flaglist, False), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), - debug_f77 = ('flags.debug_f77', None, None, flaglist, False), - debug_f90 = ('flags.debug_f90', None, None, flaglist, False), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then 
it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. - self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. 
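Restated in isolation, the `_command_property` pattern above keeps the executables dict as the single source of truth while still exposing `compiler_f77`-style attributes. A toy sketch (class and values are illustrative):

```python
def _command_property(key):
    # same shape as the helper above: a read-only attribute backed by a dict
    def fget(self):
        assert self._is_customised
        return self.executables[key]
    return property(fget=fget)

class ToyCompiler:
    compiler_f77 = _command_property('compiler_f77')
    linker_so = _command_property('linker_so')

    def __init__(self):
        self._is_customised = True
        self.executables = {'compiler_f77': ['f77', '-g'],
                            'linker_so': ['f90', '-shared']}

tc = ToyCompiler()
print(tc.compiler_f77)   # ['f77', '-g'], read fresh from the dict
tc.executables['compiler_f77'][0] = 'gfortran'
print(tc.compiler_f77)   # ['gfortran', '-g'], no stale copy to update
```

The design choice this buys: code that mutates `self.executables` never has to remember to resynchronise a second attribute.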
- def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. - - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropriate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overridden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(self): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. 
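The candidate-resolution loop inside `find_executables()` above, reduced to a standalone sketch. Here `shutil.which` stands in for numpy.distutils' `find_executable`, and `pick_executable` is an illustrative name:

```python
from shutil import which  # stand-in for numpy.distutils.exec_command.find_executable

def pick_executable(candidates):
    """Sketch of the dedup-and-probe loop in find_executables(): walk the
    candidate names in priority order, skip blanks and repeats, and return
    the first one actually present on PATH."""
    seen = set()
    for name in candidates:
        if not name or name in seen:
            continue
        seen.add(name)
        path = which(name)
        if path:
            return path
    return None

# Environment-provided names are tried before class defaults, e.g.:
print(pick_executable(['gfortran', 'f95', 'f77']))
```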
- """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. """ - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). - """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77 = _shell_utils.NativeParser.split(f77) - f77flags = self.flag_vars.f77 - if f90: - f90 = _shell_utils.NativeParser.split(f90) - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. 
- fix = self.command_vars.compiler_fix - # NOTE: this and similar examples are probably just - # excluding --coverage flag when F90 = gfortran --coverage - # instead of putting that flag somewhere more appropriate - # this and similar examples where a Fortran compiler - # environment variable has been customized by CI or a user - # should perhaps eventually be more thoroughly tested and more - # robustly handled - if fix: - fix = _shell_utils.NativeParser.split(fix) - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags__ for extra flags - # only add them if the method is different from get_flags_ - def get_flags(tag, flags): - # note that self.flag_vars. calls self.get_flags_() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=f77+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=fix+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - for key in list(self.executables.keys()) + \ - ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch']: - if hasattr(self, key): - v = getattr(self, key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if is_f_file(src) and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) 
not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - self.spawn(command, display=display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - except DistutilsExecError: - msg = str(get_exception()) - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) 
- return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - def can_ccompiler_link(self, ccompiler): - """ - Check if the given C compiler can link objects produced by - this compiler. - """ - return True - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - - Parameters - ---------- - objects : list - List of object files to include. - output_dir : str - Output directory to place generated object files. - extra_dll_dir : str - Output directory to place extra DLL files that need to be - included on Windows. - - Returns - ------- - converted_objects : list of str - List of converted object files. - Note that the number of output files is not necessarily - the same as inputs. - - """ - raise NotImplementedError() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem', 'flang')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', - 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')), - ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' + module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' 
% (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' % (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - log.info("get_default_fcompiler: matching types: '%s'", - matching_compiler_types) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. - """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). 
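As a usage note for the factory shown above, `new_fcompiler()` followed by `customize()` is the supported entry point. This sketch assumes a NumPy that still ships numpy.distutils (it was removed in NumPy 1.26) and a Fortran compiler on PATH:

```python
from numpy.distutils import log
from numpy.distutils.fcompiler import CompilerNotFound, new_fcompiler

log.set_verbosity(1)
fc = new_fcompiler(requiref90=True)      # platform default; may return None
if fc is None:
    print('no suitable Fortran compiler class for this platform')
else:
    fc.customize()                       # required before most other methods
    try:
        print(fc.compiler_type, fc.get_version())
    except CompilerNotFound:
        print(fc.compiler_type, 'selected, but the version probe failed')
```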
- """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound): - e = get_exception() - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - f = open_latin1(file, 'r') - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - -def has_f90_header(src): - f = open_latin1(src, 'r') - line = f.readline() - f.close() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)', re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS()=` - Return a dictionary {:}. 
- """ - flags = {} - f = open_latin1(src, 'r') - i = 0 - for line in f: - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - f.close() - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/absoft.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index d14fee0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,158 +0,0 @@ - -# http://www.absoft.com/literature/osxuserguide.pdf -# http://www.absoft.com/documentation.html - -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. 
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:%s' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/compaq.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 671b3a5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,126 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -from __future__ import division, absolute_import, print_function - -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.compat import get_exception -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - 
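The empty `(?P...)` groups in this and the neighbouring `version_pattern` strings look like extraction damage; in NumPy's source they are `(?P<version>...)` named groups, which the base class extracts during version matching. A reconstructed check (the banner string is hypothetical):

```python
import re

# Reconstructed pattern: the bare (?P...) above was almost certainly
# (?P<version>...) before the angle-bracket content was stripped.
version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'

banner = 'Compaq Fortran V5.5-1877-48BBF'   # hypothetical -version output
m = re.match(version_pattern, banner)
if m:
    print(m.group('version'))   # V5.5-1877-48BBF
```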
- if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' - r' Version (?P[^\s]*).*') - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! - - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from numpy.distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError: - msg = get_exception() - if '_MSVCCompiler__root' in str(msg): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg)) - else: - raise - except IOError: - e = get_exception() - if not "vcvarsall.bat" in str(e): - print("Unexpected IOError in", __file__) - raise e - except ValueError: - e = get_exception() - if not "'path'" in str(e): - print("Unexpected ValueError in", __file__) - raise e - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/environment.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/environment.py deleted file mode 100644 index bb362d4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/environment.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import warnings -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig(object): - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = 
self._conf_keys[name] - hook, envvar, confvar, convert, append = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError(name) - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert, append = conf_desc - if convert is None: - convert = lambda x: x - var = self._hook_handler(name, hook) - if envvar is not None: - envvar_contents = os.environ.get(envvar) - if envvar_contents is not None: - envvar_contents = convert(envvar_contents) - if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': - var.extend(envvar_contents) - else: - # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 - # to keep old (overwrite flags rather than append to - # them) behavior - var = envvar_contents - else: - var = envvar_contents - if confvar is not None and self._conf: - if confvar in self._conf: - source, confvar_contents = self._conf[confvar] - var = convert(confvar_contents) - return var - - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/g95.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index e7c659b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,44 +0,0 @@ -# http://g95.sourceforge.net/ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('g95').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/gnu.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 965c670..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,564 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re -import os -import sys -import warnings -import platform -import tempfile -import hashlib -import base64 -import subprocess -from subprocess import Popen, PIPE, STDOUT -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.compat import get_exception -from numpy.distutils.system_info import system_info - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation - - -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - - -if is_win64(): - #_EXTRAFLAGS = ["-fno-leading-underscore"] - _EXTRAFLAGS = [] -else: - _EXTRAFLAGS = [] - - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77', ) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - # Strip warning(s) that may be emitted by gfortran - while version_string.startswith('gfortran: warning'): - version_string = version_string[version_string.find('\n') + 1:] - - # Gfortran versions from after 2010 will output a simple string - # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older - # gfortrans may still return long version strings (``-dumpversion`` was - # an alias for ``--version``) - if len(version_string) <= 20: - # Try to find a valid version string - m = re.search(r'([0-9.]+)', version_string) - if m: - # g77 provides a longer version string that starts with GNU - # Fortran - if version_string.startswith('GNU Fortran'): - return ('g77', m.group(1)) - - # gfortran only outputs a version string such as #.#.#, so check - # if the match is at the start of the string - elif m.start() == 0: - return ('gfortran', m.group(1)) - else: - # Output probably from --version, try harder: - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search( - r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith('0') or v.startswith('2') or v.startswith('3'): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - # If still nothing, raise an error to make the problem easy to find. 
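To make the branching above concrete, here is the short-string path as a standalone sketch (`classify_gnu_version` is an illustrative name; the real method also handles long `--version` banners and the final error case):

```python
import re

def classify_gnu_version(version_string):
    """Standalone sketch of the short-string branch above: -dumpversion
    output from modern gfortran is just digits and dots, while g77
    banners start with 'GNU Fortran'."""
    m = re.search(r'([0-9.]+)', version_string)
    if m:
        if version_string.startswith('GNU Fortran'):
            return ('g77', m.group(1))
        if m.start() == 0:
            return ('gfortran', m.group(1))
    return None

print(classify_gnu_version('7.5.0'))                # ('gfortran', '7.5.0')
print(classify_gnu_version('GNU Fortran 0.5.25'))   # ('g77', '0.5.25')
```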
- err = 'A valid Fortran version was not found in this string:\n' - raise ValueError(err + version_string) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "-dumpversion"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - suggested_f90_compiler = 'gnu95' - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform == 'darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let disutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from the Python Makefile and then we - # fall back to setting it to 10.3 to maximize the set of - # versions we can work with. This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import distutils.sysconfig as sc - g = {} - try: - get_makefile_filename = sc.get_makefile_filename - except AttributeError: - pass # i.e. PyPy - else: - filename = get_makefile_filename() - sc.parse_makefile(filename, g) - target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') - os.environ['MACOSX_DEPLOYMENT_TARGET'] = target - if target == '10.3': - s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' - warnings.warn(s, stacklevel=2) - - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." 
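Condensing `get_flags_linker_so()` above into a pure function shows the three platform cases at a glance (illustrative helper, not the numpy API; the method continues below with the SunOS append):

```python
import sys

def shared_link_flags(platform=sys.platform):
    """Sketch of the platform branching above: macOS bundles rely on
    dynamic lookup, everywhere else a plain -shared, with SunOS adding
    -mimpure-text to tolerate symbols in static libg2c.a."""
    if platform == 'darwin':
        return ['-undefined', 'dynamic_lookup', '-bundle']
    flags = ['-shared']
    if platform.startswith('sunos'):
        flags.append('-mimpure-text')
    return flags

print(shared_link_flags('linux'))   # ['-shared']
print(shared_link_flags('darwin'))  # ['-undefined', 'dynamic_lookup', '-bundle']
```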
- opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - try: - output = subprocess.check_output(self.compiler_f77 + - ['-print-libgcc-file-name']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - return os.path.dirname(output) - return None - - def get_libgfortran_dir(self): - if sys.platform[:5] == 'linux': - libgfortran_name = 'libgfortran.so' - elif sys.platform == 'darwin': - libgfortran_name = 'libgfortran.dylib' - else: - libgfortran_name = None - - libgfortran_dir = None - if libgfortran_name: - find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] - try: - output = subprocess.check_output( - self.compiler_f77 + find_lib_arg) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - libgfortran_dir = os.path.dirname(output) - return libgfortran_dir - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - path = os.path.join(d, "lib%s.a" % self.g2c) - if not os.path.exists(path): - root = os.path.join(d, *((os.pardir, ) * 4)) - d2 = os.path.abspath(os.path.join(root, 'lib')) - path = os.path.join(d2, "lib%s.a" % self.g2c) - if os.path.exists(path): - opt.append(d2) - opt.append(d) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - opt.append('gcc') - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v <= '3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
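Stepping back to the library-lookup helpers above: `-print-file-name=` is a real gcc/gfortran option, and the whole trick is checking whether a full path or just the bare name came back (assumes gfortran on PATH; `find_compiler_library` is an illustrative name):

```python
import os
import subprocess

def find_compiler_library(compiler, libname):
    """Sketch of get_libgfortran_dir() above: ask the compiler where it
    would resolve a library. gcc/gfortran echo the bare name back when
    the file is unknown, so require a path separator in the answer."""
    try:
        out = subprocess.check_output([compiler, '-print-file-name=' + libname])
    except (OSError, subprocess.CalledProcessError):
        return None
    path = out.decode().strip()
    return os.path.dirname(path) if os.sep in path else None

print(find_compiler_library('gfortran', 'libgfortran.so'))
```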
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - from distutils import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - if sys.platform[:3] == 'aix' or sys.platform == 'win32': - # Linux/Solaris/Unix support RPATH, Windows and AIX do not - raise NotImplementedError - - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - sep = ',' if sys.platform == 'darwin' else '=' - return '-Wl,-rpath%s%s' % (sep, dir) - - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran', ) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if v >= '4.': - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not - # Cygwin-Python - if sys.platform == 'win32': - for key in [ - 'version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe' - ]: - self.executables[key].append('-mno-cygwin') - return v - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "-dumpversion"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'linker_so' : ["", "-Wall", "-g"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - if sys.platform[:3] == 'aix': - executables['linker_so'].append('-lpthread') - if platform.architecture()[0][:2] == '64': - for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: - executables[key].append('-maix64') - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. 
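`_c_arch_flags()` above is just a regex scan over CFLAGS; a standalone version (the CFLAGS string is a hypothetical macOS universal build):

```python
import re

def arch_flags_from_cflags(cflags):
    """Standalone version of _c_arch_flags() above: pull every
    '-arch <name>' pair out of a CFLAGS string."""
    flags = []
    for arch in re.findall(r"-arch\s+(\w+)", cflags):
        flags += ['-arch', arch]
    return flags

print(arch_flags_from_cflags('-arch x86_64 -arch arm64 -O2'))
# ['-arch', 'x86_64', '-arch', 'arm64']
```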
- c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir, ) * 4)) - path = os.path.join(root, "lib") - mingwdir = os.path.normpath(path) - if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): - opt.append(mingwdir) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i + 1, "mingwex") - opt.insert(i + 1, "mingw32") - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass - return opt - - def get_target(self): - try: - output = subprocess.check_output(self.compiler_f77 + ['-v']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def _hash_files(self, filenames): - h = hashlib.sha1() - for fn in filenames: - with open(fn, 'rb') as f: - while True: - block = f.read(131072) - if not block: - break - h.update(block) - text = base64.b32encode(h.digest()) - if sys.version_info[0] >= 3: - text = text.decode('ascii') - return text.rstrip('=') - - def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, - chained_dlls, is_archive): - """Create a wrapper shared library for the given objects - - Return an MSVC-compatible lib - """ - - c_compiler = self.c_compiler - if c_compiler.compiler_type != "msvc": - raise ValueError("This method only supports MSVC") - - object_hash = self._hash_files(list(objects) + list(chained_dlls)) - - if is_win64(): - tag = 'win_amd64' - else: - tag = 'win32' - - basename = 'lib' + os.path.splitext( - os.path.basename(objects[0]))[0][:8] - root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag - dll_name = root_name + '.dll' - def_name = root_name + '.def' - lib_name = root_name + '.lib' - dll_path = os.path.join(extra_dll_dir, dll_name) - def_path = os.path.join(output_dir, def_name) - lib_path = os.path.join(output_dir, lib_name) - - if os.path.isfile(lib_path): - # Nothing to do - return lib_path, dll_path - - if is_archive: - objects = (["-Wl,--whole-archive"] + list(objects) + - ["-Wl,--no-whole-archive"]) - self.link_shared_object( - objects, - dll_name, - output_dir=extra_dll_dir, - extra_postargs=list(chained_dlls) + [ - '-Wl,--allow-multiple-definition', - '-Wl,--output-def,' + def_path, - '-Wl,--export-all-symbols', - '-Wl,--enable-auto-import', - '-static', - '-mlong-double-64', - ]) - - # No PowerPC! - if is_win64(): - specifier = '/MACHINE:X64' - else: - specifier = '/MACHINE:X86' - - # MSVC specific code - lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] - if not c_compiler.initialized: - c_compiler.initialize() - c_compiler.spawn([c_compiler.lib] + lib_args) - - return lib_path, dll_path - - def can_ccompiler_link(self, compiler): - # MSVC cannot link objects compiled by GNU fortran - return compiler.compiler_type not in ("msvc", ) - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - """ - if self.c_compiler.compiler_type == "msvc": - # Compile a DLL and return the lib for the DLL as - # the object. Also keep track of previous DLLs that - # we have compiled so that we can link against them. - - # If there are .a archives, assume they are self-contained - # static libraries, and build separate DLLs for each - archives = [] - plain_objects = [] - for obj in objects: - if obj.lower().endswith('.a'): - archives.append(obj) - else: - plain_objects.append(obj) - - chained_libs = [] - chained_dlls = [] - for archive in archives[::-1]: - lib, dll = self._link_wrapper_lib( - [archive], - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=True) - chained_libs.insert(0, lib) - chained_dlls.insert(0, dll) - - if not plain_objects: - return chained_libs - - lib, dll = self._link_wrapper_lib( - plain_objects, - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=False) - return [lib] + chained_libs - else: - raise ValueError("Unsupported C compiler") - - -def _can_target(cmd, arch): - """Return true if the architecture supports the -arch flag""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - os.close(fid) - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - return False - - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - - print(customized_fcompiler('gnu').get_version()) - try: - print(customized_fcompiler('g95').get_version()) - except Exception: - print(get_exception()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/hpux.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 51bad54..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,43 +0,0 @@ 
-from __future__ import division, absolute_import, print_function
-
-from numpy.distutils.fcompiler import FCompiler
-
-compilers = ['HPUXFCompiler']
-
-class HPUXFCompiler(FCompiler):
-
-    compiler_type = 'hpux'
-    description = 'HP Fortran 90 Compiler'
-    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
-
-    executables = {
-        'version_cmd'  : ["f90", "+version"],
-        'compiler_f77' : ["f90"],
-        'compiler_fix' : ["f90"],
-        'compiler_f90' : ["f90"],
-        'linker_so'    : ["ld", "-b"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-    module_dir_switch = None #XXX: fix me
-    module_include_switch = None #XXX: fix me
-    pic_flags = ['+Z']
-    def get_flags(self):
-        return self.pic_flags + ['+ppu', '+DD64']
-    def get_flags_opt(self):
-        return ['-O3']
-    def get_libraries(self):
-        return ['m']
-    def get_library_dirs(self):
-        opt = ['/usr/lib/hpux64']
-        return opt
-    def get_version(self, force=0, ok_status=[256, 0, 1]):
-        # XXX status==256 may indicate 'unrecognized option' or
-        # 'no input file'. So, version_cmd needs more work.
-        return FCompiler.get_version(self, force, ok_status)
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(10)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='hpux').get_version())
diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/ibm.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/ibm.py
deleted file mode 100644
index 70d2132..0000000
--- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/ibm.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import os
-import re
-import sys
-import subprocess
-
-from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.exec_command import find_executable
-from numpy.distutils.misc_util import make_temp_file
-from distutils import log
-
-compilers = ['IBMFCompiler']
-
-class IBMFCompiler(FCompiler):
-    compiler_type = 'ibm'
-    description = 'IBM XL Fortran Compiler'
-    version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
-    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
-
-    executables = {
-        'version_cmd'  : ["<F77>", "-qversion"],
-        'compiler_f77' : ["xlf"],
-        'compiler_fix' : ["xlf90", "-qfixed"],
-        'compiler_f90' : ["xlf90"],
-        'linker_so'    : ["xlf95"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-
-    def get_version(self,*args,**kwds):
-        version = FCompiler.get_version(self,*args,**kwds)
-
-        if version is None and sys.platform.startswith('aix'):
-            # use lslpp to find out xlf version
-            lslpp = find_executable('lslpp')
-            xlf = find_executable('xlf')
-            if os.path.exists(xlf) and os.path.exists(lslpp):
-                try:
-                    o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
-                except (OSError, subprocess.CalledProcessError):
-                    pass
-                else:
-                    m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
-                    if m: version = m.group('version')
-
-        xlf_dir = '/etc/opt/ibmcmp/xlf'
-        if version is None and os.path.isdir(xlf_dir):
-            # linux:
-            # If the output of xlf does not contain version info
-            # (that's the case with xlf 8.1, for instance) then
-            # let's try another method:
-            l = sorted(os.listdir(xlf_dir))
-            l.reverse()
-            l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
-            if l:
-                from distutils.version import LooseVersion
-                self.version = version = LooseVersion(l[0])
-        return version
-
-    def get_flags(self):
-        return ['-qextname']
-
-    def get_flags_debug(self):
-        return ['-g']
-
-    
def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0, 40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - with open(xlf_cfg, 'r') as fi: - crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/intel.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index 51f6812..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,222 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - - def runtime_library_dir_option(self, dir): - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - return '-Wl,-rpath=%s' % dir - - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
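# [Editor's sketch] get_flags_opt below gates the OpenMP spelling on the
# reported compiler version: ifort 15 renamed -openmp to -qopenmp. The
# version strings compare lexically here, exactly as in the surrounding code:
def openmp_flag(version):
    return "-openmp" if version and version < "15" else "-qopenmp"

assert openmp_flag("14.0") == "-openmp"
assert openmp_flag("16.0.2") == "-qopenmp"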
- module_include_switch = '-I' - - def get_flags_free(self): - return ['-FR'] - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model strict -O1 -{}'.format(mpopt)] - - def get_flags_arch(self): - return [] - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model strict -O1 -{}'.format(mpopt)] - - def get_flags_arch(self): - return [''] - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? - - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None], - 'compiler_fix' : [None], - 'compiler_f90' : [None], - 'linker_so' : [None], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' # No space after /Fo! - library_switch = '/OUT:' # No space after /OUT:! 
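# [Editor's sketch] The "No space after ..."/"Don't remove ending space!"
# comments around here matter because numpy.distutils builds the final
# argument by plain string concatenation (an assumption about the framework
# internals, stated here for illustration); the trailing space, or its
# absence, is part of the switch itself:
object_switch = "/Fo"            # MSVC style: value must abut the switch
module_dir_switch = "-module "   # ifort style: value is a separate token
print(object_switch + "build\\foo.obj")   # -> /Fobuild\foo.obj
print(module_dir_switch + "build/mods")   # -> -module build/mods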
- module_dir_switch = '/module:' # No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', '/assume:underscore'] - return opt - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['/4Yb', '/d2'] - - def get_flags_opt(self): - return ['/O1'] # Scipy test failures with /O2 - - def get_flags_arch(self): - return ["/arch:IA32", "/QaxSSE3"] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran Compiler for 64-bit apps' - - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - - def get_flags_arch(self): - return [''] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='intel').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/lahey.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index 1beb662..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g', '--chk', '--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d, 'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/mips.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index da337b2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import 
FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='mips').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/nag.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index cb71d54..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,84 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler', 'NAGFORCompiler'] - -class BaseNAGFCompiler(FCompiler): - version_pattern = r'NAG.* Release (?P[^(\s]*)' - - def version_match(self, version_string): - m = re.search(self.version_pattern, version_string) - if m: - return m.group('version') - else: - return None - - def get_flags_linker_so(self): - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - return [''] - -class NAGFCompiler(BaseNAGFCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return BaseNAGFCompiler.get_flags_arch(self) - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -class NAGFORCompiler(BaseNAGFCompiler): - - compiler_type = 'nagfor' - description = 'NAG Fortran Compiler' - - executables = { - 'version_cmd' : ["nagfor", "-V"], - 'compiler_f77' : ["nagfor", "-fixed"], - 'compiler_fix' : ["nagfor", "-fixed"], - 'compiler_f90' : ["nagfor"], - 'linker_so' : ["nagfor"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_debug(self): - version = self.get_version() - if version and version > '6.1': - return ['-g', '-u', '-nan', 
'-C=all', '-thread_safe', - '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] - else: - return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - compiler = customized_fcompiler(compiler='nagfor') - print(compiler.get_version()) - print(compiler.get_flags_debug()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/none.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/none.py deleted file mode 100644 index bdeea15..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils import customized_fcompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77': None, - 'compiler_f90': None, - 'compiler_fix': None, - 'linker_so': None, - 'linker_exe': None, - 'archiver': None, - 'ranlib': None, - 'version_cmd': None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - print(customized_fcompiler(compiler='none').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pathf95.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index 5de86f6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
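# [Editor's sketch] The executables tables in these classes follow a
# numpy.distutils convention: a leading '<F77>'/'<F90>' token is a
# placeholder that the framework replaces with the resolved compiler
# binary, and None means "use the default for that slot". Schematically
# (the flag shown is illustrative):
executables = {
    'version_cmd': ["<F90>", "-V"],   # '<F90>' -> e.g. 'pathf95'
    'compiler_f90': [None],           # filled in from possible_executables
}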
- module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pg.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 9c51947..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,142 +0,0 @@ -# http://www.pgroup.com -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] - - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran", "-dynamiclib"], - 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90': ["pgfortran", "-dynamiclib"], - 'linker_so': ["libtool"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran"], - 'compiler_fix': ["pgfortran", "-Mfixed"], - 'compiler_f90': ["pgfortran"], - 'linker_so': ["pgfortran"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - if platform == 'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 'dynamic_lookup'] - - else: - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - - -if sys.version_info >= (3, 5): - import functools - - class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['flang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["flang"], - 'compiler_fix': ["flang"], - 'compiler_f90': ["flang"], - 'linker_so': [None], - 'archiver': [ar_exe, "/verbose", "/OUT:"], - 'ranlib': None - } - - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '-module ' # Don't remove ending space! 
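# [Editor's sketch] runtime_library_dir_option differs per toolchain in this
# hunk: PGI accepts -R<dir>, while the GNU-style compilers emit -Wl,-rpath
# with ',' on Darwin and '=' elsewhere. Side by side:
import sys

def pgi_rpath(dir):
    return '-R%s' % dir

def gnu_rpath(dir):
    sep = ',' if sys.platform == 'darwin' else '='
    return '-Wl,-rpath%s%s' % (sep, dir)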
- - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - -else: - from numpy.distutils.fcompiler import CompilerNotFound - - # No point in supporting on older Pythons because not ABI compatible - class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - - def get_version(self): - raise CompilerNotFound('Flang unsupported on Python < 3.5') - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - if 'flang' in sys.argv: - print(customized_fcompiler(compiler='flang').get_version()) - else: - print(customized_fcompiler(compiler='pg').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/sun.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index 561ea85..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = ['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["", "-Bdynamic", "-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast', '-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu', 'sunmath', 'mvec']) - return opt - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='sun').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/vast.py b/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index adc1591..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] 
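# [Editor's sketch] Every version_pattern in these classes must expose a
# named group called 'version', which numpy.distutils extracts with
# m.group('version'); several patterns in this hunk show the group name
# as the bare '(?P' form. Demonstrated against an invented compiler banner:
import re

version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
banner = 'Lahey/Fujitsu Fortran 95 Compiler Release L6.20c'  # hypothetical
m = re.search(version_pattern, banner)
assert m and m.group('version') == 'L6.20c'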
- -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = (r'\s*Pacific-Sierra Research vf90 ' - r'(Personal|Professional)\s+(?P[^\s]*)') - - # VAST f90 does not support -o with -c. So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='vast').get_version()) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/from_template.py b/venv/lib/python3.7/site-packages/numpy/distutils/from_template.py deleted file mode 100644 index c5c1163..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/from_template.py +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/env python -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separated words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '
<p>' will be replaced with
- 'd', 's', 'z', and 'c' for each replicate of the block.
-
- <_c> is already defined: <_c=s,d,c,z>
- <_t> is already defined: <_t=real,double precision,complex,double complex>
-
- short:
- <d,s,z,c>, a short form of the named, useful when no <p>
appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while True: - m = routine_start_re.search(astr, ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr, start, m.end()): - while True: - i = astr.rfind('\n', ind, start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start, end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace(r'\,', '@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -def find_and_remove_repl_patterns(astr): - names = find_repl_patterns(astr) - astr = re.subn(named_re, '', astr)[0] - return astr, names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr, names): - substr = substr.replace(r'\>', '@rightarrow@') - substr = substr.replace(r'\<', '@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>", substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r, names.get(r, None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % 
(r)) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@', ',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)" - " for <%s=%s>. Ignoring." % - (base_rule, ','.join(rules[base_rule]), r, thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@', '>') - newstr = newstr.replace('@leftarrow@', '<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) - writestr += cleanedstr - names.update(defs) - writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - print('Including file', fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) - - -if __name__ == "__main__": - main() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/intelccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/intelccompiler.py deleted file mode 100644 index 3386775..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.ccompiler import simple_version_match -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler - - -class IntelCCompiler(UnixCCompiler): - """A modified Intel compiler compatible with a GCC-built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - 
compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - - # On Itanium, the Intel Compiler used to be called ecc, let's search for - # it (now it's also icc, so ecc is last in the search). - for cc_exe in map(find_executable, ['icc', 'ecc']): - if cc_exe: - break - - -class IntelEM64TCCompiler(UnixCCompiler): - """ - A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64' - cc_args = '-fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -if platform.system() == 'Windows': - class IntelCCompilerW(MSVCCompiler): - """ - A modified Intel compiler compatible with an MSVC-built Python. - """ - compiler_type = 'intelw' - compiler_cxx = 'icl' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?32,') - self.__version = version_match - - def initialize(self, plat_name=None): - MSVCCompiler.initialize(self, plat_name) - self.cc = self.find_exe('icl.exe') - self.lib = self.find_exe('xilib') - self.linker = self.find_exe('xilink') - self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Qstd=c99', '/Z7', '/D_DEBUG'] - - class IntelEM64TCCompilerW(IntelCCompilerW): - """ - A modified Intel x86_64 compiler compatible with - a 64bit MSVC-built Python. - """ - compiler_type = 'intelemw' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - self.__version = version_match diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/lib2def.py b/venv/lib/python3.7/site-packages/numpy/distutils/lib2def.py deleted file mode 100644 index 34b1ece..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/lib2def.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re -import sys -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). 
- -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = ['nm', '-Cs'] - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. - -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" - p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) - nm_output, nm_err = p.communicate() - if p.returncode != 0: - raise RuntimeError('failed to run "%s": "%s"' % ( - ' '.join(nm_cmd), nm_err)) - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. - -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. 
- -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = DEFAULT_NM + [str(libfile)] - nm_output = getnm(nm_cmd, shell=False) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/line_endings.py b/venv/lib/python3.7/site-packages/numpy/distutils/line_endings.py deleted file mode 100644 index fe8fd1b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/line_endings.py +++ /dev/null @@ -1,76 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -from __future__ import division, absolute_import, print_function - -import sys, re, os - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/log.py b/venv/lib/python3.7/site-packages/numpy/distutils/log.py deleted file mode 100644 index ff7de86..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/log.py +++ /dev/null @@ -1,95 +0,0 @@ -# Colored log, requires Python 2.3 or up. 
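# [Editor's sketch] set_verbosity below maps a -q/-v style integer onto the
# distutils.log thresholds; restated as a tiny standalone helper using the
# same level constants:
from distutils.log import DEBUG, INFO, WARN, ERROR

def threshold_for(verbosity):
    if verbosity < 0:
        return ERROR
    return {0: WARN, 1: INFO}.get(verbosity, DEBUG)  # 2 or more -> DEBUG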
-from __future__ import division, absolute_import, print_function - -import sys -from distutils.log import * -from distutils.log import Log as old_Log -from distutils.log import _global_log - -if sys.version_info[0] < 3: - from .misc_util import (red_text, default_text, cyan_text, green_text, - is_sequence, is_string) -else: - from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. - - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - -def get_threshold(): - return _global_log.threshold - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. -set_verbosity(0, force=True) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/venv/lib/python3.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 485a675..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/mingw32ccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index a56cc8f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,660 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. 
Force windows to use g77 - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import subprocess -import re -import textwrap - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler - -if sys.version_info[0] < 3: - from . import log -else: - from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.version import StrictVersion -from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version -from distutils.errors import (DistutilsExecError, CompileError, - UnknownFileError) -from numpy.distutils.misc_util import (msvc_runtime_library, - msvc_runtime_version, - msvc_runtime_major, - get_build_architecture) - -def get_msvcr_replacement(): - """Replacement for outdated version of get_msvcr from cygwinccompiler""" - msvcr = msvc_runtime_library() - return [] if msvcr is None else [msvcr] - -# monkey-patch cygwinccompiler with our updated version from misc_util -# to avoid getting an exception raised on Python 3.5 -distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, - dry_run, force) - - # we need to support 3.2 which doesn't match the standard - # get_versions methods regex - if self.gcc_version is None: - try: - out_string = subprocess.check_output(['gcc', '-dumpversion']) - except (OSError, CalledProcessError): - out_string = "" # ignore failures to match old behavior - result = re.search(r'(\d+\.\d+)', out_string) - if result: - self.gcc_version = StrictVersion(result.group(1)) - - # A real mingw32 doesn't need to specify a different entry point, - # but cygwin 2.91.57 in no-cygwin-mode needs it. - if self.gcc_version <= "2.91.57": - entry_point = '--entry _DllMain@12' - else: - entry_point = '' - - if self.linker_dll == 'dllwrap': - # Commented out '--driver-name g++' part that fixes weird - # g++.exe: g++: No such file or directory - # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). - # If the --driver-name part is required for some environment - # then make the inclusion of this part specific to that - # environment. - self.linker = 'dllwrap' # --driver-name g++' - elif self.linker_dll == 'gcc': - self.linker = 'g++' - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
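# [Editor's sketch] build_msvcr_library further below works by dumping a
# DLL's export table into a .def file and handing that to dlltool, which
# emits a GNU import library that MinGW can link against. The core step,
# with illustrative paths (objdump/dlltool assumed on PATH):
import subprocess

def def_to_import_lib(def_file, out_lib):
    # e.g. def_to_import_lib('libmsvcr90.def', 'libmsvcr90.a')
    return subprocess.call(['dlltool', '-d', def_file, '-l', out_lib]) == 0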
- msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = msvc_runtime_version() - if msvcr_version: - self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) - - # MS_WIN64 should be defined when building for amd64 on windows, - # but python headers define it only for MS compilers, which has all - # kind of bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - if self.gcc_version < "4.0": - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0' - ' -Wall -Wstrict-prototypes', - linker_exe='gcc -g -mno-cygwin', - linker_so='gcc -g -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - if self.gcc_version <= "3.0.0": - self.set_executables( - compiler='gcc -mno-cygwin -O2 -w', - compiler_so='gcc -mno-cygwin -mdll -O2 -w' - ' -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='%s -mno-cygwin -mdll -static %s' % - (self.linker, entry_point)) - elif self.gcc_version < "4.0": - self.set_executables( - compiler='gcc -mno-cygwin -O2 -Wall', - compiler_so='gcc -mno-cygwin -O2 -Wall' - ' -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='g++ -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables(compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished dlls - # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support - # thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropriate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - if self.gcc_version < "3.0.0": - func = distutils.cygwinccompiler.CygwinCCompiler.link - else: - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use 
normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - # We can't do much here: - # - find it in the virtualenv (sys.prefix) - # - find it in python main dir (sys.base_prefix, if in a virtualenv) - # - sys.real_prefix is main dir for virtualenvs in Python 2.7 - # - in system32, - # - ortherwise (Sxs), I don't know how to get it. - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - sub_dirs = ['', 'lib', 'bin'] - # generate possible combinations of directory trees and sub-directories - lib_dirs = [] - for stem in stems: - for folder in sub_dirs: - lib_dirs.append(os.path.join(stem, folder)) - - # add system directory as well - if 'SYSTEMROOT' in os.environ: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) - - # search in the file system for possible candidates - major_version, minor_version = tuple(sys.version_info[:2]) - patterns = ['python%d%d.dll'] - - for pat in patterns: - dllname = pat % (major_version, minor_version) - print("Looking for %s" % dllname) - for folder in lib_dirs: - dll = os.path.join(folder, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.check_output(["objdump.exe", "-p", dll]) - return st.split(b'\n') - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. - - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - with open(dfile, 'w') as d: - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. 
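-        # (WinSxS assembly directories encode architecture, name, token and
-        # version in their names, e.g. roughly
-        #     C:\WINDOWS\winsxs\x86_microsoft.vc90.crt_..._9.0.21022.8_...\
-        # -- path shown for illustration only; this is why the walk below
-        # also checks that `arch` occurs in the directory name.)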
- winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), - 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. - for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - # If the version number is None, then we couldn't find the MSVC runtime at - # all, because we are running on a Python distribution which is customed - # compiled; trust that the compiler is the same as the one available to us - # now, and that it is capable of linking with the correct runtime without - # any extra options. - msvcr_ver = msvc_runtime_major() - if msvcr_ver is None: - log.debug('Skip building import library: ' - 'Runtime is not compiled with MSVC') - return False - - # Skip using a custom library for versions < MSVC 8.0 - if msvcr_ver < 80: - log.debug('Skip building msvcr library:' - ' custom functionality not present') - return False - - msvcr_name = msvc_runtime_library() - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % - (out_file,)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % - msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _check_for_import_lib(): - """Check if an import library for the Python runtime already exists.""" - major_version, minor_version = tuple(sys.version_info[:2]) - - # patterns for the file name of the library itself - patterns = ['libpython%d%d.a', - 'libpython%d%d.dll.a', - 'libpython%d.%d.dll.a'] - - # directory trees that may contain the library - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - # possible subdirectories within those trees where it is placed - sub_dirs = ['libs', 'lib'] - - # generate a list of candidate locations - candidates = [] - for pat in patterns: - filename = pat % (major_version, minor_version) - for stem_dir in stems: - for folder in 
sub_dirs: - candidates.append(os.path.join(stem_dir, folder, filename)) - - # test the filesystem to see if we can find any of these - for fullname in candidates: - if os.path.isfile(fullname): - # already exists, in location given - return (True, fullname) - - # needs to be built, preferred location given first - return (False, candidates[0]) - -def _build_import_library_amd64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=AMD64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix, 'libs', lib_name) - if not os.path.isfile(lib_file): - # didn't find library file in virtualenv, try base distribution, too, - # and use that instead if found there. for Python 2.7 venvs, the base - # directory is in attribute real_prefix instead of base_prefix. - if hasattr(sys, 'base_prefix'): - base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) - elif hasattr(sys, 'real_prefix'): - base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) - else: - base_lib = '' # os.path.isfile('') == False - - if os.path.isfile(base_lib): - lib_file = base_lib - else: - log.warn('Cannot build import library: "%s" not found', lib_file) - return - log.info('Building import library (ARCH=x86): "%s"', out_file) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - nm_output = lib2def.getnm( - lib2def.DEFAULT_NM + [lib_file], shell=False) - dlist, flist = lib2def.parse_nm(nm_output) - with open(def_file, 'w') as fid: - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) - - dll_name = find_python_dll () - - cmd = ["dlltool", - "--dllname", dll_name, - "--def", def_file, - "--output-lib", out_file] - status = subprocess.check_output(cmd) - if status: - log.warn('Failed to build import library for gcc. Linking will fail.') - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. This is a big -# mess, thanks MS for a wonderful system. 
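-
-# Illustrative flow for the helpers below (a sketch, never executed by this
-# module itself): build the XML with msvc_manifest_xml(), write it next to
-# the binary, wrap it in an .rc stub via manifest_rc(), and let an rc
-# compiler (e.g. windres -- a toolchain assumption) produce the .res object
-# that is handed to the linker:
-#     manxml = msvc_manifest_xml(9, 0)
-#     with open('yo.exe.manifest', 'w') as f:
-#         f.write(manxml)
-#     with open('yo.rc', 'w') as f:
-#         f.write(manifest_rc('yo.exe.manifest', 'exe'))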
-
-# XXX: ideally, we should use exactly the same version as used by python. I
-# submitted a patch to get this version, but it was only included for python
-# 2.6.1 and above. So for versions below, we use a "best guess".
-_MSVCRVER_TO_FULLVER = {}
-if sys.platform == 'win32':
-    try:
-        import msvcrt
-        # I took one version in my SxS directory: no idea if it is the good
-        # one, and we can't retrieve it from python
-        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
-        _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
-        # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
-        # on Windows XP:
-        _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
-        # Python 3.7 uses 1415, but get_build_version returns 140 ??
-        _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0"
-        if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
-            major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
-            _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
-            del major, minor, rest
-    except ImportError:
-        # If we are here, means python was not built with MSVC. Not sure what
-        # to do in that case: manifest building will fail, but it should not be
-        # used in that case anyway
-        log.warn('Cannot import msvcrt: using manifest will not be possible')
-
-def msvc_manifest_xml(maj, min):
-    """Given a major and minor version of the MSVCR, returns the
-    corresponding XML file."""
-    try:
-        fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
-    except KeyError:
-        raise ValueError("Version %d,%d of MSVCRT not supported yet" %
-                         (maj, min))
-    # Don't be fooled, it looks like an XML, but it is not. In particular, it
-    # should not have any space before starting, and its size should be
-    # divisible by 4, most likely for alignment constraints when the xml is
-    # embedded in the binary...
-    # This template was copied directly from the python 2.6 binary (using
-    # strings.exe from mingw on python.exe).
-    template = textwrap.dedent("""\
-        <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
-          <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
-            <security>
-              <requestedPrivileges>
-                <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
-              </requestedPrivileges>
-            </security>
-          </trustInfo>
-          <dependency>
-            <dependentAssembly>
-              <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
-            </dependentAssembly>
-          </dependency>
-        </assembly>
-        """)
-
-    return template % {'fullver': fullver, 'maj': maj, 'min': min}
-
-def manifest_rc(name, type='dll'):
-    """Return the rc file used to generate the res file which will be embedded
-    as manifest for given manifest file name, of given type ('dll' or
-    'exe').
- - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - maj = msvc_runtime_major() - if maj: - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configtest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma = int(msver) - mi = int((msver - ma) * 10) - # Write the manifest file - manxml = msvc_manifest_xml(ma, mi) - man = open(manifest_name(config), "w") - config.temp_files.append(manifest_name(config)) - man.write(manxml) - man.close() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/misc_util.py b/venv/lib/python3.7/site-packages/numpy/distutils/misc_util.py deleted file mode 100644 index bb1699e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2373 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil -import multiprocessing -import textwrap - -import distutils -from distutils.errors import DistutilsError -try: - from threading import local as tlocal -except ImportError: - from dummy_threading import local as tlocal - -# stores temporary directory of each thread to only create one per thread -_tdata = tlocal() - -# store all created temporary directories so they can be deleted on exit -_tmpdirs = [] -def clean_up_temporary_directory(): - if _tmpdirs is not None: - for d in _tmpdirs: - try: - shutil.rmtree(d) - except OSError: - pass - -atexit.register(clean_up_temporary_directory) - -from numpy.distutils.compat import get_exception -from numpy.compat import basestring -from numpy.compat import npy_load_module - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs'] - -class InstallableLib(object): - """ - Container to hold 
information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. - - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. - - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - - -def get_num_build_jobs(): - """ - Get number of parallel build jobs set by the --parallel command line - argument of setup.py - If the command did not receive a setting the environment variable - NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of - processors on the system, with a maximum of 8 (to prevent - overloading the system if there a lot of CPUs). - - Returns - ------- - out : int - number of parallel jobs that can be run - - """ - from numpy.distutils.core import get_distribution - try: - cpu_count = len(os.sched_getaffinity(0)) - except AttributeError: - cpu_count = multiprocessing.cpu_count() - cpu_count = min(cpu_count, 8) - envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) - dist = get_distribution() - # may be None during configuration - if dist is None: - return envjobs - - # any of these three may have the job set, take the largest - cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), - getattr(dist.get_command_obj('build_ext'), 'parallel', None), - getattr(dist.get_command_obj('build_clib'), 'parallel', None)) - if all(x is None for x in cmdattr): - return envjobs - else: - return max(x for x in cmdattr if x is not None) - -def quote_args(args): - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." - splitted = name.split('/') - return os.path.join(*splitted) - -def rel_path(path, parent_path): - """Return path relative to parent_path.""" - # Use realpath to avoid issues with symlinked dirs (see gh-7707) - pd = os.path.realpath(os.path.abspath(parent_path)) - apath = os.path.realpath(os.path.abspath(path)) - if len(apath) < len(pd): - return path - if apath == pd: - return '' - if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) - path = apath[len(pd)+1:] - return path - -def get_path_from_frame(frame, parent_path=None): - """Return path of the module given a frame object from the call stack. - - Returned path is relative to parent_path when given, - otherwise it is absolute path. - """ - - # First, try to find if the file name is in the frame. - try: - caller_file = eval('__file__', frame.f_globals, frame.f_locals) - d = os.path.dirname(os.path.abspath(caller_file)) - except NameError: - # __file__ is not defined, so let's try __name__. We try this second - # because setuptools spoofs __name__ to be '__main__' even though - # sys.modules['__main__'] might be something else, like easy_install(1). 
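-        # (That is: import the module by name and take the directory of its
-        # __file__; if even that attribute is missing, fall back to the
-        # current working directory below.)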
- caller_name = eval('__name__', frame.f_globals, frame.f_locals) - __import__(caller_name) - mod = sys.modules[caller_name] - if hasattr(mod, '__file__'): - d = os.path.dirname(os.path.abspath(mod.__file__)) - else: - # we're probably running setup.py as execfile("setup.py") - # (likely we're building an egg) - d = os.path.abspath('.') - # hmm, should we use sys.argv[0] like in __builtin__ case? - - if parent_path is not None: - d = rel_path(d, parent_path) - - return d or '.' - -def njoin(*path): - """Join two or more pathname components + - - convert a /-separated pathname to one using the OS's path separator. - - resolve `..` and `.` from path. - - Either passing n arguments as in njoin('a','b'), or a sequence - of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. - """ - paths = [] - for p in path: - if is_sequence(p): - # njoin(['a', 'b'], 'c') - paths.append(njoin(*p)) - else: - assert is_string(p) - paths.append(p) - path = paths - if not path: - # njoin() - joined = '' - else: - # njoin('a', 'b') - joined = os.path.join(*path) - if os.path.sep != '/': - joined = joined.replace('/', os.path.sep) - return minrelpath(joined) - -def get_mathlibs(path=None): - """Return the MATHLIB line from numpyconfig.h - """ - if path is not None: - config_file = os.path.join(path, '_numpyconfig.h') - else: - # Look for the file in each of the numpy include directories. - dirs = get_numpy_include_dirs() - for path in dirs: - fn = os.path.join(path, '_numpyconfig.h') - if os.path.exists(fn): - config_file = fn - break - else: - raise DistutilsError('_numpyconfig.h not found in numpy include ' - 'dirs %r' % (dirs,)) - - with open(config_file) as fid: - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - return mathlibs - -def minrelpath(path): - """Resolve `..` and '.' from path. - """ - if not is_string(path): - return path - if '.' not in path: - return path - l = path.split(os.sep) - while l: - try: - i = l.index('.', 1) - except ValueError: - break - del l[i] - j = 1 - while l: - try: - i = l.index('..', j) - except ValueError: - break - if l[i-1]=='..': - j += 1 - else: - del l[i], l[i-1] - j = 1 - if not l: - return '' - return os.sep.join(l) - -def sorted_glob(fileglob): - """sorts output of python glob for https://bugs.python.org/issue30461 - to allow extensions to have reproducible build results""" - return sorted(glob.glob(fileglob)) - -def _fix_paths(paths, local_path, include_non_existing): - assert is_sequence(paths), repr(type(paths)) - new_paths = [] - assert not is_string(paths), repr(paths) - for n in paths: - if is_string(n): - if '*' in n or '?' 
in n: - p = sorted_glob(n) - p2 = sorted_glob(njoin(local_path, n)) - if p2: - new_paths.extend(p2) - elif p: - new_paths.extend(p) - else: - if include_non_existing: - new_paths.append(n) - print('could not resolve pattern in %r: %r' % - (local_path, n)) - else: - n2 = njoin(local_path, n) - if os.path.exists(n2): - new_paths.append(n2) - else: - if os.path.exists(n): - new_paths.append(n) - elif include_non_existing: - new_paths.append(n) - if not os.path.exists(n): - print('non-existing path in %r: %r' % - (local_path, n)) - - elif is_sequence(n): - new_paths.extend(_fix_paths(n, local_path, include_non_existing)) - else: - new_paths.append(n) - return [minrelpath(p) for p in new_paths] - -def gpaths(paths, local_path='', include_non_existing=True): - """Apply glob to paths and prepend local_path if needed. - """ - if is_string(paths): - paths = (paths,) - return _fix_paths(paths, local_path, include_non_existing) - -def make_temp_file(suffix='', prefix='', text=True): - if not hasattr(_tdata, 'tempdir'): - _tdata.tempdir = tempfile.mkdtemp() - _tmpdirs.append(_tdata.tempdir) - fid, name = tempfile.mkstemp(suffix=suffix, - prefix=prefix, - dir=_tdata.tempdir, - text=text) - fo = os.fdopen(fid, 'w') - return fo, name - -# Hooks for colored terminal output. -# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle -def terminal_has_colors(): - if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: - # Avoid importing curses that causes illegal operation - # with a message: - # PYTHON2 caused an invalid page fault in - # module CYGNURSES7.DLL as 015f:18bbfc28 - # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] - # ssh to Win32 machine from debian - # curses.version is 2.2 - # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) - return 0 - if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): - try: - import curses - curses.setupterm() - if (curses.tigetnum("colors") >= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(fg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path): - if sys.platform=='cygwin' and path.startswith('/cygdrive'): - path = path[10] + ':' + os.path.normcase(path[11:]) - return path - -def mingw32(): - """Return true when using mingw32 environment. 
- """ - if sys.platform=='win32': - if os.environ.get('OSTYPE', '')=='msys': - return True - if os.environ.get('MSYSTEM', '')=='MINGW32': - return True - return False - -def msvc_runtime_version(): - "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) - else: - msc_ver = None - return msc_ver - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - ver = msvc_runtime_major () - if ver: - if ver < 140: - return "msvcr%i" % ver - else: - return "vcruntime%i" % ver - else: - return None - -def msvc_runtime_major(): - "Return major version of MSVC runtime coded like get_build_msvc_version" - major = {1300: 70, # MSVC 7.0 - 1310: 71, # MSVC 7.1 - 1400: 80, # MSVC 8 - 1500: 90, # MSVC 9 (aka 2008) - 1600: 100, # MSVC 10 (aka 2010) - 1900: 140, # MSVC 14 (aka 2015) - }.get(msvc_runtime_version(), None) - return major - -######################### - -#XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - with open(source, 'r') as f: - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - return modules - -def is_string(s): - return isinstance(s, basestring) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - for item in lst: - if not is_string(item): - return False - return True - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except Exception: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' in s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. 
- """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def _commandline_dep_string(cc_args, extra_postargs, pp_opts): - """ - Return commandline representation used to determine if a file needs - to be recompiled - """ - cmdline = 'commandline: ' - cmdline += ' '.join(cc_args) - cmdline += ' '.join(extra_postargs) - cmdline += ' '.join(pp_opts) + '\n' - return cmdline - - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. - """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. - """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. 
- filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. For Python 3.2 this is implemented on - Linux, but not on OS X. - - """ - confvars = distutils.sysconfig.get_config_vars() - # SO is deprecated in 3.3.1, use EXT_SUFFIX instead - so_ext = confvars.get('EXT_SUFFIX', None) - if so_ext is None: - so_ext = confvars.get('SO', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. - if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. 
- """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration(object): - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. - """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path, package_path)): - package_path = njoin(self.local_path, package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self, n, a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path, '__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1, 3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self', f.f_globals, f.f_locals) - break - except NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. 
- - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self, n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s\n' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. - - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = npy_load_module('_'.join(n.split('.')), - setup_py, - ('.py', 'U', 1)) - if not hasattr(setup_module, 'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - def fix_args_py2(args): - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - return args - def fix_args_py3(args): - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - return args - if sys.version_info[0] < 3: - args = fix_args_py2(args) - else: - args = fix_args_py3(args) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. 
- parent_name : str - Parent name. - """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name : str - name of the subpackage - subpackage_path : str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. - standalone : bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name, subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d, dict), repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self, data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path : seq or str - Argument can be either - - * 2-sequence (, ) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths:: - - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d, p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = list(range(len(pattern_list)-1)); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping', path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - target_list.append(path_list[i]) - else: - assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list, path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list), path)) - else: - for path in paths: - self.add_data_dir((d, path)) - return - assert not is_glob_pattern(d), repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1, f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package, d, d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p, files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files : sequence - Argument(s) can be either - - * 2-sequence (,) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. 
The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. - Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. ``*``.txt -> parent/a.txt, parent/b.txt - #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt - #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. - - Examples - -------- - Add files to the list of data_files to be included with the package. - - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - / - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). 
- """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d, files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d, f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d, files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d, paths)) - return - assert not is_glob_pattern(d), repr((d, filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package, d), paths)) - - ### XXX Implement add_py_modules - - def add_define_macros(self, macros): - """Add define macros to configuration - - Add the given sequence of macro name and value duples to the beginning - of the define_macros list This list will be visible to all extension - modules of the current package. - """ - dist = self.get_distribution() - if dist is not None: - if not hasattr(dist, 'define_macros'): - dist.define_macros = [] - dist.define_macros.extend(macros) - else: - self.define_macros.extend(macros) - - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files : str or seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. - """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name, p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0], p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) 
to each path in the sequence (if needed) and - pre-pends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. 
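-
-        Examples
-        --------
-        A hypothetical single-file C extension (all names illustrative
-        only):
-
-        >>> config.add_extension('_fastmath',
-        ...                      sources=['_fastmathmodule.c'],
-        ...                      libraries=['m']) #doctest: +SKIP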
- """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. Do - not use directly""" - build_info = copy.copy(build_info) - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. 
- - Most C libraries used with `distutils` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. - - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - When cross-compiling with numpy distutils, it might be necessary to - use modified npy-pkg-config files. Using the default/generated files - will link with the host libraries (i.e. libnpymath.a). For - cross-compilation you of-course need to link with target libraries, - while using the host Python installation. - - You can copy out the numpy/core/lib/npy-pkg-config directory, add a - pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment - variable to point to the directory with the modified npy-pkg-config - files. 
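The consumer side of the machinery described above, as a hedged sketch: a different package links against the installed library by reading its npy-pkg-config file. The package name `foo` matches the docstring example; `client` and `uses_foo.c` are illustrative:

    from numpy.distutils.misc_util import Configuration, get_info

    def configuration(parent_package='', top_path=None):
        config = Configuration('client', parent_package, top_path)
        # get_info() locates foo.ini and turns its Cflags/Libs entries
        # into ready-to-use extension build options.
        config.add_extension('uses_foo', sources=['uses_foo.c'],
                             extra_info=get_info('foo'))
        return config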
- - Example npymath.ini modified for cross-compilation:: - - [meta] - Name=npymath - Description=Portable, core math library implementing C99 standard - Version=0.1 - - [variables] - pkgname=numpy.core - pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core - prefix=${pkgdir} - libdir=${prefix}/lib - includedir=${prefix}/include - - [default] - Libs=-L${libdir} -lnpymath - Cflags=-I${includedir} - Requires=mlib - - [msvc] - Libs=/LIBPATH:${libdir} npymath.lib - Cflags=/INCLUDE:${includedir} - Requires=mlib - - """ - if subst_dict is None: - subst_dict = {} - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. - - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. 
-
- Use it inside a source-generating function to ensure that the
- setup distribution instance has been initialized.
-
- Notes
- -----
- True if a Fortran 90 compiler is available (because a simple Fortran
- 90 code was able to be compiled successfully).
- """
- simple_fortran_subroutine = '''
- subroutine simple
- end
- '''
- config_cmd = self.get_config_cmd()
- flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
- return flag
-
- def append_to(self, extlib):
- """Append libraries, include_dirs to extension or library item.
- """
- if is_sequence(extlib):
- lib_name, build_info = extlib
- dict_append(build_info,
- libraries=self.libraries,
- include_dirs=self.include_dirs)
- else:
- from numpy.distutils.core import Extension
- assert isinstance(extlib, Extension), repr(extlib)
- extlib.libraries.extend(self.libraries)
- extlib.include_dirs.extend(self.include_dirs)
-
- def _get_svn_revision(self, path):
- """Return path's SVN revision number.
- """
- try:
- output = subprocess.check_output(['svnversion'], cwd=path)
- except (subprocess.CalledProcessError, OSError):
- pass
- else:
- m = re.match(rb'(?P<revision>\d+)', output)
- if m:
- return int(m.group('revision'))
-
- if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
- entries = njoin(path, '_svn', 'entries')
- else:
- entries = njoin(path, '.svn', 'entries')
- if os.path.isfile(entries):
- with open(entries) as f:
- fstr = f.read()
- if fstr[:5] == '<?xml': # pre SVN 1.6: XML entries file
- m = re.search(r'revision="(?P<revision>\d+)"', fstr)
- if m:
- return int(m.group('revision'))
- else: # non-xml entries file --- check to be sure that
- m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
- if m:
- return int(m.group('revision'))
- return None
-
- def _get_hg_revision(self, path):
- """Return path's Mercurial revision number.
- """
- try:
- output = subprocess.check_output(
- ['hg', 'identify', '--num'], cwd=path)
- except (subprocess.CalledProcessError, OSError):
- pass
- else:
- m = re.match(rb'(?P<revision>\d+)', output)
- if m:
- return int(m.group('revision'))
-
- branch_fn = njoin(path, '.hg', 'branch')
- branch_cache_fn = njoin(path, '.hg', 'branch.cache')
-
- if os.path.isfile(branch_fn):
- branch0 = None
- with open(branch_fn) as f:
- revision0 = f.read().strip()
-
- branch_map = {}
- # Note: the original used the Python 2 file() builtin here.
- with open(branch_cache_fn) as bf:
- for line in bf:
- branch1, revision1 = line.split()[:2]
- if revision1==revision0:
- branch0 = branch1
- try:
- revision1 = int(revision1)
- except ValueError:
- continue
- branch_map[branch1] = revision1
-
- return branch_map.get(branch0)
-
- return None
-
-
- def get_version(self, version_file=None, version_variable=None):
- """Try to get version string of a package.
-
- Return a version string of the current package or None if the version
- information could not be detected.
-
- Notes
- -----
- This method scans files named
- __version__.py, <packagename>_version.py, version.py, and
- __svn_version__.py for string variables version, __version__, and
- <packagename>_version, until a version number is found.
- """
- version = getattr(self, 'version', None)
- if version is not None:
- return version
-
- # Get version from version file.
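A small sketch of the lookup order that get_version() (continued below) implements; `mypkg` is illustrative and assumes a layout like mypkg/version.py:

    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('mypkg', parent_package, top_path)
        # Tries __version__.py, mypkg_version.py, version.py,
        # __svn_version__.py and __hg_version__.py in turn, then falls
        # back to the SVN or Mercurial revision number of the source tree.
        print('version:', config.get_version())
        return config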
- if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = ('.py', 'U', 1) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = npy_load_module('_'.join(n.split('.')), - fn, info) - except ImportError: - msg = get_exception() - self.warn(str(msg)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. - """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in an Mercurial repository. 
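A hedged usage sketch for the two version-file generators (the method body continues below); with delete=True the generated file is removed when Python exits, so sdist archives capture it while the working tree stays clean:

    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('mypkg', parent_package, top_path)
        # Registers a data-file generator; __hg_version__.py is written
        # during the build from the current Mercurial revision.
        config.make_hg_version_py(delete=True)
        return config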
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. - """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory. - - If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that - is returned. Otherwise, a path inside the location of the numpy module is - returned. - - The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining - customized npy-pkg-config .ini files for the cross-compilation - environment, and using them when cross-compiling. - - """ - # XXX: import here for bootstrapping reasons - import numpy - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d is not None: - return d - d = os.path.join(os.path.dirname(numpy.__file__), - 'core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): - if sys.version_info[0] >= 3: - import builtins - else: - import __builtin__ as builtins - - try: - builtins.__NUMPY_SETUP__ - return True - except AttributeError: - return False - - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. 
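The translation step inside get_info() above, unrolled as a runnable sketch (requires an installed NumPy; the 'npymath' package ships with it):

    from numpy.distutils.misc_util import get_pkg_info
    from numpy.distutils.npy_pkg_config import parse_flags

    pkg = get_pkg_info('npymath')        # parses npymath.ini
    info = parse_flags(pkg.cflags())     # -I... -> include_dirs, -D... -> macros
    for k, v in parse_flags(pkg.libs()).items():
        info[k].extend(v)                # -L... -> library_dirs, -l... -> libraries
    print(info['libraries'])             # e.g. ['npymath', 'm'] or similar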
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - ), stacklevel=2) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. - - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - with open(target, 'w') as f: - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - - # For gfortran+msvc combination, extra shared libraries may exist - f.write(textwrap.dedent(""" - import os - import sys - - extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - - if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - if sys.version_info >= (3, 8): - os.add_dll_directory(extra_dll_dir) - else: - os.environ.setdefault('PATH', '') - os.environ['PATH'] += os.pathsep + extra_dll_dir - - """)) - - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(textwrap.dedent(r''' - def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - - def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - ''')) - - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -def get_build_architecture(): - # Importing distutils.msvccompiler triggers a warning on non-Windows - # systems, so delay the import to here. 
- from distutils.msvccompiler import get_build_architecture - return get_build_architecture() diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/msvc9compiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/msvc9compiler.py deleted file mode 100644 index e9cc334..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/msvc9compiler.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if not old: - return new - if new in old: - return old - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self, plat_name=None): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib') - environ_include = os.getenv('include') - _MSVCCompiler.initialize(self, plat_name) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - ld_args.append('/MANIFEST') - _MSVCCompiler.manifest_setup_ldargs(self, output_filename, - build_temp, ld_args) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/msvccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/msvccompiler.py deleted file mode 100644 index 0cb4bf9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/msvccompiler.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. 
- new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if new in old: - return old - if not old: - return new - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib', '') - environ_include = os.getenv('include', '') - _MSVCCompiler.initialize(self) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/npy_pkg_config.py b/venv/lib/python3.7/site-packages/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 48584b4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,443 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re -import os - -if sys.version_info[0] < 3: - from ConfigParser import RawConfigParser -else: - from configparser import RawConfigParser - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(IOError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(IOError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. - These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - - flags = (' ' + line).split(' -') - for flag in flags: - flag = '-' + flag - if len(flag) > 0: - if flag.startswith('-I'): - d['include_dirs'].append(flag[2:].strip()) - elif flag.startswith('-L'): - d['library_dirs'].append(flag[2:].strip()) - elif flag.startswith('-l'): - d['libraries'].append(flag[2:].strip()) - elif flag.startswith('-D'): - d['macros'].append(flag[2:].strip()) - else: - d['ignored'].append(flag) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo(object): - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. - version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. 
- vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. - - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. - - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. - - """ - return list(self._sections.keys()) - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name, 'Description: %s' % self.description] - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet(object): - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. - - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. 
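A quick interactive sketch of the interpolation loop implemented above; the values are made up:

    from numpy.distutils.npy_pkg_config import VariableSet

    vs = VariableSet({'prefix': '/opt/mypkg', 'libdir': '${prefix}/lib'})
    # Substitution repeats until no ${var} reference remains:
    print(vs.interpolate('-L${libdir} -lnpymath'))   # -L/opt/mypkg/lib -lnpymath
    vs['prefix'] = '/usr'    # dict-style assignment re-registers the variable
    print(vs.interpolate('${libdir}'))               # /usr/lib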
- - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = dict(config.items('meta')) - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - config = RawConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. 
- - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of directories - usually including - the NumPy base directory - where to look for npy-pkg-config files. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - misc_util.get_info, misc_util.get_pkg_info - - Examples - -------- - >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') - >>> type(npymath_info) - - >>> print(npymath_info) - Name: npymath - Description: Portable, core math library implementing C99 standard - Requires: - Version: 0.1 #random - - """ - try: - return _CACHE[pkgname] - except KeyError: - v = _read_config_imp(pkg_to_filename(pkgname), dirs) - _CACHE[pkgname] = v - return v - -# TODO: -# - implements version comparison (modversion + atleast) - -# pkg-config simple emulator - useful for debugging, and maybe later to query -# the system -if __name__ == '__main__': - import sys - from optparse import OptionParser - import glob - - parser = OptionParser() - parser.add_option("--cflags", dest="cflags", action="store_true", - help="output all preprocessor and compiler flags") - parser.add_option("--libs", dest="libs", action="store_true", - help="output all linker flags") - parser.add_option("--use-section", dest="section", - help="use this section instead of default for options") - parser.add_option("--version", dest="version", action="store_true", - help="output version") - parser.add_option("--atleast-version", dest="min_version", - help="Minimal version") - parser.add_option("--list-all", dest="list_all", action="store_true", - help="Minimal version") - parser.add_option("--define-variable", dest="define_variable", - help="Replace variable with the given value") - - (options, args) = parser.parse_args(sys.argv) - - if len(args) < 2: - raise ValueError("Expect package name on the command line:") - - if options.list_all: - files = glob.glob("*.ini") - for f in files: - info = read_config(f) - print("%s\t%s - %s" % (info.name, info.name, info.description)) - - pkg_name = args[1] - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d: - info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d]) - else: - info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.']) - - if options.section: - section = options.section - else: - section = "default" - - if options.define_variable: - m = re.search(r'([\S]+)=([\S]+)', options.define_variable) - if not m: - raise ValueError("--define-variable option should be of " - "the form --define-variable=foo=bar") - else: - name = m.group(1) - value = m.group(2) - info.vars[name] = value - - if options.cflags: - print(info.cflags(section)) - if options.libs: - print(info.libs(section)) - if options.version: - print(info.version) - if options.min_version: - print(info.version >= options.min_version) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/numpy_distribution.py b/venv/lib/python3.7/site-packages/numpy/distutils/numpy_distribution.py deleted file mode 100644 index 6ae19d1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/numpy_distribution.py +++ /dev/null @@ -1,19 +0,0 @@ -# XXX: Handle setuptools ? 
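The pkg-config-style CLI in the `__main__` block above, driven from Python instead of the command line; a sketch assuming an installed NumPy, since 'npymath' and its .ini file ship with it:

    from numpy.distutils.misc_util import get_npy_pkg_dir
    from numpy.distutils.npy_pkg_config import read_config

    info = read_config('npymath', dirs=[get_npy_pkg_dir()])
    print(info.cflags('default'))   # what --cflags prints
    print(info.libs('default'))     # what --libs prints
    print(info.version)             # what --version prints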
-from __future__ import division, absolute_import, print_function - -from distutils.core import Distribution - -# This class is used because we add new files (sconscripts, and so on) with the -# scons command -class NumpyDistribution(Distribution): - def __init__(self, attrs = None): - # A list of (sconscripts, pre_hook, post_hook, src, parent_names) - self.scons_data = [] - # A list of installable libraries - self.installed_libraries = [] - # A dict of pkg_config files to generate/install - self.installed_pkg_config = {} - Distribution.__init__(self, attrs) - - def has_scons_scripts(self): - return bool(self.scons_data) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/pathccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/pathccompiler.py deleted file mode 100644 index fc9872d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/pathccompiler.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.unixccompiler import UnixCCompiler - -class PathScaleCCompiler(UnixCCompiler): - - """ - PathScale compiler compatible with an gcc built Python. - """ - - compiler_type = 'pathcc' - cc_exe = 'pathcc' - cxx_exe = 'pathCC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler, - compiler_so=cc_compiler, - compiler_cxx=cxx_compiler, - linker_exe=cc_compiler, - linker_so=cc_compiler + ' -shared') diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/setup.py b/venv/lib/python3.7/site-packages/numpy/distutils/setup.py deleted file mode 100644 index 82a53bd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/setup.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('distutils', parent_package, top_path) - config.add_subpackage('command') - config.add_subpackage('fcompiler') - config.add_data_dir('tests') - config.add_data_files('site.cfg') - config.add_data_files('mingw/gfortran_vs2003_hack.c') - config.make_config_py() - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/system_info.py b/venv/lib/python3.7/site-packages/numpy/distutils/system_info.py deleted file mode 100644 index fc7018a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/system_info.py +++ /dev/null @@ -1,2975 +0,0 @@ -#!/usr/bin/env python -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. 
Currently, the following -classes are available: - - atlas_info - atlas_threads_info - atlas_blas_info - atlas_blas_threads_info - lapack_atlas_info - lapack_atlas_threads_info - atlas_3_10_info - atlas_3_10_threads_info - atlas_3_10_blas_info, - atlas_3_10_blas_threads_info, - lapack_atlas_3_10_info - lapack_atlas_3_10_threads_info - flame_info - blas_info - lapack_info - openblas_info - openblas64__info - openblas_ilp64_info - blis_info - blas_opt_info # usage recommended - lapack_opt_info # usage recommended - blas_ilp64_opt_info # usage recommended (general ILP64 BLAS) - lapack_ilp64_opt_info # usage recommended (general ILP64 LAPACK) - blas_ilp64_plain_opt_info # usage recommended (general ILP64 BLAS, no symbol suffix) - lapack_ilp64_plain_opt_info # usage recommended (general ILP64 LAPACK, no symbol suffix) - blas64__opt_info # usage recommended (general ILP64 BLAS, 64_ symbol suffix) - lapack64__opt_info # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) - fftw_info,dfftw_info,sfftw_info - fftw_threads_info,dfftw_threads_info,sfftw_threads_info - djbfft_info - x11_info - lapack_src_info - blas_src_info - numpy_info - numarray_info - numpy_info - boost_python_info - agg2_info - wx_info - gdk_pixbuf_xlib_2_info - gdk_pixbuf_2_info - gdk_x11_2_info - gtkp_x11_2_info - gtkp_2_info - xft_info - freetype2_info - umfpack_info - -Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. -2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) - -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section ALL has options that are the default for each section. The -available sections are fftw, atlas, and x11. Appropriate defaults are -used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. ALL section in site.cfg -Only the first complete match is returned. 
- -Example: ----------- -[ALL] -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -libraries = rfftw, fftw - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -libraries = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Note that the ``libraries`` key is the default setting for libraries. - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import re -import copy -import warnings -import subprocess -import textwrap - -from glob import glob -from functools import reduce -if sys.version_info[0] < 3: - from ConfigParser import NoOptionError - from ConfigParser import RawConfigParser as ConfigParser -else: - from configparser import NoOptionError - from configparser import RawConfigParser as ConfigParser -# It seems that some people are importing ConfigParser from here so is -# good to keep its class name. Use of RawConfigParser is needed in -# order to be able to load path names with percent in them, like -# `feature%2Fcool` which is common on git flow branch names. - -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import distutils.sysconfig -from numpy.distutils import log -from distutils.util import get_platform - -from numpy.distutils.exec_command import ( - find_executable, filepath_from_subprocess_output, - get_pythonexe) -from numpy.distutils.misc_util import (is_sequence, is_string, - get_shared_lib_extension) -from numpy.distutils.command.config import config as cmd_config -from numpy.distutils.compat import get_exception -from numpy.distutils import customized_ccompiler as _customized_ccompiler -from numpy.distutils import _shell_utils -import distutils.ccompiler -import tempfile -import shutil - - -# Determine number of bits -import platform -_bits = {'32bit': 32, '64bit': 64} -platform_bits = _bits[platform.architecture()[0]] - - -global_compiler = None - -def customized_ccompiler(): - global global_compiler - if not global_compiler: - global_compiler = _customized_ccompiler() - return global_compiler - - -def _c_string_literal(s): - """ - Convert a python string into a literal suitable for inclusion into C code - """ - # only these three characters are forbidden in C strings - s = s.replace('\\', r'\\') - s = s.replace('"', r'\"') - s = s.replace('\n', r'\n') - return '"{}"'.format(s) - - -def libpaths(paths, bits): - """Return a list of library paths valid on 32 or 64 bit systems. - - Inputs: - paths : sequence - A sequence of strings (typically paths) - bits : int - An integer, the only valid values are 32 or 64. A ValueError exception - is raised otherwise. 
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(distutils.sysconfig.EXEC_PREFIX, - 'libs')] - default_runtime_dirs = [] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] - _include_dirs = [ - 'include', - 'include/suitesparse', - ] - _lib_dirs = [ - 'lib', - ] - - _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] - _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] - def add_system_root(library_root): - """Add a package manager root to the include directories""" - global default_lib_dirs - global default_include_dirs - - library_root = os.path.normpath(library_root) - - default_lib_dirs.extend( - os.path.join(library_root, d) for d in _lib_dirs) - default_include_dirs.extend( - os.path.join(library_root, d) for d in _include_dirs) - - if sys.version_info >= (3, 3): - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. - # We also don't re-implement shutil.which for Python 2.7 because - # vcpkg doesn't support MSVC 2008. 
- vcpkg = shutil.which('vcpkg')
- if vcpkg:
- vcpkg_dir = os.path.dirname(vcpkg)
- # platform.architecture() returns a (bits, linkage) tuple, so the
- # first element must be compared, not the tuple itself.
- if platform.architecture()[0] == '32bit':
- specifier = 'x86'
- else:
- specifier = 'x64'
-
- vcpkg_installed = os.path.join(vcpkg_dir, 'installed')
- for vcpkg_root in [
- os.path.join(vcpkg_installed, specifier + '-windows'),
- os.path.join(vcpkg_installed, specifier + '-windows-static'),
- ]:
- add_system_root(vcpkg_root)
-
- # Conda is another popular package manager that provides libraries
- conda = shutil.which('conda')
- if conda:
- conda_dir = os.path.dirname(conda)
- add_system_root(os.path.join(conda_dir, '..', 'Library'))
- add_system_root(os.path.join(conda_dir, 'Library'))
-
-else:
- default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
- '/opt/local/lib', '/sw/lib'], platform_bits)
- default_runtime_dirs = []
- default_include_dirs = ['/usr/local/include',
- '/opt/include', '/usr/include',
- # path of umfpack under macports
- '/opt/local/include/ufsparse',
- '/opt/local/include', '/sw/include',
- '/usr/include/suitesparse']
- default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
-
- default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
- '/usr/lib'], platform_bits)
- default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
- '/usr/include']
-
- if os.path.exists('/usr/lib/X11'):
- globbed_x11_dir = glob('/usr/lib/*/libX11.so')
- if globbed_x11_dir:
- x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
- default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
- default_x11_include_dirs.extend(['/usr/lib/X11/include',
- '/usr/include/X11'])
-
- with open(os.devnull, 'w') as tmp:
- try:
- p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE,
- stderr=tmp)
- except (OSError, DistutilsError):
- # OSError if gcc is not installed, or SandboxViolation (DistutilsError
- # subclass) if an old setuptools bug is triggered (see gh-3160).
- pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - else: - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def get_info(name, notfound_action=0): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads': atlas_threads_info, # ditto - 'atlas_blas': atlas_blas_info, - 'atlas_blas_threads': atlas_blas_threads_info, - 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto - 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead - 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto - 'atlas_3_10_blas': atlas_3_10_blas_info, - 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, - 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead - 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto - 'flame': flame_info, # use lapack_opt instead - 'mkl': mkl_info, - # openblas which may or may not have embedded lapack - 'openblas': openblas_info, # use blas_opt instead - # openblas with embedded lapack - 'openblas_lapack': openblas_lapack_info, # use blas_opt instead - 'openblas_clapack': openblas_clapack_info, # use blas_opt instead - 'blis': blis_info, # use blas_opt instead - 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead - 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'accelerate': accelerate_info, # use blas_opt instead - 'openblas64_': openblas64__info, - 'openblas64__lapack': openblas64__lapack_info, - 'openblas_ilp64': openblas_ilp64_info, - 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, - 'x11': x11_info, - 'fft_opt': fft_opt_info, - 'fftw': fftw_info, - 'fftw2': fftw2_info, - 'fftw3': fftw3_info, - 'dfftw': dfftw_info, - 'sfftw': sfftw_info, - 'fftw_threads': fftw_threads_info, - 'dfftw_threads': dfftw_threads_info, - 'sfftw_threads': sfftw_threads_info, - 'djbfft': djbfft_info, - 'blas': blas_info, # use blas_opt 
instead - 'lapack': lapack_info, # use lapack_opt instead - 'lapack_src': lapack_src_info, - 'blas_src': blas_src_info, - 'numpy': numpy_info, - 'f2py': f2py_info, - 'Numeric': Numeric_info, - 'numeric': Numeric_info, - 'numarray': numarray_info, - 'numerix': numerix_info, - 'lapack_opt': lapack_opt_info, - 'lapack_ilp64_opt': lapack_ilp64_opt_info, - 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, - 'lapack64__opt': lapack64__opt_info, - 'blas_opt': blas_opt_info, - 'blas_ilp64_opt': blas_ilp64_opt_info, - 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, - 'blas64__opt': blas64__opt_info, - 'boost_python': boost_python_info, - 'agg2': agg2_info, - 'wx': wx_info, - 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2': gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, - 'gdk': gdk_info, - 'gdk_2': gdk_2_info, - 'gdk-2.0': gdk_2_info, - 'gdk_x11_2': gdk_x11_2_info, - 'gdk-x11-2.0': gdk_x11_2_info, - 'gtkp_x11_2': gtkp_x11_2_info, - 'gtk+-x11-2.0': gtkp_x11_2_info, - 'gtkp_2': gtkp_2_info, - 'gtk+-2.0': gtkp_2_info, - 'xft': xft_info, - 'freetype2': freetype2_info, - 'umfpack': umfpack_info, - 'amd': amd_info, - }.get(name.lower(), system_info) - return cl().get_info(notfound_action) - - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - - -class AliasedOptionError(DistutilsError): - """ - Aliases should not be used in config files. - In section '{section}' we found multiple appearances of options {options}.""" - - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://github.com/math-atlas/math-atlas) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - - -class FlameNotFoundError(NotFoundError): - """ - FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [flame]).""" - - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - - -class LapackILP64NotFoundError(NotFoundError): - """ - 64-bit Lapack libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasOptNotFoundError(NotFoundError): - """ - Optimized (vendor) Blas libraries are not found. - Falling back to the netlib Blas library, which has worse performance. - Better performance can usually be gained by switching to an - optimized Blas library.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasILP64NotFoundError(NotFoundError): - """ - 64-bit Blas libraries not found. 
- Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - - -class NumericNotFoundError(NotFoundError): - """ - Numeric (https://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - - -class system_info(object): - - """ get_info() is the only public method. Don't use others. - """ - section = 'ALL' - dir_env_var = None - search_static_first = 0 # XXX: disabled by default, may disappear in - # future unless it is proved to be useful. - saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), - 'include_dirs': os.pathsep.join(default_include_dirs), - 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), - 'rpath': '', - 'src_dirs': os.pathsep.join(default_src_dirs), - 'search_static_first': str(self.search_static_first), - 'extra_compile_args': '', 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - # The extensions use runtime_library_dirs - r_dirs = self.get_runtime_lib_dirs() - # Intrinsic distutils use rpath, we simply append both entries - # as though they were one entry - r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. 
Ignoring' % (lib)) - - if r_dirs: - i = self.check_libs(r_dirs, [lib]) - if i is not None: - # Swap library keywords found to runtime_library_dirs; - # users are expected to have defined these libraries via - # library_dirs, not necessarily via runtime_library_dirs - del i['libraries'] - i['runtime_library_dirs'] = i.pop('library_dirs') - dict_append(info, **i) - else: - log.info('Runtime library %s was not found. Ignoring' % (lib)) - - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - # Update extra information - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - self.saved_results[self.__class__.__name__] = info - - def get_option_single(self, *options): - """ Ensure that only one of `options` is found in the section - - Parameters - ---------- - *options : list of str - a list of options to be found in the section (``self.section``) - - Returns - ------- - str : - the option that is uniquely found in the section - - Raises - ------ - AliasedOptionError : - in case more than one of the options is found - """ - found = map(lambda opt: self.cp.has_option(self.section, opt), options) - found = list(found) - if sum(found) == 1: - return options[found.index(True)] - elif sum(found) == 0: - # nothing is found anyway - return options[0] - - # Else we have more than 1 key found - if AliasedOptionError.__doc__ is None: - raise AliasedOptionError() - raise AliasedOptionError(AliasedOptionError.__doc__.format( - section=self.section, options='[{}]'.format(', '.join(options)))) - - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def calc_extra_info(self): - """ Updates the current information with respect to these - configuration options: - extra_compile_args - extra_link_args - """ - info = {} - for key in ['extra_compile_args', 'extra_link_args']: - # Get values - opt = self.cp.get(self.section, key) - opt = _shell_utils.NativeParser.split(opt) - if opt: - tmp = {key: opt} - dict_append(info, **tmp) - return info - - def get_info(self, notfound_action=0): - """ Return a dictionary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__, stacklevel=2) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if log.get_threshold() <= log.INFO and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...' 
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if len(d) > 0 and not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_runtime_lib_dirs(self, key='runtime_library_dirs'): - path = self.get_paths(self.section, key) - if path == ['']: - path = [] - return path - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - if hasattr(self, '_lib_names'): - return self.get_libs(key, default=self._lib_names) - else: - return self.get_libs(key, '') - - def library_extensions(self): - c = customized_ccompiler() - static_exts = [] - if c.compiler_type != 'msvc': - # MSVC doesn't understand binutils - static_exts.append('.a') - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC and others - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). - """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. 
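- - Unlike check_libs above, all candidate extensions are tried in a - single pass here, so neither shared nor static libraries are given - preference. 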
- """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - - return info - - def _find_lib(self, lib_dir, lib, exts): - assert is_string(lib_dir) - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + lib + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - lib += '.dll' - if ext == '.lib': - lib = prefix + lib - return lib - - return False - - def _find_libs(self, lib_dirs, libs, exts): - # make sure we preserve the order of libs, as it can be important - found_dirs, found_libs = [], [] - for lib in libs: - for lib_dir in lib_dirs: - found_lib = self._find_lib(lib_dir, lib, exts) - if found_lib: - found_libs.append(found_lib) - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - break - return found_dirs, found_libs - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. - """ - if not is_sequence(lib_dirs): - lib_dirs = [lib_dirs] - # First, try to find the mandatory libraries - found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) - if len(found_libs) > 0 and len(found_libs) == len(libs): - # Now, check for optional libraries - opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) - found_libs.extend(opt_found_libs) - for lib_dir in opt_found_dirs: - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - info = {'libraries': found_libs, 'library_dirs': found_dirs} - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. 
- """ - return combine_paths(*args) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - - opt = self.get_option_single(self.section + '_libs', 'libraries') - libs = self.get_libs(opt, ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - break - if flag: - dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 'srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) 
- dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKLROOT' - _lib_mkl = ['mkl_rt'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - with open(ld_so_conf, 'r') as f: - for d in f: - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += glob(os.path.join(d, 'mkl*')) - for sub_dir in dirs: - if os.path.isdir(os.path.join(sub_dir, 'lib')): - return sub_dir - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from .cpuinfo import cpu - if cpu.is_Itanium(): - plt = '64' - elif cpu.is_Intel() and cpu.is_64bit(): - plt = 'intel64' - else: - plt = '32' - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - opt = self.get_option_single('mkl_libs', 'libraries') - mkl_libs = self.get_libs(opt, self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - pass - - -class blas_mkl_info(mkl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - _lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - # FIXME: lapack_atlas is 
unused - lapack_atlas = self.check_libs2(d, ['lapack_atlas'], []) - atlas = self.check_libs2(d, atlas_libs, []) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = textwrap.dedent(""" - ********************************************************************* - Could not find lapack library within the ATLAS installation. - ********************************************************************* - """) - warnings.warn(message, stacklevel=2) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. - lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = textwrap.dedent(""" - ********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. 
- ********************************************************************* - """) % (lapack_lib, sz / 1024) - warnings.warn(message, stacklevel=2) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class atlas_3_10_info(atlas_info): - _lib_names = ['satlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_info(atlas_3_10_info): - _lib_names = ['satlas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_lib', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_3_10_threads_info(atlas_3_10_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - - -class lapack_atlas_3_10_info(atlas_3_10_info): - pass - - -class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): - pass - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('lapack_libs', 'libraries') - lapack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = 
system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. - allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr 
unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - ) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - ) - if not s: - warnings.warn(textwrap.dedent(""" - ***************************************************** - Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - - when building extension libraries that use ATLAS. - Make sure that -lgfortran is used for C++ extensions. 
- ***************************************************** - """), stacklevel=2) - dict_append(info, language='f90', - define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) - except Exception: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - break - - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION', None) - if atlas_version: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - else: - dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) - return atlas_version or '?.?.?', info - - if not s: - m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - - elif atlas_version == '3.2.1_pre3.3.6': - dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) - else: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - result = _cached_atlas_version[key] = atlas_version, info - return result - - -class lapack_opt_info(system_info): - notfounderror = LapackNotFoundError - # List of all known LAPACK libraries, in the default order - lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack'] - order_env_var_name = 'NPY_LAPACK_ORDER' - - def _calc_info_mkl(self): - info = get_info('lapack_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas_lapack') - if info: - self.set_info(**info) - return True - info = get_info('openblas_clapack') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_flame(self): - info = get_info('flame') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_threads') - if not info: - info = get_info('atlas_3_10') - if not info: - info = get_info('atlas_threads') - if not info: - info = get_info('atlas') - if info: - # Figure out if ATLAS has lapack... - # If not we need the lapack library, but not BLAS! - l = info.get('define_macros', []) - if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ - or ('ATLAS_WITHOUT_LAPACK', None) in l: - # Get LAPACK (with possible warnings) - # If not found we don't accept anything - # since we can't use ATLAS without LAPACK! 
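- # Either macro means the detected ATLAS lacks a complete LAPACK, so a - # plain LAPACK library is looked up next; if that also fails, the - # ATLAS backend is rejected. 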
- lapack_info = self._get_info_lapack() - if not lapack_info: - return False - dict_append(info, **lapack_info) - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _get_info_blas(self): - # Default to get the optimized BLAS implementation - info = get_info('blas_opt') - if not info: - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('blas_src') - if not info_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('fblas_src', info_src)]) - return info - - def _get_info_lapack(self): - info = get_info('lapack') - if not info: - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('lapack_src') - if not info_src: - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('flapack_src', info_src)]) - return info - - def _calc_info_lapack(self): - info = self._get_info_lapack() - if info: - info_blas = self._get_info_blas() - dict_append(info, **info_blas) - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - self.set_info(**info) - return True - return False - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - user_order = os.environ.get(self.order_env_var_name, None) - if user_order is None: - lapack_order = self.lapack_order - else: - # the user has requested the order of the - # check they are all in the available list, a COMMA SEPARATED list - user_order = user_order.lower().split(',') - non_existing = [] - lapack_order = [] - for order in user_order: - if order in self.lapack_order: - lapack_order.append(order) - elif len(order) > 0: - non_existing.append(order) - if len(non_existing) > 0: - raise ValueError("lapack_opt_info user defined " - "LAPACK order has unacceptable " - "values: {}".format(non_existing)) - - for lapack in lapack_order: - if self._calc_info(lapack): - return - - if 'lapack' not in lapack_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! 
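- # For example, NPY_LAPACK_ORDER=mkl,atlas restricts the search to those - # two backends in that order, while an empty NPY_LAPACK_ORDER selects no - # backend at all and only the warnings below are emitted. 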
- warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class _ilp64_opt_info_mixin: - symbol_suffix = None - symbol_prefix = None - - def _check_info(self, info): - macros = dict(info.get('define_macros', [])) - prefix = macros.get('BLAS_SYMBOL_PREFIX', '') - suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') - - if self.symbol_prefix not in (None, prefix): - return False - - if self.symbol_suffix not in (None, suffix): - return False - - return bool(info) - - -class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): - notfounderror = LapackILP64NotFoundError - lapack_order = ['openblas64_', 'openblas_ilp64'] - order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name + '_lapack') - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): - # Same as lapack_ilp64_opt_info, but fix symbol names - symbol_prefix = '' - symbol_suffix = '' - - -class lapack64__opt_info(lapack_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_opt_info(system_info): - notfounderror = BlasNotFoundError - # List of all known BLAS libraries, in the default order - blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'accelerate', 'blas'] - order_env_var_name = 'NPY_BLAS_ORDER' - - def _calc_info_mkl(self): - info = get_info('blas_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blis(self): - info = get_info('blis') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_blas_threads') - if not info: - info = get_info('atlas_3_10_blas') - if not info: - info = get_info('atlas_blas_threads') - if not info: - info = get_info('atlas_blas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blas(self): - # Warn about a non-optimized BLAS library - warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) - info = {} - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - blas = get_info('blas') - if blas: - dict_append(info, **blas) - else: - # Not even BLAS was found! 
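- # The reference implementation can still be built from the blas_src - # sources just below; if those are missing as well, give up. 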
- warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - - blas_src = get_info('blas_src') - if not blas_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return False - dict_append(info, libraries=[('fblas_src', blas_src)]) - - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - user_order = os.environ.get(self.order_env_var_name, None) - if user_order is None: - blas_order = self.blas_order - else: - # the user has requested the order of the - # check they are all in the available list - user_order = user_order.lower().split(',') - non_existing = [] - blas_order = [] - for order in user_order: - if order in self.blas_order: - blas_order.append(order) - elif len(order) > 0: - non_existing.append(order) - if len(non_existing) > 0: - raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(non_existing)) - - for blas in blas_order: - if self._calc_info(blas): - return - - if 'blas' not in blas_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): - notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64'] - order_env_var_name = 'NPY_BLAS_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name) - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '' - - -class blas64__opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blas_libs', 'libraries') - blas_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - else: - info['include_dirs'] = self.get_include_dirs() - if platform.system() == 'Windows': - # The check for windows is needed because get_cblas_libs uses the - # same compiler that was used to compile Python and msvc is - # often not installed when mingw is being used. This rough - # treatment is not desirable, but windows is tricky. - info['language'] = 'f77' # XXX: is it generally true? - else: - lib = self.get_cblas_libs(info) - if lib is not None: - info['language'] = 'c' - info['libraries'] = lib - info['define_macros'] = [('HAVE_CBLAS', None)] - self.set_info(**info) - - def get_cblas_libs(self, info): - """ Check whether we can link with CBLAS interface - - This method will search through several combinations of libraries - to check whether CBLAS is present: - - 1. Libraries in ``info['libraries']``, as is - 2. As 1. but also explicitly adding ``'cblas'`` as a library - 3. As 1. but also explicitly adding ``'blas'`` as a library - 4. Check only library ``'cblas'`` - 5. Check only library ``'blas'`` - - Parameters - ---------- - info : dict - system information dictionary for compilation and linking - - Returns - ------- - libraries : list of str or None - a list of libraries that enables the use of CBLAS interface. - Returns None if not found or a compilation error occurs. 
- - Since 1.17 returns a list. - """ - # primitive cblas check by looking for the header and trying to link - # cblas or blas - c = customized_ccompiler() - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - #include <cblas.h> - int main(int argc, const char *argv[]) - { - double a[4] = {1,2,3,4}; - double b[4] = {5,6,7,8}; - return cblas_ddot(4, a, 1, b, 1) > 10; - }""") - src = os.path.join(tmpdir, 'source.c') - try: - with open(src, 'wt') as f: - f.write(s) - - try: - # check we can compile (find headers) - obj = c.compile([src], output_dir=tmpdir, - include_dirs=self.get_include_dirs()) - except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): - return None - - # check we can link (find library) - # some systems have separate cblas and blas libs. - for libs in [info['libraries'], ['cblas'] + info['libraries'], - ['blas'] + info['libraries'], ['cblas'], ['blas']]: - try: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=libs, - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - return libs - except distutils.ccompiler.LinkError: - pass - finally: - shutil.rmtree(tmpdir) - return None - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = [] - notfounderror = BlasNotFoundError - - @property - def symbol_prefix(self): - try: - return self.cp.get(self.section, 'symbol_prefix') - except NoOptionError: - return '' - - @property - def symbol_suffix(self): - try: - return self.cp.get(self.section, 'symbol_suffix') - except NoOptionError: - return '' - - def _calc_info(self): - c = customized_ccompiler() - - lib_dirs = self.get_lib_dirs() - - # Prefer to use libraries over openblas_libs - opt = self.get_option_single('openblas_libs', 'libraries') - openblas_libs = self.get_libs(opt, self._lib_names) - - info = self.check_libs(lib_dirs, openblas_libs, []) - - if c.compiler_type == "msvc" and info is None: - from numpy.distutils.fcompiler import new_fcompiler - f = new_fcompiler(c_compiler=c) - if f and f.compiler_type == 'gnu95': - # Try gfortran-compatible library files - info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) - # Skip lapack check, we'd need build_ext to do it - skip_symbol_check = True - elif info: - skip_symbol_check = False - info['language'] = 'c' - - if info is None: - return None - - # Add extra info for OpenBLAS - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if not (skip_symbol_check or self.check_symbols(info)): - return None - - info['define_macros'] = [('HAVE_CBLAS', None)] - if self.symbol_prefix: - info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] - if self.symbol_suffix: - info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] - - return info - - def calc_info(self): - info = self._calc_info() - if info is not None: - self.set_info(**info) - - def check_msvc_gfortran_libs(self, library_dirs, libraries): - # First, find the full path to each library directory - library_paths = [] - for library in libraries: - for library_dir in library_dirs: - # MinGW static ext will be .a - fullpath = os.path.join(library_dir, library + '.a') - if os.path.isfile(fullpath): - library_paths.append(fullpath) - break - else: - return None - - # Generate numpy.distutils virtual static library file - basename = self.__class__.__name__ - tmpdir = os.path.join(os.getcwd(), 'build', basename) - if not os.path.isdir(tmpdir): - os.makedirs(tmpdir) - - info = {'library_dirs': 
[tmpdir], - 'libraries': [basename], - 'language': 'f77'} - - fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') - fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') - with open(fake_lib_file, 'w') as f: - f.write("\n".join(library_paths)) - with open(fake_clib_file, 'w') as f: - pass - - return info - - def check_symbols(self, info): - res = False - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - - prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - calls = "\n".join("%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - s = textwrap.dedent("""\ - %(prototypes)s - int main(int argc, const char *argv[]) - { - %(calls)s - return 0; - }""") % dict(prototypes=prototypes, calls=calls) - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - try: - extra_args = info['extra_link_args'] - except Exception: - extra_args = [] - if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc": - extra_args.append("/MANIFEST") - try: - with open(src, 'wt') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = ['zungqr_'] - notfounderror = BlasNotFoundError - -class openblas_clapack_info(openblas_lapack_info): - _lib_names = ['openblas', 'lapack'] - -class openblas_ilp64_info(openblas_info): - section = 'openblas_ilp64' - dir_env_var = 'OPENBLAS_ILP64' - _lib_names = ['openblas64'] - _require_symbols = ['dgemm_', 'cblas_dgemm'] - notfounderror = BlasILP64NotFoundError - - def _calc_info(self): - info = super()._calc_info() - if info is not None: - info['define_macros'] += [('HAVE_BLAS_ILP64', None)] - return info - -class openblas_ilp64_lapack_info(openblas_ilp64_info): - _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] - - def _calc_info(self): - info = super()._calc_info() - if info: - info['define_macros'] += [('HAVE_LAPACKE', None)] - return info - -class openblas64__info(openblas_ilp64_info): - # ILP64 Openblas, with default symbol suffix - section = 'openblas64_' - dir_env_var = 'OPENBLAS64_' - _lib_names = ['openblas64_'] - symbol_suffix = '64_' - symbol_prefix = '' - -class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): - pass - -class blis_info(blas_info): - section = 'blis' - dir_env_var = 'BLIS' - _lib_names = ['blis'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blis_libs', 'libraries') - blis_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs2(lib_dirs, blis_libs, []) - if info is None: - return - - # Add include dirs - incl_dirs = self.get_include_dirs() - dict_append(info, - language='c', - define_macros=[('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - - -class flame_info(system_info): - """ Usage of libflame for LAPACK operations - - This requires libflame to be compiled with lapack wrappers: - - ./configure --enable-lapack2flame ... 
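- - The check_embedded_lapack() test below verifies this by compiling and - linking a small probe that calls zungqr_(). 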
- - Be aware that libflame 5.1.0 has some missing names in the shared library, so - if you have problems, try the static flame library. - """ - section = 'flame' - _lib_names = ['flame'] - notfounderror = FlameNotFoundError - - def check_embedded_lapack(self, info): - """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""") - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - extra_args = info.get('extra_link_args', []) - try: - with open(src, 'wt') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - return True - except distutils.ccompiler.LinkError: - return False - finally: - shutil.rmtree(tmpdir) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - flame_libs = self.get_libs('libraries', self._lib_names) - - info = self.check_libs2(lib_dirs, flame_libs, []) - if info is None: - return - - if self.check_embedded_lapack(info): - # check if the user has supplied all information required - self.set_info(**info) - else: - # Try and get the BLAS lib to see if we can get it to work - blas_info = get_info('blas_opt') - if not blas_info: - # since we already failed once, this ain't going to work either - return - - # Now we need to merge the two dictionaries - for key in blas_info: - if isinstance(blas_info[key], list): - info[key] = info.get(key, []) + blas_info[key] - elif isinstance(blas_info[key], tuple): - info[key] = info.get(key, ()) + blas_info[key] - else: - info[key] = info.get(key, '') + blas_info[key] - - # Now check again - if self.check_embedded_lapack(info): - self.set_info(**info) - - -class accelerate_info(system_info): - section = 'accelerate' - _lib_names = ['accelerate', 'veclib'] - notfounderror = BlasNotFoundError - - def calc_info(self): - # Make possible to enable/disable from config file/env var - libraries = os.environ.get('ACCELERATE') - if libraries: - libraries = [libraries] - else: - libraries = self.get_libs('libraries', self._lib_names) - libraries = [lib.strip().lower() for lib in libraries] - - if (sys.platform == 'darwin' and - not os.getenv('_PYTHON_HOST_PLATFORM', None)): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if (os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/') and - 'accelerate' in libraries): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif (os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/') and - 'veclib' in libraries): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None)]) - - return - -class 
blas_src_info(system_info): - section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files? - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - _lib_names = ['X11'] - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - opt = self.get_option_single('x11_libs', 'libraries') - x11_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(distutils.sysconfig.get_python_inc( - prefix=os.sep.join(prefix))) - except ImportError: - pass - py_incl_dir = distutils.sysconfig.get_python_inc() - include_dirs.append(py_incl_dir) - py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, 
os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - _c_string_literal(vrs)), - (self.modulename.upper(), None)] - break - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. - if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy # noqa: F401 - which = "numpy", "defaulted" - except ImportError: - msg1 = str(get_exception()) - try: - import Numeric # noqa: F401 - which = "numeric", "defaulted" - except ImportError: - msg2 = str(get_exception()) - try: - import numarray # noqa: F401 - which = "numarray", "defaulted" - except ImportError: - msg3 = str(get_exception()) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." 
% (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [distutils.sysconfig.get_python_inc()] - py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - try: - o = subprocess.check_output(cmd) - except (OSError, subprocess.CalledProcessError): - pass - else: - o = filepath_from_subprocess_output(o) - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' 
\ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - _c_string_literal(version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('amd_libs', 'libraries') - amd_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info 
is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): - section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('umfpack_libs', 'libraries') - umfpack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. - """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', - 'extra_compile_args', 'extra_link_args', - 'runtime_library_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - # FIXME: r not used - r = conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git 
a/venv/lib/python3.7/site-packages/numpy/distutils/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_exec_command.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index 37912f5..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,220 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command -from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, assert_warns - -# In python 3 stdout, stderr are text (unicode compliant) devices, so to -# emulate them import StringIO from the io module. -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -class redirect_stdout(object): - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr(object): - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix(object): - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. - # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. 
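-    # (Aside added for clarity: io.StringIO is exactly such a fileno-less
-    # replacement -- it inherits fileno() from IOBase, so calling
-    # StringIO().fileno() raises io.UnsupportedOperation instead of
-    # returning a file descriptor, which is what exercises the no-fileno
-    # code path below.)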
- - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - -class TestExecCommand(object): - def setup(self): - self.pyexe = get_pythonexe() - - def check_nt(self, **kws): - s, o = exec_command.exec_command('cmd /C echo path=%path%') - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - assert_(s == 0) - assert_(o == 'win32') - - def check_posix(self, **kws): - s, o = exec_command.exec_command("echo Hello", **kws) - assert_(s == 0) - assert_(o == 'Hello') - - s, o = exec_command.exec_command('echo $AAA', **kws) - assert_(s == 0) - assert_(o == '') - - s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - assert_(s == 0) - assert_(o == 'Tere') - - s, o = exec_command.exec_command('echo "$AAA"', **kws) - assert_(s == 0) - assert_(o == '') - - if 'BBB' not in os.environ: - os.environ['BBB'] = 'Hi' - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - assert_(s == 0) - assert_(o == 'Hey') - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - del os.environ['BBB'] - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == '') - - - s, o = exec_command.exec_command('this_is_not_a_command', **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command('echo path=$PATH', **kws) - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'posix') - - def check_basic(self, *kws): - s, o = exec_command.exec_command( - '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(\'0\');' - 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == '012') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - assert_(s == 15) - assert_(o == '') - - s, o = exec_command.exec_command( - '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'Heipa') - - def check_execute_in(self, **kws): - with tempdir() as tmpdir: - fn = "file" - tmpfile = os.path.join(tmpdir, fn) - f = open(tmpfile, 'w') - f.write('Hello') - f.close() - - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % - (self.pyexe, fn), **kws) - assert_(s != 0) - assert_(o != '') - s, o = exec_command.exec_command( - '"%s" 
-c "f = open(\'%s\', \'r\'); print(f.read()); ' - 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - assert_(s == 0) - assert_(o == 'Hello') - - def test_basic(self): - with redirect_stdout(StringIO()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler.py deleted file mode 100644 index 6d245fb..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy.testing import assert_, suppress_warnings -import numpy.distutils.fcompiler - -customizable_flags = [ - ('f77', 'F77FLAGS'), - ('f90', 'F90FLAGS'), - ('free', 'FREEFLAGS'), - ('arch', 'FARCH'), - ('debug', 'FDEBUG'), - ('flags', 'FFLAGS'), - ('linker_so', 'LDFLAGS'), -] - - -def test_fcompiler_flags(monkeypatch): - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') - flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - if prev_flags is None: - assert_(new_flags == [new_flag]) - else: - assert_(new_flags == prev_flags + [new_flag]) - diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 49208aa..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import assert_ - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), - ('4.8.0', '4.8.0'), - ('4.0.3-7', '4.0.3'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", - '4.9.1'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" - "gfortran: warning: yet another 
warning\n4.9.1", - '4.9.1'), - ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') -] - -class TestG77Versions(object): - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGFortranVersions(object): - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index 5e014ba..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy.distutils.fcompiler -from numpy.testing import assert_ - - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications" - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions(object): - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions(object): - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py deleted file mode 100644 index 1c93605..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' - '6.2(Chiyoda) Build 6200', '6.2'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.1(Tozai) Build 6136', '6.1'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.0(Hibiya) Build 1021', '6.0'), - ('nagfor', 'NAG Fortran Compiler Release ' - '5.3.2(971)', '5.3.2'), - ('nag', 'NAGWare Fortran 95 compiler Release 5.1' - '(347,355-367,375,380-383,389,394,399,401-402,407,' - '431,435,437,446,459-460,463,472,494,496,503,508,' - '511,517,529,555,557,565)', '5.1')] - -class TestNagFCompilerVersions(object): - def test_version_match(self): - for comp, vs, version in nag_version_strings: - fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) - v 
= fc.version_match(vs) - assert_(v == version) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_from_template.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_from_template.py deleted file mode 100644 index 5881754..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_from_template.py +++ /dev/null @@ -1,44 +0,0 @@ - -from numpy.distutils.from_template import process_str -from numpy.testing import assert_equal - - -pyf_src = """ -python module foo - <_rd=real,double precision> - interface - subroutine foosub(tol) - <_rd>, intent(in,out) :: tol - end subroutine foosub - end interface -end python module foo -""" - -expected_pyf = """ -python module foo - interface - subroutine sfoosub(tol) - real, intent(in,out) :: tol - end subroutine sfoosub - subroutine dfoosub(tol) - double precision, intent(in,out) :: tol - end subroutine dfoosub - end interface -end python module foo -""" - - -def normalize_whitespace(s): - """ - Remove leading and trailing whitespace, and convert internal - stretches of whitespace to a single space. - """ - return ' '.join(s.split()) - - -def test_from_template(): - """Regression test for gh-10712.""" - pyf = process_str(pyf_src) - normalized_pyf = normalize_whitespace(pyf) - normalized_expected_pyf = normalize_whitespace(expected_pyf) - assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py deleted file mode 100644 index ebedacb..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py +++ /dev/null @@ -1,42 +0,0 @@ -import shutil -import subprocess -import sys -import pytest - -from numpy.distutils import mingw32ccompiler - - -@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') -def test_build_import(): - '''Test the mingw32ccompiler.build_import_library, which builds a - `python.a` from the MSVC `python.lib` - ''' - - # make sure `nm.exe` exists and supports the current python version. This - # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit - try: - out = subprocess.check_output(['nm.exe', '--help']) - except FileNotFoundError: - pytest.skip("'nm.exe' not on path, is mingw installed?") - supported = out[out.find(b'supported targets:'):] - if sys.maxsize < 2**32: - if b'pe-i386' not in supported: - raise ValueError("'nm.exe' found but it does not support 32-bit " - "dlls when using 32-bit python. Supported " - "formats: '%s'" % supported) - elif b'pe-x86-64' not in supported: - raise ValueError("'nm.exe' found but it does not support 64-bit " - "dlls when using 64-bit python. 
Supported " - "formats: '%s'" % supported) - # Hide the import library to force a build - has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() - if has_import_lib: - shutil.move(fullpath, fullpath + '.bak') - - try: - # Whew, now we can actually test the function - mingw32ccompiler.build_import_library() - - finally: - if has_import_lib: - shutil.move(fullpath + '.bak', fullpath) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_misc_util.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 3e239cf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,84 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from os.path import join, sep, dirname - -from numpy.distutils.misc_util import ( - appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info - ) -from numpy.testing import ( - assert_, assert_equal - ) - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath(object): - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath(object): - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths(object): - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - assert_(join(local_path, 'system_info.py') == f[0], repr(f)) - -class TestSharedExtension(object): - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - - -def test_installed_npymath_ini(): - # Regression test for gh-7707. 
If npymath.ini wasn't installed, then this - # will give an error. - info = get_info('npymath') - - assert isinstance(info, dict) - assert "define_macros" in info diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index 537e16e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import temppath, assert_ - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo(object): - def test_simple(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_d['cflags']) - assert_(out.libs() == simple_d['libflags']) - assert_(out.name == simple_d['name']) - assert_(out.version == simple_d['version']) - - def test_simple_variable(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple_variable) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_variable_d['cflags']) - assert_(out.libs() == simple_variable_d['libflags']) - assert_(out.name == simple_variable_d['name']) - assert_(out.version == simple_variable_d['version']) - out.vars['prefix'] = '/Users/david' - assert_(out.cflags() == '-I/Users/david/include') - -class TestParseFlags(object): - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - assert_(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_shell_utils.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_shell_utils.py deleted file mode 100644 index a034424..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_shell_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest -import subprocess -import os -import json -import sys - -from numpy.distutils import _shell_utils - -argv_cases = [ - [r'exe'], - [r'path/exe'], - [r'path\exe'], - [r'\\server\path\exe'], - [r'path to/exe'], - [r'path to\exe'], - 
- [r'exe', '--flag'], - [r'path/exe', '--flag'], - [r'path\exe', '--flag'], - [r'path to/exe', '--flag'], - [r'path to\exe', '--flag'], - - # flags containing literal quotes in their name - [r'path to/exe', '--flag-"quoted"'], - [r'path to\exe', '--flag-"quoted"'], - [r'path to/exe', '"--flag-quoted"'], - [r'path to\exe', '"--flag-quoted"'], -] - - -@pytest.fixture(params=[ - _shell_utils.WindowsParser, - _shell_utils.PosixParser -]) -def Parser(request): - return request.param - - -@pytest.fixture -def runner(Parser): - if Parser != _shell_utils.NativeParser: - pytest.skip('Unable to run with non-native parser') - - if Parser == _shell_utils.WindowsParser: - return lambda cmd: subprocess.check_output(cmd) - elif Parser == _shell_utils.PosixParser: - # posix has no non-shell string parsing - return lambda cmd: subprocess.check_output(cmd, shell=True) - else: - raise NotImplementedError - - -@pytest.mark.parametrize('argv', argv_cases) -def test_join_matches_subprocess(Parser, runner, argv): - """ - Test that join produces strings understood by subprocess - """ - # invoke python to return its arguments as json - cmd = [ - sys.executable, '-c', - 'import json, sys; print(json.dumps(sys.argv[1:]))' - ] - joined = Parser.join(cmd + argv) - json_out = runner(joined).decode() - assert json.loads(json_out) == argv - - -@pytest.mark.parametrize('argv', argv_cases) -def test_roundtrip(Parser, argv): - """ - Test that split is the inverse operation of join - """ - try: - joined = Parser.join(argv) - assert argv == Parser.split(joined) - except NotImplementedError: - pytest.skip("Not implemented") diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_system_info.py b/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_system_info.py deleted file mode 100644 index 3c76389..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/tests/test_system_info.py +++ /dev/null @@ -1,257 +0,0 @@ -from __future__ import division, print_function - -import os -import shutil -import pytest -from tempfile import mkstemp, mkdtemp -from subprocess import Popen, PIPE -from distutils.errors import DistutilsError - -from numpy.testing import assert_, assert_equal, assert_raises -from numpy.distutils import ccompiler, customized_ccompiler -from numpy.distutils.system_info import system_info, ConfigParser -from numpy.distutils.system_info import AliasedOptionError -from numpy.distutils.system_info import default_lib_dirs, default_include_dirs -from numpy.distutils import _shell_utils - - -def get_class(name, notfound_action=1): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'temp1': Temp1Info, - 'temp2': Temp2Info, - 'duplicate_options': DuplicateOptionInfo, - }.get(name.lower(), _system_info) - return cl() - -simple_site = """ -[ALL] -library_dirs = {dir1:s}{pathsep:s}{dir2:s} -libraries = {lib1:s},{lib2:s} -extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os -runtime_library_dirs = {dir1:s} - -[temp1] -library_dirs = {dir1:s} -libraries = {lib1:s} -runtime_library_dirs = {dir1:s} - -[temp2] -library_dirs = {dir2:s} -libraries = {lib2:s} -extra_link_args = -Wl,-rpath={lib2_escaped:s} -rpath = {dir2:s} - -[duplicate_options] -mylib_libs = {lib1:s} -libraries = {lib2:s} -""" -site_cfg = simple_site - -fakelib_c_text = """ -/* This file is generated from numpy/distutils/testing/test_system_info.py */ -#include -void foo(void) { - printf("Hello foo"); -} -void bar(void) { - printf("Hello bar"); -} -""" - -def 
have_compiler(): - """ Return True if there appears to be an executable compiler - """ - compiler = customized_ccompiler() - try: - cmd = compiler.compiler # Unix compilers - except AttributeError: - try: - if not compiler.initialized: - compiler.initialize() # MSVC is different - except (DistutilsError, ValueError): - return False - cmd = [compiler.cc] - try: - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - p.stdout.close() - p.stderr.close() - p.wait() - except OSError: - return False - return True - - -HAVE_COMPILER = have_compiler() - - -class _system_info(system_info): - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity=1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': '', - 'include_dirs': '', - 'runtime_library_dirs': '', - 'rpath': '', - 'src_dirs': '', - 'search_static_first': "0", - 'extra_compile_args': '', - 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - # We have to parse the config files afterwards - # to have a consistent temporary filepath - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Override _check_libs to return with all dirs """ - info = {'libraries': libs, 'library_dirs': lib_dirs} - return info - - -class Temp1Info(_system_info): - """For testing purposes""" - section = 'temp1' - - -class Temp2Info(_system_info): - """For testing purposes""" - section = 'temp2' - -class DuplicateOptionInfo(_system_info): - """For testing purposes""" - section = 'duplicate_options' - - -class TestSystemInfoReading(object): - - def setup(self): - """ Create the libraries """ - # Create 2 sources and 2 libraries - self._dir1 = mkdtemp() - self._src1 = os.path.join(self._dir1, 'foo.c') - self._lib1 = os.path.join(self._dir1, 'libfoo.so') - self._dir2 = mkdtemp() - self._src2 = os.path.join(self._dir2, 'bar.c') - self._lib2 = os.path.join(self._dir2, 'libbar.so') - # Update local site.cfg - global simple_site, site_cfg - site_cfg = simple_site.format(**{ - 'dir1': self._dir1, - 'lib1': self._lib1, - 'dir2': self._dir2, - 'lib2': self._lib2, - 'pathsep': os.pathsep, - 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2]) - }) - # Write site.cfg - fd, self._sitecfg = mkstemp() - os.close(fd) - with open(self._sitecfg, 'w') as fd: - fd.write(site_cfg) - # Write the sources - with open(self._src1, 'w') as fd: - fd.write(fakelib_c_text) - with open(self._src2, 'w') as fd: - fd.write(fakelib_c_text) - # We create all class-instances - - def site_and_parse(c, site_cfg): - c.files = [site_cfg] - c.parse_config_files() - return c - self.c_default = site_and_parse(get_class('default'), self._sitecfg) - self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) - self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) - self.c_dup_options = site_and_parse(get_class('duplicate_options'), - self._sitecfg) - - - def teardown(self): - # Do each removal separately - try: - shutil.rmtree(self._dir1) - except Exception: - pass - try: - shutil.rmtree(self._dir2) - except Exception: - pass - try: - os.remove(self._sitecfg) - except Exception: - pass - - def test_all(self): - # Read in all information in the ALL block - tsi = self.c_default - assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) - - def 
test_temp1(self): - # Read in all information in the temp1 block - tsi = self.c_temp1 - assert_equal(tsi.get_lib_dirs(), [self._dir1]) - assert_equal(tsi.get_libraries(), [self._lib1]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - - def test_temp2(self): - # Read in all information in the temp2 block - tsi = self.c_temp2 - assert_equal(tsi.get_lib_dirs(), [self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib2]) - # Now from rpath and not runtime_library_dirs - assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - - def test_duplicate_options(self): - # Ensure that duplicates are raising an AliasedOptionError - tsi = self.c_dup_options - assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") - assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) - assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - def test_compile1(self): - # Compile source and link the first source - c = customized_ccompiler() - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir1) - c.compile([os.path.basename(self._src1)], output_dir=self._dir1) - # Ensure that the object exists - assert_(os.path.isfile(self._src1.replace('.c', '.o')) or - os.path.isfile(self._src1.replace('.c', '.obj'))) - finally: - os.chdir(previousDir) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), - reason="Fails with MSVC compiler ") - def test_compile2(self): - # Compile source and link the second source - tsi = self.c_temp2 - c = customized_ccompiler() - extra_link_args = tsi.calc_extra_info()['extra_link_args'] - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir2) - c.compile([os.path.basename(self._src2)], output_dir=self._dir2, - extra_postargs=extra_link_args) - # Ensure that the object exists - assert_(os.path.isfile(self._src2.replace('.c', '.o'))) - finally: - os.chdir(previousDir) diff --git a/venv/lib/python3.7/site-packages/numpy/distutils/unixccompiler.py b/venv/lib/python3.7/site-packages/numpy/distutils/unixccompiler.py deleted file mode 100644 index 11b2cce..0000000 --- a/venv/lib/python3.7/site-packages/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. - -""" -from __future__ import division, absolute_import, print_function - -import os - -from distutils.errors import DistutilsExecError, CompileError -from distutils.unixccompiler import * -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.compat import get_exception -from numpy.distutils.misc_util import _commandline_dep_string - -if sys.version_info[0] < 3: - from . 
import log -else: - from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - # ensure OPT environment variable is read - if 'OPT' in os.environ: - from distutils.sysconfig import get_config_vars - opt = " ".join(os.environ['OPT'].split()) - gcv_opt = " ".join(get_config_vars('OPT')[0].split()) - ccomp_s = " ".join(self.compiler_so) - if opt not in ccomp_s: - ccomp_s = ccomp_s.replace(gcv_opt, opt) - self.compiler_so = ccomp_s.split() - llink_s = " ".join(self.linker_so) - if opt not in llink_s: - self.linker_so = llink_s.split() + opt.split() - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - - # gcc style automatic dependencies, outputs a makefile (-MF) that lists - # all headers needed by a c file as a side effect of compilation (-MMD) - if getattr(self, '_auto_depends', False): - deps = ['-MMD', '-MF', obj + '.d'] - else: - deps = [] - - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + - extra_postargs, display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - - # add commandline flags to dependency file - if deps: - with open(obj + '.d', 'a') as f: - f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. - # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except (IOError, OSError): - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. 
Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. - if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/venv/lib/python3.7/site-packages/numpy/doc/__init__.py b/venv/lib/python3.7/site-packages/numpy/doc/__init__.py deleted file mode 100644 index b6f1fa7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -ref_dir = os.path.join(os.path.dirname(__file__)) - -__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and - not f.startswith('__')) - -for f in __all__: - __import__(__name__ + '.' + f) - -del f, ref_dir - -__doc__ = """\ -Topical documentation -===================== - -The following topics are available: -%s - -You can view them by - ->>> help(np.doc.TOPIC) #doctest: +SKIP - -""" % '\n- '.join([''] + __all__) - -__all__.extend(['__doc__']) diff --git a/venv/lib/python3.7/site-packages/numpy/doc/basics.py b/venv/lib/python3.7/site-packages/numpy/doc/basics.py deleted file mode 100644 index c05f347..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/basics.py +++ /dev/null @@ -1,342 +0,0 @@ -""" -============ -Array basics -============ - -Array types and conversions between types -========================================= - -NumPy supports a much greater variety of numerical types than Python does. -This section shows which are available, and how to modify an array's data-type. - -The primitive types supported are tied closely to those in C: - -.. list-table:: - :header-rows: 1 - - * - Numpy type - - C type - - Description - - * - `np.bool_` - - ``bool`` - - Boolean (True or False) stored as a byte - - * - `np.byte` - - ``signed char`` - - Platform-defined - - * - `np.ubyte` - - ``unsigned char`` - - Platform-defined - - * - `np.short` - - ``short`` - - Platform-defined - - * - `np.ushort` - - ``unsigned short`` - - Platform-defined - - * - `np.intc` - - ``int`` - - Platform-defined - - * - `np.uintc` - - ``unsigned int`` - - Platform-defined - - * - `np.int_` - - ``long`` - - Platform-defined - - * - `np.uint` - - ``unsigned long`` - - Platform-defined - - * - `np.longlong` - - ``long long`` - - Platform-defined - - * - `np.ulonglong` - - ``unsigned long long`` - - Platform-defined - - * - `np.half` / `np.float16` - - - - Half precision float: - sign bit, 5 bits exponent, 10 bits mantissa - - * - `np.single` - - ``float`` - - Platform-defined single precision float: - typically sign bit, 8 bits exponent, 23 bits mantissa - - * - `np.double` - - ``double`` - - Platform-defined double precision float: - typically sign bit, 11 bits exponent, 52 bits mantissa. - - * - `np.longdouble` - - ``long double`` - - Platform-defined extended-precision float - - * - `np.csingle` - - ``float complex`` - - Complex number, represented by two single-precision floats (real and imaginary components) - - * - `np.cdouble` - - ``double complex`` - - Complex number, represented by two double-precision floats (real and imaginary components). 
- - * - `np.clongdouble` - - ``long double complex`` - - Complex number, represented by two extended-precision floats (real and imaginary components). - - -Since many of these have platform-dependent definitions, a set of fixed-size -aliases are provided: - -.. list-table:: - :header-rows: 1 - - * - Numpy type - - C type - - Description - - * - `np.int8` - - ``int8_t`` - - Byte (-128 to 127) - - * - `np.int16` - - ``int16_t`` - - Integer (-32768 to 32767) - - * - `np.int32` - - ``int32_t`` - - Integer (-2147483648 to 2147483647) - - * - `np.int64` - - ``int64_t`` - - Integer (-9223372036854775808 to 9223372036854775807) - - * - `np.uint8` - - ``uint8_t`` - - Unsigned integer (0 to 255) - - * - `np.uint16` - - ``uint16_t`` - - Unsigned integer (0 to 65535) - - * - `np.uint32` - - ``uint32_t`` - - Unsigned integer (0 to 4294967295) - - * - `np.uint64` - - ``uint64_t`` - - Unsigned integer (0 to 18446744073709551615) - - * - `np.intp` - - ``intptr_t`` - - Integer used for indexing, typically the same as ``ssize_t`` - - * - `np.uintp` - - ``uintptr_t`` - - Integer large enough to hold a pointer - - * - `np.float32` - - ``float`` - - - - * - `np.float64` / `np.float_` - - ``double`` - - Note that this matches the precision of the builtin python `float`. - - * - `np.complex64` - - ``float complex`` - - Complex number, represented by two 32-bit floats (real and imaginary components) - - * - `np.complex128` / `np.complex_` - - ``double complex`` - - Note that this matches the precision of the builtin python `complex`. - - -NumPy numerical types are instances of ``dtype`` (data-type) objects, each -having unique characteristics. Once you have imported NumPy using - - :: - - >>> import numpy as np - -the dtypes are available as ``np.bool_``, ``np.float32``, etc. - -Advanced types, not listed in the table above, are explored in -section :ref:`structured_arrays`. - -There are 5 basic numerical types representing booleans (bool), integers (int), -unsigned integers (uint) floating point (float) and complex. Those with numbers -in their name indicate the bitsize of the type (i.e. how many bits are needed -to represent a single value in memory). Some types, such as ``int`` and -``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit -vs. 64-bit machines). This should be taken into account when interfacing -with low-level code (such as C or Fortran) where the raw memory is addressed. - -Data-types can be used as functions to convert python numbers to array scalars -(see the array scalar section for an explanation), python sequences of numbers -to arrays of that type, or as arguments to the dtype keyword that many numpy -functions or methods accept. Some examples:: - - >>> import numpy as np - >>> x = np.float32(1.0) - >>> x - 1.0 - >>> y = np.int_([1,2,4]) - >>> y - array([1, 2, 4]) - >>> z = np.arange(3, dtype=np.uint8) - >>> z - array([0, 1, 2], dtype=uint8) - -Array types can also be referred to by character codes, mostly to retain -backward compatibility with older packages such as Numeric. Some -documentation may still refer to these, for example:: - - >>> np.array([1, 2, 3], dtype='f') - array([ 1., 2., 3.], dtype=float32) - -We recommend using dtype objects instead. - -To convert the type of an array, use the .astype() method (preferred) or -the type itself as a function. For example: :: - - >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE - array([ 0., 1., 2.]) - >>> np.int8(z) - array([0, 1, 2], dtype=int8) - -Note that, above, we use the *Python* float object as a dtype. 
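The builtin ``int`` and
-``bool`` work the same way; a small added doctest sketch, reusing the
-``z`` defined above::
-
-    >>> z.astype(bool)
-    array([False,  True,  True])
-    >>> np.array([1.5, 2.5], dtype=int)
-    array([1, 2])
-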
NumPy knows -that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``, -that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``. -The other data-types do not have Python equivalents. - -To determine the type of an array, look at the dtype attribute:: - - >>> z.dtype - dtype('uint8') - -dtype objects also contain information about the type, such as its bit-width -and its byte-order. The data type can also be used indirectly to query -properties of the type, such as whether it is an integer:: - - >>> d = np.dtype(int) - >>> d - dtype('int32') - - >>> np.issubdtype(d, np.integer) - True - - >>> np.issubdtype(d, np.floating) - False - - -Array Scalars -============= - -NumPy generally returns elements of arrays as array scalars (a scalar -with an associated dtype). Array scalars differ from Python scalars, but -for the most part they can be used interchangeably (the primary -exception is for versions of Python older than v2.x, where integer array -scalars cannot act as indices for lists and tuples). There are some -exceptions, such as when code requires very specific attributes of a scalar -or when it checks specifically whether a value is a Python scalar. Generally, -problems are easily fixed by explicitly converting array scalars -to Python scalars, using the corresponding Python type function -(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). - -The primary advantage of using array scalars is that -they preserve the array type (Python may not have a matching scalar type -available, e.g. ``int16``). Therefore, the use of array scalars ensures -identical behaviour between arrays and scalars, irrespective of whether the -value is inside an array or not. NumPy scalars also have many of the same -methods arrays do. - -Overflow Errors -=============== - -The fixed size of NumPy numeric types may cause overflow errors when a value -requires more memory than available in the data type. For example, -`numpy.power` evaluates ``100 * 10 ** 8`` correctly for 64-bit integers, -but gives 1874919424 (incorrect) for a 32-bit integer. - - >>> np.power(100, 8, dtype=np.int64) - 10000000000000000 - >>> np.power(100, 8, dtype=np.int32) - 1874919424 - -The behaviour of NumPy and Python integer types differs significantly for -integer overflows and may confuse users expecting NumPy integers to behave -similar to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is -flexible. This means Python integers may expand to accommodate any integer and -will not overflow. - -NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the -minimum or maximum values of NumPy integer and floating point values -respectively :: - - >>> np.iinfo(int) # Bounds of the default integer on this system. - iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64) - >>> np.iinfo(np.int32) # Bounds of a 32-bit integer - iinfo(min=-2147483648, max=2147483647, dtype=int32) - >>> np.iinfo(np.int64) # Bounds of a 64-bit integer - iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64) - -If 64-bit integers are still too small the result may be cast to a -floating point number. Floating point numbers offer a larger, but inexact, -range of possible values. - - >>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int - 0 - >>> np.power(100, 100, dtype=np.float64) - 1e+200 - -Extended Precision -================== - -Python's floating-point numbers are usually 64-bit floating-point numbers, -nearly equivalent to ``np.float64``. 
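A quick added doctest
-sketch to check that equivalence (``sys`` is the standard-library module)::
-
-    >>> import sys
-    >>> np.finfo(np.float64).eps == sys.float_info.epsilon
-    True
-    >>> np.finfo(np.float64).max == sys.float_info.max
-    True
-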
In some unusual situations it may be
-useful to use floating-point numbers with more precision. Whether this
-is possible in numpy depends on the hardware and on the development
-environment: specifically, x86 machines provide hardware floating-point
-with 80-bit precision, and while most C compilers provide this as their
-``long double`` type, MSVC (standard for Windows builds) makes
-``long double`` identical to ``double`` (64 bits). NumPy makes the
-compiler's ``long double`` available as ``np.longdouble`` (and
-``np.clongdouble`` for the complex numbers). You can find out what your
-numpy provides with ``np.finfo(np.longdouble)``.
-
-NumPy does not provide a dtype with more precision than C's
-``long double``; in particular, the 128-bit IEEE quad precision
-data type (FORTRAN's ``REAL*16``) is not available.
-
-For efficient memory alignment, ``np.longdouble`` is usually stored
-padded with zero bits, either to 96 or 128 bits. Which is more efficient
-depends on hardware and development environment; typically on 32-bit
-systems they are padded to 96 bits, while on 64-bit systems they are
-typically padded to 128 bits. ``np.longdouble`` is padded to the system
-default; ``np.float96`` and ``np.float128`` are provided for users who
-want specific padding. In spite of the names, ``np.float96`` and
-``np.float128`` provide only as much precision as ``np.longdouble``,
-that is, 80 bits on most x86 machines and 64 bits in standard
-Windows builds.
-
-Be warned that even if ``np.longdouble`` offers more precision than
-python ``float``, it is easy to lose that extra precision, since
-python often forces values to pass through ``float``. For example,
-the ``%`` formatting operator requires its arguments to be converted
-to standard python types, and it is therefore impossible to preserve
-extended precision even if many decimal places are requested. It can
-be useful to test your code with the value
-``1 + np.finfo(np.longdouble).eps``.
-
-"""
-from __future__ import division, absolute_import, print_function
diff --git a/venv/lib/python3.7/site-packages/numpy/doc/broadcasting.py b/venv/lib/python3.7/site-packages/numpy/doc/broadcasting.py
deleted file mode 100644
index cb548a0..0000000
--- a/venv/lib/python3.7/site-packages/numpy/doc/broadcasting.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""
-========================
-Broadcasting over arrays
-========================
-
-.. note::
-    See `this article
-    `_
-    for illustrations of broadcasting concepts.
-
-
-The term broadcasting describes how numpy treats arrays with different
-shapes during arithmetic operations. Subject to certain constraints,
-the smaller array is "broadcast" across the larger array so that they
-have compatible shapes. Broadcasting provides a means of vectorizing
-array operations so that looping occurs in C instead of Python. It does
-this without making needless copies of data and usually leads to
-efficient algorithm implementations. There are, however, cases where
-broadcasting is a bad idea because it leads to inefficient use of memory
-that slows computation.
-
-NumPy operations are usually done on pairs of arrays on an
-element-by-element basis. In the simplest case, the two arrays must
-have exactly the same shape, as in the following example:
-
-  >>> a = np.array([1.0, 2.0, 3.0])
-  >>> b = np.array([2.0, 2.0, 2.0])
-  >>> a * b
-  array([ 2.,  4.,  6.])
-
-NumPy's broadcasting rule relaxes this constraint when the arrays'
-shapes meet certain conditions.
The simplest broadcasting example occurs -when an array and a scalar value are combined in an operation: - ->>> a = np.array([1.0, 2.0, 3.0]) ->>> b = 2.0 ->>> a * b -array([ 2., 4., 6.]) - -The result is equivalent to the previous example where ``b`` was an array. -We can think of the scalar ``b`` being *stretched* during the arithmetic -operation into an array with the same shape as ``a``. The new elements in -``b`` are simply copies of the original scalar. The stretching analogy is -only conceptual. NumPy is smart enough to use the original scalar value -without actually making copies so that broadcasting operations are as -memory and computationally efficient as possible. - -The code in the second example is more efficient than that in the first -because broadcasting moves less memory around during the multiplication -(``b`` is a scalar rather than an array). - -General Broadcasting Rules -========================== -When operating on two arrays, NumPy compares their shapes element-wise. -It starts with the trailing dimensions and works its way forward. Two -dimensions are compatible when - -1) they are equal, or -2) one of them is 1 - -If these conditions are not met, a -``ValueError: operands could not be broadcast together`` exception is -thrown, indicating that the arrays have incompatible shapes. The size of -the resulting array is the size that is not 1 along each axis of the inputs. - -Arrays do not need to have the same *number* of dimensions. For example, -if you have a ``256x256x3`` array of RGB values, and you want to scale -each color in the image by a different value, you can multiply the image -by a one-dimensional array with 3 values. Lining up the sizes of the -trailing axes of these arrays according to the broadcast rules, shows that -they are compatible:: - - Image (3d array): 256 x 256 x 3 - Scale (1d array): 3 - Result (3d array): 256 x 256 x 3 - -When either of the dimensions compared is one, the other is -used. In other words, dimensions with size 1 are stretched or "copied" -to match the other. 
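-As a minimal runnable illustration of this rule, a ``(3, 1)`` array combined
-with a ``(4,)`` array produces a ``(3, 4)`` result (the values here are our
-own, chosen only for the example)::
-
-    >>> a = np.arange(3).reshape(3, 1)
-    >>> b = np.array([1.0, 10.0, 100.0, 1000.0])
-    >>> (a + b).shape
-    (3, 4)
-    >>> a + b
-    array([[   1.,   10.,  100., 1000.],
-           [   2.,   11.,  101., 1001.],
-           [   3.,   12.,  102., 1002.]])
-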
- -In the following example, both the ``A`` and ``B`` arrays have axes with -length one that are expanded to a larger size during the broadcast -operation:: - - A (4d array): 8 x 1 x 6 x 1 - B (3d array): 7 x 1 x 5 - Result (4d array): 8 x 7 x 6 x 5 - -Here are some more examples:: - - A (2d array): 5 x 4 - B (1d array): 1 - Result (2d array): 5 x 4 - - A (2d array): 5 x 4 - B (1d array): 4 - Result (2d array): 5 x 4 - - A (3d array): 15 x 3 x 5 - B (3d array): 15 x 1 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 1 - Result (3d array): 15 x 3 x 5 - -Here are examples of shapes that do not broadcast:: - - A (1d array): 3 - B (1d array): 4 # trailing dimensions do not match - - A (2d array): 2 x 1 - B (3d array): 8 x 4 x 3 # second from last dimensions mismatched - -An example of broadcasting in practice:: - - >>> x = np.arange(4) - >>> xx = x.reshape(4,1) - >>> y = np.ones(5) - >>> z = np.ones((3,4)) - - >>> x.shape - (4,) - - >>> y.shape - (5,) - - >>> x + y - ValueError: operands could not be broadcast together with shapes (4,) (5,) - - >>> xx.shape - (4, 1) - - >>> y.shape - (5,) - - >>> (xx + y).shape - (4, 5) - - >>> xx + y - array([[ 1., 1., 1., 1., 1.], - [ 2., 2., 2., 2., 2.], - [ 3., 3., 3., 3., 3.], - [ 4., 4., 4., 4., 4.]]) - - >>> x.shape - (4,) - - >>> z.shape - (3, 4) - - >>> (x + z).shape - (3, 4) - - >>> x + z - array([[ 1., 2., 3., 4.], - [ 1., 2., 3., 4.], - [ 1., 2., 3., 4.]]) - -Broadcasting provides a convenient way of taking the outer product (or -any other outer operation) of two arrays. The following example shows an -outer addition operation of two 1-d arrays:: - - >>> a = np.array([0.0, 10.0, 20.0, 30.0]) - >>> b = np.array([1.0, 2.0, 3.0]) - >>> a[:, np.newaxis] + b - array([[ 1., 2., 3.], - [ 11., 12., 13.], - [ 21., 22., 23.], - [ 31., 32., 33.]]) - -Here the ``newaxis`` index operator inserts a new axis into ``a``, -making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array -with ``b``, which has shape ``(3,)``, yields a ``4x3`` array. - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/byteswapping.py b/venv/lib/python3.7/site-packages/numpy/doc/byteswapping.py deleted file mode 100644 index 7a749c8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/byteswapping.py +++ /dev/null @@ -1,156 +0,0 @@ -""" - -============================= - Byteswapping and byte order -============================= - -Introduction to byte ordering and ndarrays -========================================== - -The ``ndarray`` is an object that provide a python array interface to data -in memory. - -It often happens that the memory that you want to view with an array is -not of the same byte ordering as the computer on which you are running -Python. - -For example, I might be working on a computer with a little-endian CPU - -such as an Intel Pentium, but I have loaded some data from a file -written by a computer that is big-endian. Let's say I have loaded 4 -bytes from a file written by a Sun (big-endian) computer. I know that -these 4 bytes represent two 16-bit integers. On a big-endian machine, a -two-byte integer is stored with the Most Significant Byte (MSB) first, -and then the Least Significant Byte (LSB). Thus the bytes are, in memory order: - -#. MSB integer 1 -#. LSB integer 1 -#. MSB integer 2 -#. 
LSB integer 2
-
-Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
-3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
-The bytes I have loaded from the file would have these contents:
-
->>> big_end_buffer = bytearray([0,1,3,2])
->>> big_end_buffer
-bytearray(b'\\x00\\x01\\x03\\x02')
-
-We might want to use an ``ndarray`` to access these integers. In that
-case, we can create an array around this memory, and tell numpy that
-there are two integers, and that they are 16 bit and big-endian:
-
->>> import numpy as np
->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer)
->>> big_end_arr[0]
-1
->>> big_end_arr[1]
-770
-
-Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
-(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
-example, if our data represented a single unsigned 4-byte little-endian
-integer, the dtype string would be ``<u4``.
-
-In fact, why don't we try that?
-
->>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_buffer)
->>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
-True
-
-Returning to our ``big_end_arr`` - in this case our underlying data is
-big-endian (data endianness) and we've set the dtype to match (the dtype
-is also big-endian). However, sometimes you need to flip these around.
-
-.. warning::
-
-    Scalars currently do not include byte order information, so extracting
-    a scalar from an array will return an integer in native byte order.
-    Hence:
-
-    >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
-    True
-
-Changing byte ordering
-======================
-
-As you can imagine from the introduction, there are two ways you can
-affect the relationship between the byte ordering of the array and the
-underlying memory it is looking at:
-
-* Change the byte-ordering information in the array dtype so that it
-  interprets the underlying data as being in a different byte order.
-  This is the role of ``arr.newbyteorder()``.
-* Change the byte-ordering of the underlying data, leaving the dtype
-  interpretation as it was. This is what ``arr.byteswap()`` does.
-
-The common situations in which you need to change byte ordering are:
-
-#. Your data and dtype endianness don't match, and you want to change
-   the dtype so that it matches the data.
-#. Your data and dtype endianness don't match, and you want to swap the
-   data so that they match the dtype.
-#. Your data and dtype endianness match, but you want the data swapped
-   and the dtype to reflect this.
-
-Data and dtype endianness don't match, change dtype to match data
------------------------------------------------------------------
-
-We make something where they don't match:
-
->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer)
->>> wrong_end_dtype_arr[0]
-256
-
-The obvious fix for this situation is to change the dtype so it gives
-the correct endianness:
-
->>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
->>> fixed_end_dtype_arr[0]
-1
-
-Note the array has not changed in memory:
-
->>> fixed_end_dtype_arr.tobytes() == big_end_buffer
-True
-
-Data and dtype endianness don't match, change data to match dtype
------------------------------------------------------------------
-
-You might want to do this if you need the data in memory to be a certain
-ordering. For example you might be writing the memory out to a file
-that needs a certain byte ordering.
-
->>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
->>> fixed_end_mem_arr[0]
-1
-
-Now the array *has* changed in memory:
-
->>> fixed_end_mem_arr.tobytes() == big_end_buffer
-False
-
-Data and dtype endianness match, swap data and dtype
-----------------------------------------------------
-
-You may have a correctly specified array dtype, but you need the array
-to have the opposite byte order in memory, and you want the dtype to
-match so the array values make sense. In this case you just do both of
-the previous operations:
-
->>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
->>> swapped_end_arr[0]
-1
->>> swapped_end_arr.tobytes() == big_end_buffer
-False
-
-Casting the data to a specific dtype and byte ordering can be achieved
-more simply with the ndarray astype method:
-
->>> swapped_end_arr = big_end_arr.astype('<i2')
->>> swapped_end_arr[0]
-1
->>> swapped_end_arr.tobytes() == big_end_buffer
-False
-
-"""
-from __future__ import division, absolute_import, print_function
diff --git a/venv/lib/python3.7/site-packages/numpy/doc/constants.py b/venv/lib/python3.7/site-packages/numpy/doc/constants.py
deleted file mode 100644
index 72793e4..0000000
--- a/venv/lib/python3.7/site-packages/numpy/doc/constants.py
+++ /dev/null
@@ -1,418 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-=========
-Constants
-=========
-
-.. currentmodule:: numpy
-
-NumPy includes several constants:
-
-%(constant_list)s
-"""
-#
-# Note: the docstring is autogenerated.
-#
-from __future__ import division, absolute_import, print_function
-
-import textwrap, re
-
-# Maintain same format as in numpy.add_newdocs
-constants = []
-def add_newdoc(module, name, doc):
-    constants.append((name, doc))
-
-add_newdoc('numpy', 'pi',
-    """
-    ``pi = 3.1415926535897932384626433...``
-
-    References
-    ----------
-    https://en.wikipedia.org/wiki/Pi
-
-    """)
-
-add_newdoc('numpy', 'e',
-    """
-    Euler's constant, base of natural logarithms, Napier's constant.
-
-    ``e = 2.71828182845904523536028747135266249775724709369995...``
-
-    See Also
-    --------
-    exp : Exponential function
-    log : Natural logarithm
-
-    References
-    ----------
-    https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
-
-    """)
-
-add_newdoc('numpy', 'euler_gamma',
-    """
-    ``γ = 0.5772156649015328606065120900824024310421...``
-
-    References
-    ----------
-    https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
-
-    """)
-
-add_newdoc('numpy', 'inf',
-    """
-    IEEE 754 floating point representation of (positive) infinity.
-
-    Returns
-    -------
-    y : float
-        A floating point representation of positive infinity.
-
-    See Also
-    --------
-    isinf : Shows which elements are positive or negative infinity
-
-    isposinf : Shows which elements are positive infinity
-
-    isneginf : Shows which elements are negative infinity
-
-    isnan : Shows which elements are Not a Number
-
-    isfinite : Shows which elements are finite (not one of Not a Number,
-               positive infinity and negative infinity)
-
-    Notes
-    -----
-    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
-    (IEEE 754). This means that Not a Number is not equivalent to infinity.
-    Also, positive infinity is not equivalent to negative infinity. But
-    infinity is equivalent to positive infinity.
-
-    `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
-
-    Examples
-    --------
-    >>> np.inf
-    inf
-    >>> np.array([1]) / 0.
-    array([ Inf])
-
-    """)
-
-add_newdoc('numpy', 'nan',
-    """
-    IEEE 754 floating point representation of Not a Number (NaN).
- - Returns - ------- - y : A floating point representation of Not a Number. - - See Also - -------- - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite (not one of - Not a Number, positive infinity and negative infinity) - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - - `NaN` and `NAN` are aliases of `nan`. - - Examples - -------- - >>> np.nan - nan - >>> np.log(-1) - nan - >>> np.log([-1, 1, 2]) - array([ NaN, 0. , 0.69314718]) - - """) - -add_newdoc('numpy', 'newaxis', - """ - A convenient alias for None, useful for indexing arrays. - - See Also - -------- - `numpy.doc.indexing` - - Examples - -------- - >>> newaxis is None - True - >>> x = np.arange(3) - >>> x - array([0, 1, 2]) - >>> x[:, newaxis] - array([[0], - [1], - [2]]) - >>> x[:, newaxis, newaxis] - array([[[0]], - [[1]], - [[2]]]) - >>> x[:, newaxis] * x - array([[0, 0, 0], - [0, 1, 2], - [0, 2, 4]]) - - Outer product, same as ``outer(x, y)``: - - >>> y = np.arange(3, 6) - >>> x[:, newaxis] * y - array([[ 0, 0, 0], - [ 3, 4, 5], - [ 6, 8, 10]]) - - ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: - - >>> x[newaxis, :].shape - (1, 3) - >>> x[newaxis].shape - (1, 3) - >>> x[None].shape - (1, 3) - >>> x[:, newaxis].shape - (3, 1) - - """) - -add_newdoc('numpy', 'NZERO', - """ - IEEE 754 floating point representation of negative zero. - - Returns - ------- - y : float - A floating point representation of negative zero. - - See Also - -------- - PZERO : Defines positive zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Negative zero is considered to be a finite number. - - Examples - -------- - >>> np.NZERO - -0.0 - >>> np.PZERO - 0.0 - - >>> np.isfinite([np.NZERO]) - array([ True]) - >>> np.isnan([np.NZERO]) - array([False]) - >>> np.isinf([np.NZERO]) - array([False]) - - """) - -add_newdoc('numpy', 'PZERO', - """ - IEEE 754 floating point representation of positive zero. - - Returns - ------- - y : float - A floating point representation of positive zero. - - See Also - -------- - NZERO : Defines negative zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Positive zero is considered to be a finite number. - - Examples - -------- - >>> np.PZERO - 0.0 - >>> np.NZERO - -0.0 - - >>> np.isfinite([np.PZERO]) - array([ True]) - >>> np.isnan([np.PZERO]) - array([False]) - >>> np.isinf([np.PZERO]) - array([False]) - - """) - -add_newdoc('numpy', 'NAN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NAN`. 
- - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'NaN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NaN`. - - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'NINF', - """ - IEEE 754 floating point representation of negative infinity. - - Returns - ------- - y : float - A floating point representation of negative infinity. - - See Also - -------- - isinf : Shows which elements are positive or negative infinity - - isposinf : Shows which elements are positive infinity - - isneginf : Shows which elements are negative infinity - - isnan : Shows which elements are Not a Number - - isfinite : Shows which elements are finite (not one of Not a Number, - positive infinity and negative infinity) - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - Examples - -------- - >>> np.NINF - -inf - >>> np.log(0) - -inf - - """) - -add_newdoc('numpy', 'PINF', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'infty', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'Inf', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'Infinity', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - - -if __doc__: - constants_str = [] - constants.sort() - for name, doc in constants: - s = textwrap.dedent(doc).replace("\n", "\n ") - - # Replace sections by rubrics - lines = s.split("\n") - new_lines = [] - for line in lines: - m = re.match(r'^(\s+)[-=]+\s*$', line) - if m and new_lines: - prev = textwrap.dedent(new_lines.pop()) - new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) - new_lines.append('') - else: - new_lines.append(line) - s = "\n".join(new_lines) - - # Done. - constants_str.append(""".. data:: %s\n %s""" % (name, s)) - constants_str = "\n".join(constants_str) - - __doc__ = __doc__ % dict(constant_list=constants_str) - del constants_str, name, doc - del line, lines, new_lines, m, s, prev - -del constants, add_newdoc diff --git a/venv/lib/python3.7/site-packages/numpy/doc/creation.py b/venv/lib/python3.7/site-packages/numpy/doc/creation.py deleted file mode 100644 index 9ebe938..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/creation.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -============== -Array Creation -============== - -Introduction -============ - -There are 5 general mechanisms for creating arrays: - -1) Conversion from other Python structures (e.g., lists, tuples) -2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros, - etc.) 
-3) Reading arrays from disk, either from standard or custom formats -4) Creating arrays from raw bytes through the use of strings or buffers -5) Use of special library functions (e.g., random) - -This section will not cover means of replicating, joining, or otherwise -expanding or mutating existing arrays. Nor will it cover creating object -arrays or structured arrays. Both of those are covered in their own sections. - -Converting Python array_like Objects to NumPy Arrays -==================================================== - -In general, numerical data arranged in an array-like structure in Python can -be converted to arrays through the use of the array() function. The most -obvious examples are lists and tuples. See the documentation for array() for -details for its use. Some objects may support the array-protocol and allow -conversion to arrays this way. A simple way to find out if the object can be -converted to a numpy array using array() is simply to try it interactively and -see if it works! (The Python Way). - -Examples: :: - - >>> x = np.array([2,3,1,0]) - >>> x = np.array([2, 3, 1, 0]) - >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, - and types - >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) - -Intrinsic NumPy Array Creation -============================== - -NumPy has built-in functions for creating arrays from scratch: - -zeros(shape) will create an array filled with 0 values with the specified -shape. The default dtype is float64. :: - - >>> np.zeros((2, 3)) - array([[ 0., 0., 0.], [ 0., 0., 0.]]) - -ones(shape) will create an array filled with 1 values. It is identical to -zeros in all other respects. - -arange() will create arrays with regularly incrementing values. Check the -docstring for complete information on the various ways it can be used. A few -examples will be given here: :: - - >>> np.arange(10) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=float) - array([ 2., 3., 4., 5., 6., 7., 8., 9.]) - >>> np.arange(2, 3, 0.1) - array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) - -Note that there are some subtleties regarding the last usage that the user -should be aware of that are described in the arange docstring. - -linspace() will create arrays with a specified number of elements, and -spaced equally between the specified beginning and end values. For -example: :: - - >>> np.linspace(1., 4., 6) - array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ]) - -The advantage of this creation function is that one can guarantee the -number of elements and the starting and end point, which arange() -generally will not do for arbitrary start, stop, and step values. - -indices() will create a set of arrays (stacked as a one-higher dimensioned -array), one per dimension with each representing variation in that dimension. -An example illustrates much better than a verbal description: :: - - >>> np.indices((3,3)) - array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) - -This is particularly useful for evaluating functions of multiple dimensions on -a regular grid. - -Reading Arrays From Disk -======================== - -This is presumably the most common case of large array creation. The details, -of course, depend greatly on the format of data on disk and so this section -can only give general pointers on how to handle various formats. - -Standard Binary Formats ------------------------ - -Various fields have standard formats for array data. 
The following lists the -ones with known python libraries to read them and return numpy arrays (there -may be others for which it is possible to read and convert to numpy arrays so -check the last section as well) -:: - - HDF5: h5py - FITS: Astropy - -Examples of formats that cannot be read directly but for which it is not hard to -convert are those formats supported by libraries like PIL (able to read and -write many image formats such as jpg, png, etc). - -Common ASCII Formats ------------------------- - -Comma Separated Value files (CSV) are widely used (and an export and import -option for programs like Excel). There are a number of ways of reading these -files in Python. There are CSV functions in Python and functions in pylab -(part of matplotlib). - -More generic ascii files can be read using the io package in scipy. - -Custom Binary Formats ---------------------- - -There are a variety of approaches one can use. If the file has a relatively -simple format then one can write a simple I/O library and use the numpy -fromfile() function and .tofile() method to read and write numpy arrays -directly (mind your byteorder though!) If a good C or C++ library exists that -read the data, one can wrap that library with a variety of techniques though -that certainly is much more work and requires significantly more advanced -knowledge to interface with C or C++. - -Use of Special Libraries ------------------------- - -There are libraries that can be used to generate arrays for special purposes -and it isn't possible to enumerate all of them. The most common uses are use -of the many array generation functions in random that can generate arrays of -random values, and some utility functions to generate special matrices (e.g. -diagonal). - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/dispatch.py b/venv/lib/python3.7/site-packages/numpy/doc/dispatch.py deleted file mode 100644 index c902994..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/dispatch.py +++ /dev/null @@ -1,271 +0,0 @@ -""".. _dispatch_mechanism: - -Numpy's dispatch mechanism, introduced in numpy version v1.16 is the -recommended approach for writing custom N-dimensional array containers that are -compatible with the numpy API and provide custom implementations of numpy -functionality. Applications include `dask `_ arrays, an -N-dimensional array distributed across multiple nodes, and `cupy -`_ arrays, an N-dimensional array on -a GPU. - -To get a feel for writing custom array containers, we'll begin with a simple -example that has rather narrow utility but illustrates the concepts involved. - ->>> import numpy as np ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self): -... return self._i * np.eye(self._N) -... - -Our custom array can be instantiated like: - ->>> arr = DiagonalArray(5, 1) ->>> arr -DiagonalArray(N=5, value=1) - -We can convert to a numpy array using :func:`numpy.array` or -:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a -standard ``numpy.ndarray``. 
-
->>> np.asarray(arr)
-array([[1., 0., 0., 0., 0.],
-       [0., 1., 0., 0., 0.],
-       [0., 0., 1., 0., 0.],
-       [0., 0., 0., 1., 0.],
-       [0., 0., 0., 0., 1.]])
-
-If we operate on ``arr`` with a numpy function, numpy will again use the
-``__array__`` interface to convert it to an array and then apply the function
-in the usual way.
-
->>> np.multiply(arr, 2)
-array([[2., 0., 0., 0., 0.],
-       [0., 2., 0., 0., 0.],
-       [0., 0., 2., 0., 0.],
-       [0., 0., 0., 2., 0.],
-       [0., 0., 0., 0., 2.]])
-
-
-Notice that the return type is a standard ``numpy.ndarray``.
-
->>> type(np.multiply(arr, 2))
-numpy.ndarray
-
-How can we pass our custom array type through this function? Numpy allows a
-class to indicate that it would like to handle computations in a custom-defined
-way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
-take these one at a time, starting with ``__array_ufunc__``. This method covers
-:ref:`ufuncs`, a class of functions that includes, for example,
-:func:`numpy.multiply` and :func:`numpy.sin`.
-
-The ``__array_ufunc__`` receives:
-
-- ``ufunc``, a function like ``numpy.multiply``
-- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
-  variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
-  on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
-- ``inputs``, which could be a mixture of different types
-- ``kwargs``, keyword arguments passed to the function
-
-For this example we will only handle the method ``__call__``.
-
->>> from numbers import Number
->>> class DiagonalArray:
-...     def __init__(self, N, value):
-...         self._N = N
-...         self._i = value
-...     def __repr__(self):
-...         return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-...     def __array__(self):
-...         return self._i * np.eye(self._N)
-...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-...         if method == '__call__':
-...             N = None
-...             scalars = []
-...             for input in inputs:
-...                 if isinstance(input, Number):
-...                     scalars.append(input)
-...                 elif isinstance(input, self.__class__):
-...                     scalars.append(input._i)
-...                     if N is not None:
-...                         if N != self._N:
-...                             raise TypeError("inconsistent sizes")
-...                     else:
-...                         N = self._N
-...                 else:
-...                     return NotImplemented
-...             return self.__class__(N, ufunc(*scalars, **kwargs))
-...         else:
-...             return NotImplemented
-...
-
-Now our custom array type passes through numpy functions.
-
->>> arr = DiagonalArray(5, 1)
->>> np.multiply(arr, 3)
-DiagonalArray(N=5, value=3)
->>> np.add(arr, 3)
-DiagonalArray(N=5, value=4)
->>> np.sin(arr)
-DiagonalArray(N=5, value=0.8414709848078965)
-
-At this point ``arr + 3`` does not work.
-
->>> arr + 3
-TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
-
-To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
-and so on to dispatch to the corresponding ufunc. We can achieve this
-conveniently by inheriting from the mixin
-:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
-
->>> import numpy.lib.mixins
->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
-...     def __init__(self, N, value):
-...         self._N = N
-...         self._i = value
-...     def __repr__(self):
-...         return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-...     def __array__(self):
-...         return self._i * np.eye(self._N)
-...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-...         if method == '__call__':
-...             N = None
-...             scalars = []
-...             for input in inputs:
-...                 if isinstance(input, Number):
-...                     scalars.append(input)
-...                 elif isinstance(input, self.__class__):
-...                     scalars.append(input._i)
-...                     if N is not None:
-...                         if N != self._N:
-...                             raise TypeError("inconsistent sizes")
-...                     else:
-...                         N = self._N
-...                 else:
-...                     return NotImplemented
-...             return self.__class__(N, ufunc(*scalars, **kwargs))
-...         else:
-...             return NotImplemented
-...
-
->>> arr = DiagonalArray(5, 1)
->>> arr + 3
-DiagonalArray(N=5, value=4)
->>> arr > 0
-DiagonalArray(N=5, value=True)
-
-Now let's tackle ``__array_function__``. We'll create a dict that maps numpy
-functions to our custom variants.
-
->>> HANDLED_FUNCTIONS = {}
->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
-...     def __init__(self, N, value):
-...         self._N = N
-...         self._i = value
-...     def __repr__(self):
-...         return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-...     def __array__(self):
-...         return self._i * np.eye(self._N)
-...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-...         if method == '__call__':
-...             N = None
-...             scalars = []
-...             for input in inputs:
-...                 # In this case we accept only scalar numbers or DiagonalArrays.
-...                 if isinstance(input, Number):
-...                     scalars.append(input)
-...                 elif isinstance(input, self.__class__):
-...                     scalars.append(input._i)
-...                     if N is not None:
-...                         if N != self._N:
-...                             raise TypeError("inconsistent sizes")
-...                     else:
-...                         N = self._N
-...                 else:
-...                     return NotImplemented
-...             return self.__class__(N, ufunc(*scalars, **kwargs))
-...         else:
-...             return NotImplemented
-...     def __array_function__(self, func, types, args, kwargs):
-...         if func not in HANDLED_FUNCTIONS:
-...             return NotImplemented
-...         # Note: this allows subclasses that don't override
-...         # __array_function__ to handle DiagonalArray objects.
-...         if not all(issubclass(t, self.__class__) for t in types):
-...             return NotImplemented
-...         return HANDLED_FUNCTIONS[func](*args, **kwargs)
-...
-
-A convenient pattern is to define a decorator ``implements`` that can be used
-to add functions to ``HANDLED_FUNCTIONS``.
-
->>> def implements(np_function):
-...    "Register an __array_function__ implementation for DiagonalArray objects."
-...    def decorator(func):
-...        HANDLED_FUNCTIONS[np_function] = func
-...        return func
-...    return decorator
-...
-
-Now we write implementations of numpy functions for ``DiagonalArray``.
-For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
-calls ``numpy.sum(self)``, and the same for ``mean``.
-
->>> @implements(np.sum)
-... def sum(arr):
-...     "Implementation of np.sum for DiagonalArray objects"
-...     return arr._i * arr._N
-...
->>> @implements(np.mean)
-... def mean(arr):
-...     "Implementation of np.mean for DiagonalArray objects"
-...     return arr._i / arr._N
-...
->>> arr = DiagonalArray(5, 1)
->>> np.sum(arr)
-5
->>> np.mean(arr)
-0.2
-
-If the user tries to use any numpy functions not included in
-``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
-this operation is not supported. For example, concatenating two
-``DiagonalArrays`` does not produce another diagonal array, so it is not
-supported.
-
->>> np.concatenate([arr, arr])
-TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
-
-Additionally, our implementations of ``sum`` and ``mean`` do not accept the
-optional arguments that numpy's implementation does.
- ->>> np.sum(arr, axis=0) -TypeError: sum() got an unexpected keyword argument 'axis' - -The user always has the option of converting to a normal ``numpy.ndarray`` with -:func:`numpy.asarray` and using standard numpy from there. - ->>> np.concatenate([np.asarray(arr), np.asarray(arr)]) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.], - [1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - -Refer to the `dask source code `_ and -`cupy source code `_ for more fully-worked -examples of custom array containers. - -See also `NEP 18 `_. -""" diff --git a/venv/lib/python3.7/site-packages/numpy/doc/glossary.py b/venv/lib/python3.7/site-packages/numpy/doc/glossary.py deleted file mode 100644 index 7d1c9a1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/glossary.py +++ /dev/null @@ -1,476 +0,0 @@ -""" -======== -Glossary -======== - -.. glossary:: - - along an axis - Axes are defined for arrays with more than one dimension. A - 2-dimensional array has two corresponding axes: the first running - vertically downwards across rows (axis 0), and the second running - horizontally across columns (axis 1). - - Many operations can take place along one of these axes. For example, - we can sum each row of an array, in which case we operate along - columns, or axis 1:: - - >>> x = np.arange(12).reshape((3,4)) - - >>> x - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - - >>> x.sum(axis=1) - array([ 6, 22, 38]) - - array - A homogeneous container of numerical elements. Each element in the - array occupies a fixed amount of memory (hence homogeneous), and - can be a numerical element of a single type (such as float, int - or complex) or a combination (such as ``(float, int, float)``). Each - array has an associated data-type (or ``dtype``), which describes - the numerical type of its elements:: - - >>> x = np.array([1, 2, 3], float) - - >>> x - array([ 1., 2., 3.]) - - >>> x.dtype # floating point number, 64 bits of memory per element - dtype('float64') - - - # More complicated data type: each array element is a combination of - # and integer and a floating point number - >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)]) - array([(1, 2.0), (3, 4.0)], - dtype=[('x', '>> x = np.array([1, 2, 3]) - >>> x.shape - (3,) - - big-endian - When storing a multi-byte value in memory as a sequence of bytes, the - sequence addresses/sends/stores the most significant byte first (lowest - address) and the least significant byte last (highest address). Common in - micro-processors and used for transmission of data over network protocols. - - BLAS - `Basic Linear Algebra Subprograms `_ - - broadcast - NumPy can do operations on arrays whose shapes are mismatched:: - - >>> x = np.array([1, 2]) - >>> y = np.array([[3], [4]]) - - >>> x - array([1, 2]) - - >>> y - array([[3], - [4]]) - - >>> x + y - array([[4, 5], - [5, 6]]) - - See `numpy.doc.broadcasting` for more information. - - C order - See `row-major` - - column-major - A way to represent items in a N-dimensional array in the 1-dimensional - computer memory. In column-major order, the leftmost index "varies the - fastest": for example the array:: - - [[1, 2, 3], - [4, 5, 6]] - - is represented in the column-major order as:: - - [1, 4, 2, 5, 3, 6] - - Column-major order is also known as the Fortran order, as the Fortran - programming language uses it. 
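-      For example, ``ravel(order='F')`` flattens an array in column-major
-      order (a quick illustration)::
-
-        >>> a = np.array([[1, 2, 3],
-        ...               [4, 5, 6]])
-        >>> a.ravel(order='F')
-        array([1, 4, 2, 5, 3, 6])
-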
- - decorator - An operator that transforms a function. For example, a ``log`` - decorator may be defined to print debugging information upon - function execution:: - - >>> def log(f): - ... def new_logging_func(*args, **kwargs): - ... print("Logging call with parameters:", args, kwargs) - ... return f(*args, **kwargs) - ... - ... return new_logging_func - - Now, when we define a function, we can "decorate" it using ``log``:: - - >>> @log - ... def add(a, b): - ... return a + b - - Calling ``add`` then yields: - - >>> add(1, 2) - Logging call with parameters: (1, 2) {} - 3 - - dictionary - Resembling a language dictionary, which provides a mapping between - words and descriptions thereof, a Python dictionary is a mapping - between two objects:: - - >>> x = {1: 'one', 'two': [1, 2]} - - Here, `x` is a dictionary mapping keys to values, in this case - the integer 1 to the string "one", and the string "two" to - the list ``[1, 2]``. The values may be accessed using their - corresponding keys:: - - >>> x[1] - 'one' - - >>> x['two'] - [1, 2] - - Note that dictionaries are not stored in any specific order. Also, - most mutable (see *immutable* below) objects, such as lists, may not - be used as keys. - - For more information on dictionaries, read the - `Python tutorial `_. - - field - In a :term:`structured data type`, each sub-type is called a `field`. - The `field` has a name (a string), a type (any valid dtype, and - an optional `title`. See :ref:`arrays.dtypes` - - Fortran order - See `column-major` - - flattened - Collapsed to a one-dimensional array. See `numpy.ndarray.flatten` - for details. - - homogenous - Describes a block of memory comprised of blocks, each block comprised of - items and of the same size, and blocks are interpreted in exactly the - same way. In the simplest case each block contains a single item, for - instance int32 or float64. - - immutable - An object that cannot be modified after execution is called - immutable. Two common examples are strings and tuples. - - instance - A class definition gives the blueprint for constructing an object:: - - >>> class House(object): - ... wall_colour = 'white' - - Yet, we have to *build* a house before it exists:: - - >>> h = House() # build a house - - Now, ``h`` is called a ``House`` instance. An instance is therefore - a specific realisation of a class. - - iterable - A sequence that allows "walking" (iterating) over items, typically - using a loop such as:: - - >>> x = [1, 2, 3] - >>> [item**2 for item in x] - [1, 4, 9] - - It is often used in combination with ``enumerate``:: - >>> keys = ['a','b','c'] - >>> for n, k in enumerate(keys): - ... print("Key %d: %s" % (n, k)) - ... - Key 0: a - Key 1: b - Key 2: c - - itemsize - The size of the dtype element in bytes. - - list - A Python container that can hold any number of objects or items. - The items do not have to be of the same type, and can even be - lists themselves:: - - >>> x = [2, 2.0, "two", [2, 2.0]] - - The list `x` contains 4 items, each which can be accessed individually:: - - >>> x[2] # the string 'two' - 'two' - - >>> x[3] # a list, containing an integer 2 and a float 2.0 - [2, 2.0] - - It is also possible to select more than one item at a time, - using *slicing*:: - - >>> x[0:2] # or, equivalently, x[:2] - [2, 2.0] - - In code, arrays are often conveniently expressed as nested lists:: - - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - For more information, read the section on lists in the `Python - tutorial `_. 
For a mapping - type (key-value), see *dictionary*. - - little-endian - When storing a multi-byte value in memory as a sequence of bytes, the - sequence addresses/sends/stores the least significant byte first (lowest - address) and the most significant byte last (highest address). Common in - x86 processors. - - mask - A boolean array, used to select only certain elements for an operation:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> mask = (x > 2) - >>> mask - array([False, False, False, True, True]) - - >>> x[mask] = -1 - >>> x - array([ 0, 1, 2, -1, -1]) - - masked array - Array that suppressed values indicated by a mask:: - - >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) - >>> x - masked_array(data = [-- 2.0 --], - mask = [ True False True], - fill_value = 1e+20) - - >>> x + [1, 2, 3] - masked_array(data = [-- 4.0 --], - mask = [ True False True], - fill_value = 1e+20) - - - Masked arrays are often used when operating on arrays containing - missing or invalid entries. - - matrix - A 2-dimensional ndarray that preserves its two-dimensional nature - throughout operations. It has certain special operations, such as ``*`` - (matrix multiplication) and ``**`` (matrix power), defined:: - - >>> x = np.mat([[1, 2], [3, 4]]) - >>> x - matrix([[1, 2], - [3, 4]]) - - >>> x**2 - matrix([[ 7, 10], - [15, 22]]) - - method - A function associated with an object. For example, each ndarray has a - method called ``repeat``:: - - >>> x = np.array([1, 2, 3]) - >>> x.repeat(2) - array([1, 1, 2, 2, 3, 3]) - - ndarray - See *array*. - - record array - An :term:`ndarray` with :term:`structured data type` which has been - subclassed as ``np.recarray`` and whose dtype is of type ``np.record``, - making the fields of its data type to be accessible by attribute. - - reference - If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, - ``a`` and ``b`` are different names for the same Python object. - - row-major - A way to represent items in a N-dimensional array in the 1-dimensional - computer memory. In row-major order, the rightmost index "varies - the fastest": for example the array:: - - [[1, 2, 3], - [4, 5, 6]] - - is represented in the row-major order as:: - - [1, 2, 3, 4, 5, 6] - - Row-major order is also known as the C order, as the C programming - language uses it. New NumPy arrays are by default in row-major order. - - self - Often seen in method signatures, ``self`` refers to the instance - of the associated class. For example: - - >>> class Paintbrush(object): - ... color = 'blue' - ... - ... def paint(self): - ... print("Painting the city %s!" % self.color) - ... - >>> p = Paintbrush() - >>> p.color = 'red' - >>> p.paint() # self refers to 'p' - Painting the city red! 
- - slice - Used to select only certain elements from a sequence: - - >>> x = range(5) - >>> x - [0, 1, 2, 3, 4] - - >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) - [1, 2] - - >>> x[1:5:2] # slice from 1 to 5, but skipping every second element - [1, 3] - - >>> x[::-1] # slice a sequence in reverse - [4, 3, 2, 1, 0] - - Arrays may have more than one dimension, each which can be sliced - individually: - - >>> x = np.array([[1, 2], [3, 4]]) - >>> x - array([[1, 2], - [3, 4]]) - - >>> x[:, 1] - array([2, 4]) - - structure - See :term:`structured data type` - - structured data type - A data type composed of other datatypes - - subarray data type - A :term:`structured data type` may contain a :term:`ndarray` with its - own dtype and shape: - - >>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))]) - >>> np.zeros(3, dtype=dt) - array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])], - dtype=[('a', '` which is an alias to the name and is - commonly used for plotting. - - tuple - A sequence that may contain a variable number of types of any - kind. A tuple is immutable, i.e., once constructed it cannot be - changed. Similar to a list, it can be indexed and sliced:: - - >>> x = (1, 'one', [1, 2]) - >>> x - (1, 'one', [1, 2]) - - >>> x[0] - 1 - - >>> x[:2] - (1, 'one') - - A useful concept is "tuple unpacking", which allows variables to - be assigned to the contents of a tuple:: - - >>> x, y = (1, 2) - >>> x, y = 1, 2 - - This is often used when a function returns multiple values: - - >>> def return_many(): - ... return 1, 'alpha', None - - >>> a, b, c = return_many() - >>> a, b, c - (1, 'alpha', None) - - >>> a - 1 - >>> b - 'alpha' - - ufunc - Universal function. A fast element-wise, :term:`vectorized - ` array operation. Examples include ``add``, ``sin`` and - ``logical_or``. - - vectorization - Optimizing a looping block by specialized code. In a traditional sense, - vectorization performs the same operation on multiple elements with - fixed strides between them via specialized hardware. Compilers know how - to take advantage of well-constructed loops to implement such - optimizations. NumPy uses :ref:`vectorization ` - to mean any optimization via specialized code performing the same - operations on multiple elements, typically achieving speedups by - avoiding some of the overhead in looking up and converting the elements. - - view - An array that does not own its data, but refers to another array's - data instead. For example, we may create a view that only shows - every second element of another array:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> y = x[::2] - >>> y - array([0, 2, 4]) - - >>> x[0] = 3 # changing x changes y as well, since y is a view on x - >>> y - array([3, 2, 4]) - - wrapper - Python is a high-level (highly abstracted, or English-like) language. - This abstraction comes at a price in execution speed, and sometimes - it becomes necessary to use lower level languages to do fast - computations. A wrapper is code that provides a bridge between - high and the low level languages, allowing, e.g., Python to execute - code written in C or Fortran. - - Examples include ctypes, SWIG and Cython (which wraps C and C++) - and f2py (which wraps Fortran). 
- -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/indexing.py b/venv/lib/python3.7/site-packages/numpy/doc/indexing.py deleted file mode 100644 index 6760156..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/indexing.py +++ /dev/null @@ -1,449 +0,0 @@ -""" -============== -Array indexing -============== - -Array indexing refers to any use of the square brackets ([]) to index -array values. There are many options to indexing, which give numpy -indexing great power, but with power comes some complexity and the -potential for confusion. This section is just an overview of the -various options and issues related to indexing. Aside from single -element indexing, the details on most of these options are to be -found in related sections. - -Assignment vs referencing -========================= - -Most of the following examples show the use of indexing when -referencing data in an array. The examples work just as well -when assigning to an array. See the section at the end for -specific examples and explanations on how assignments work. - -Single element indexing -======================= - -Single element indexing for a 1-D array is what one expects. It work -exactly like that for other standard Python sequences. It is 0-based, -and accepts negative indices for indexing from the end of the array. :: - - >>> x = np.arange(10) - >>> x[2] - 2 - >>> x[-2] - 8 - -Unlike lists and tuples, numpy arrays support multidimensional indexing -for multidimensional arrays. That means that it is not necessary to -separate each dimension's index into its own set of square brackets. :: - - >>> x.shape = (2,5) # now x is 2-dimensional - >>> x[1,3] - 8 - >>> x[1,-1] - 9 - -Note that if one indexes a multidimensional array with fewer indices -than dimensions, one gets a subdimensional array. For example: :: - - >>> x[0] - array([0, 1, 2, 3, 4]) - -That is, each index specified selects the array corresponding to the -rest of the dimensions selected. In the above example, choosing 0 -means that the remaining dimension of length 5 is being left unspecified, -and that what is returned is an array of that dimensionality and size. -It must be noted that the returned array is not a copy of the original, -but points to the same values in memory as does the original array. -In this case, the 1-D array at the first position (0) is returned. -So using a single index on the returned array, results in a single -element being returned. That is: :: - - >>> x[0][2] - 2 - -So note that ``x[0,2] = x[0][2]`` though the second case is more -inefficient as a new temporary array is created after the first index -that is subsequently indexed by 2. - -Note to those used to IDL or Fortran memory order as it relates to -indexing. NumPy uses C-order indexing. That means that the last -index usually represents the most rapidly changing memory location, -unlike Fortran or IDL, where the first index represents the most -rapidly changing location in memory. This difference represents a -great potential for confusion. - -Other indexing options -====================== - -It is possible to slice and stride arrays to extract arrays of the -same number of dimensions, but of different sizes than the original. -The slicing and striding works exactly the same way it does for lists -and tuples except that they can be applied to multiple dimensions as -well. 
A few examples illustrates best: :: - - >>> x = np.arange(10) - >>> x[2:5] - array([2, 3, 4]) - >>> x[:-7] - array([0, 1, 2]) - >>> x[1:7:2] - array([1, 3, 5]) - >>> y = np.arange(35).reshape(5,7) - >>> y[1:5:2,::3] - array([[ 7, 10, 13], - [21, 24, 27]]) - -Note that slices of arrays do not copy the internal array data but -only produce new views of the original data. This is different from -list or tuple slicing and an explicit ``copy()`` is recommended if -the original data is not required anymore. - -It is possible to index arrays with other arrays for the purposes of -selecting lists of values out of arrays into new arrays. There are -two different ways of accomplishing this. One uses one or more arrays -of index values. The other involves giving a boolean array of the proper -shape to indicate the values to be selected. Index arrays are a very -powerful tool that allow one to avoid looping over individual elements in -arrays and thus greatly improve performance. - -It is possible to use special features to effectively increase the -number of dimensions in an array through indexing so the resulting -array acquires the shape needed for use in an expression or with a -specific function. - -Index arrays -============ - -NumPy arrays may be indexed with other arrays (or any other sequence- -like object that can be converted to an array, such as lists, with the -exception of tuples; see the end of this document for why this is). The -use of index arrays ranges from simple, straightforward cases to -complex, hard-to-understand cases. For all cases of index arrays, what -is returned is a copy of the original data, not a view as one gets for -slices. - -Index arrays must be of integer type. Each value in the array indicates -which value in the array to use in place of the index. To illustrate: :: - - >>> x = np.arange(10,1,-1) - >>> x - array([10, 9, 8, 7, 6, 5, 4, 3, 2]) - >>> x[np.array([3, 3, 1, 8])] - array([7, 7, 9, 2]) - - -The index array consisting of the values 3, 3, 1 and 8 correspondingly -create an array of length 4 (same as the index array) where each index -is replaced by the value the index array has in the array being indexed. - -Negative values are permitted and work as they do with single indices -or slices: :: - - >>> x[np.array([3,3,-3,8])] - array([7, 7, 4, 2]) - -It is an error to have index values out of bounds: :: - - >>> x[np.array([3, 3, 20, 8])] - : index 20 out of bounds 0<=index<9 - -Generally speaking, what is returned when index arrays are used is -an array with the same shape as the index array, but with the type -and values of the array being indexed. As an example, we can use a -multidimensional index array instead: :: - - >>> x[np.array([[1,1],[2,3]])] - array([[9, 9], - [8, 7]]) - -Indexing Multi-dimensional arrays -================================= - -Things become more complex when multidimensional arrays are indexed, -particularly with multidimensional index arrays. These tend to be -more unusual uses, but they are permitted, and they are useful for some -problems. We'll start with the simplest multidimensional case (using -the array y from the previous examples): :: - - >>> y[np.array([0,2,4]), np.array([0,1,2])] - array([ 0, 15, 30]) - -In this case, if the index arrays have a matching shape, and there is -an index array for each dimension of the array being indexed, the -resultant array has the same shape as the index arrays, and the values -correspond to the index set for each position in the index arrays. 
In -this example, the first index value is 0 for both index arrays, and -thus the first value of the resultant array is y[0,0]. The next value -is y[2,1], and the last is y[4,2]. - -If the index arrays do not have the same shape, there is an attempt to -broadcast them to the same shape. If they cannot be broadcast to the -same shape, an exception is raised: :: - - >>> y[np.array([0,2,4]), np.array([0,1])] - : shape mismatch: objects cannot be - broadcast to a single shape - -The broadcasting mechanism permits index arrays to be combined with -scalars for other indices. The effect is that the scalar value is used -for all the corresponding values of the index arrays: :: - - >>> y[np.array([0,2,4]), 1] - array([ 1, 15, 29]) - -Jumping to the next level of complexity, it is possible to only -partially index an array with index arrays. It takes a bit of thought -to understand what happens in such cases. For example if we just use -one index array with y: :: - - >>> y[np.array([0,2,4])] - array([[ 0, 1, 2, 3, 4, 5, 6], - [14, 15, 16, 17, 18, 19, 20], - [28, 29, 30, 31, 32, 33, 34]]) - -What results is the construction of a new array where each value of -the index array selects one row from the array being indexed and the -resultant array has the resulting shape (number of index elements, -size of row). - -An example of where this may be useful is for a color lookup table -where we want to map the values of an image into RGB triples for -display. The lookup table could have a shape (nlookup, 3). Indexing -such an array with an image with shape (ny, nx) with dtype=np.uint8 -(or any integer type so long as values are with the bounds of the -lookup table) will result in an array of shape (ny, nx, 3) where a -triple of RGB values is associated with each pixel location. - -In general, the shape of the resultant array will be the concatenation -of the shape of the index array (or the shape that all the index arrays -were broadcast to) with the shape of any unused dimensions (those not -indexed) in the array being indexed. - -Boolean or "mask" index arrays -============================== - -Boolean arrays used as indices are treated in a different manner -entirely than index arrays. Boolean arrays must be of the same shape -as the initial dimensions of the array being indexed. In the -most straightforward case, the boolean array has the same shape: :: - - >>> b = y>20 - >>> y[b] - array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]) - -Unlike in the case of integer index arrays, in the boolean case, the -result is a 1-D array containing all the elements in the indexed array -corresponding to all the true elements in the boolean array. The -elements in the indexed array are always iterated and returned in -:term:`row-major` (C-style) order. The result is also identical to -``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy -of the data, not a view as one gets with slices. - -The result will be multidimensional if y has more dimensions than b. -For example: :: - - >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y - array([False, False, False, True, True]) - >>> y[b[:,5]] - array([[21, 22, 23, 24, 25, 26, 27], - [28, 29, 30, 31, 32, 33, 34]]) - -Here the 4th and 5th rows are selected from the indexed array and -combined to make a 2-D array. 
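-As with the full boolean array earlier, this selection is identical to
-indexing with the tuple returned by ``np.nonzero`` (a quick check)::
-
-    >>> np.array_equal(y[b[:,5]], y[np.nonzero(b[:,5])])
-    True
-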
-
-In general, when the boolean array has fewer dimensions than the array
-being indexed, this is equivalent to y[b, ...], which means
-y is indexed by b followed by as many : as are needed to fill
-out the rank of y.
-Thus the shape of the result is one dimension containing the number
-of True elements of the boolean array, followed by the remaining
-dimensions of the array being indexed.
-
-For example, using a 2-D boolean array of shape (2,3)
-with four True elements to select rows from a 3-D array of shape
-(2,3,5) results in a 2-D result of shape (4,5): ::
-
- >>> x = np.arange(30).reshape(2,3,5)
- >>> x
- array([[[ 0,  1,  2,  3,  4],
-         [ 5,  6,  7,  8,  9],
-         [10, 11, 12, 13, 14]],
-        [[15, 16, 17, 18, 19],
-         [20, 21, 22, 23, 24],
-         [25, 26, 27, 28, 29]]])
- >>> b = np.array([[True, True, False], [False, True, True]])
- >>> x[b]
- array([[ 0,  1,  2,  3,  4],
-        [ 5,  6,  7,  8,  9],
-        [20, 21, 22, 23, 24],
-        [25, 26, 27, 28, 29]])
-
-For further details, consult the numpy reference documentation on array indexing.
-
-Combining index arrays with slices
-==================================
-
-Index arrays may be combined with slices. For example: ::
-
- >>> y[np.array([0,2,4]),1:3]
- array([[ 1,  2],
-        [15, 16],
-        [29, 30]])
-
-In effect, the slice is converted to an index array
-np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
-to produce a resultant array of shape (3,2).
-
-Likewise, slicing can be combined with broadcasted boolean indices: ::
-
- >>> b = y > 20
- >>> b
- array([[False, False, False, False, False, False, False],
-        [False, False, False, False, False, False, False],
-        [False, False, False, False, False, False, False],
-        [ True,  True,  True,  True,  True,  True,  True],
-        [ True,  True,  True,  True,  True,  True,  True]])
- >>> y[b[:,5],1:3]
- array([[22, 23],
-        [29, 30]])
-
-Structural indexing tools
-=========================
-
-To facilitate easy matching of array shapes with expressions and in
-assignments, the np.newaxis object can be used within array indices
-to add new dimensions with a size of 1. For example: ::
-
- >>> y.shape
- (5, 7)
- >>> y[:,np.newaxis,:].shape
- (5, 1, 7)
-
-Note that there are no new elements in the array, just that the
-dimensionality is increased. This can be handy to combine two
-arrays in a way that otherwise would require explicit reshaping
-operations. For example: ::
-
- >>> x = np.arange(5)
- >>> x[:,np.newaxis] + x[np.newaxis,:]
- array([[0, 1, 2, 3, 4],
-        [1, 2, 3, 4, 5],
-        [2, 3, 4, 5, 6],
-        [3, 4, 5, 6, 7],
-        [4, 5, 6, 7, 8]])
-
-The ellipsis syntax may be used to indicate selecting in full any
-remaining unspecified dimensions. For example: ::
-
- >>> z = np.arange(81).reshape(3,3,3,3)
- >>> z[1,...,2]
- array([[29, 32, 35],
-        [38, 41, 44],
-        [47, 50, 53]])
-
-This is equivalent to: ::
-
- >>> z[1,:,:,2]
- array([[29, 32, 35],
-        [38, 41, 44],
-        [47, 50, 53]])
-
-Assigning values to indexed arrays
-==================================
-
-As mentioned, one can select a subset of an array to assign to using
-a single index, slices, and index and mask arrays. The value being
-assigned to the indexed array must be shape consistent (the same shape
-or broadcastable to the shape the index produces).
-For example, it is
-permitted to assign a constant to a slice: ::
-
- >>> x = np.arange(10)
- >>> x[2:7] = 1
-
-or an array of the right size: ::
-
- >>> x[2:7] = np.arange(5)
-
-Note that assignments may result in changes if assigning
-higher types to lower types (floats assigned to ints are truncated, for
-example) or even exceptions (assigning complex to floats or ints): ::
-
- >>> x[1] = 1.2
- >>> x[1]
- 1
- >>> x[1] = 1.2j
- TypeError: can't convert complex to long; use
- long(abs(z))
-
-
-Unlike some of the references (such as array and mask indices),
-assignments are always made to the original data in the array
-(indeed, nothing else would make sense!). Note though, that some
-actions may not work as one may naively expect. This particular
-example is often surprising to people: ::
-
- >>> x = np.arange(0, 50, 10)
- >>> x
- array([ 0, 10, 20, 30, 40])
- >>> x[np.array([1, 1, 3, 1])] += 1
- >>> x
- array([ 0, 11, 20, 31, 40])
-
-People might expect that the 1st location would be incremented by 3.
-In fact, it is only incremented by 1. The reason is that
-a new array is extracted from the original (as a temporary) containing
-the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
-and then the temporary is assigned back to the original array. Thus
-the value of the array at x[1]+1 is assigned to x[1] three times,
-rather than being incremented 3 times.
-
-Dealing with variable numbers of indices within programs
-========================================================
-
-The index syntax is very powerful but limiting when dealing with
-a variable number of indices. For example, if you want to write
-a function that can handle arguments with various numbers of
-dimensions without having to write special case code for each
-number of possible dimensions, how can that be done? If one
-supplies to the index a tuple, the tuple will be interpreted
-as a list of indices. For example (using the previous definition
-for the array z): ::
-
- >>> indices = (1,1,1,1)
- >>> z[indices]
- 40
-
-So one can use code to construct tuples of any number of indices
-and then use these within an index.
-
-Slices can be specified within programs by using the slice() function
-in Python. For example: ::
-
- >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
- >>> z[indices]
- array([39, 40])
-
-Likewise, ellipsis can be specified in code by using the Ellipsis
-object: ::
-
- >>> indices = (1, Ellipsis, 1) # same as [1,...,1]
- >>> z[indices]
- array([[28, 31, 34],
-        [37, 40, 43],
-        [46, 49, 52]])
-
-For this reason it is possible to use the output from the np.nonzero()
-function directly as an index since it always returns a tuple of index
-arrays.
-
-Because of the special treatment of tuples, they are not automatically
-converted to an array as a list would be. As an example: ::
-
- >>> z[[1,1,1,1]] # produces a large array
- array([[[[27, 28, 29],
-          [30, 31, 32], ...
- >>> z[(1,1,1,1)] # returns a single value
- 40
-
-"""
-from __future__ import division, absolute_import, print_function
diff --git a/venv/lib/python3.7/site-packages/numpy/doc/internals.py b/venv/lib/python3.7/site-packages/numpy/doc/internals.py
deleted file mode 100644
index a14fee7..0000000
--- a/venv/lib/python3.7/site-packages/numpy/doc/internals.py
+++ /dev/null
@@ -1,163 +0,0 @@
-"""
-===============
-Array Internals
-===============
-
-Internal organization of numpy arrays
-=====================================
-
-It helps to understand a bit about how numpy arrays are handled under
-the covers.
-This section will not go into great detail. Those wishing to understand
-the full details are referred to Travis Oliphant's book "Guide to NumPy".
-
-NumPy arrays consist of two major components: the raw array data (from now on,
-referred to as the data buffer), and the information about the raw array data.
-The data buffer is typically what people think of as arrays in C or Fortran,
-a contiguous (and fixed) block of memory containing fixed-sized data items.
-NumPy also contains a significant set of data that describes how to interpret
-the data in the data buffer. This extra information contains (among other things):
-
- 1) The basic data element's size in bytes.
- 2) The start of the data within the data buffer (an offset relative to the
-    beginning of the data buffer).
- 3) The number of dimensions and the size of each dimension.
- 4) The separation between elements for each dimension (the 'stride'). This
-    does not have to be a multiple of the element size.
- 5) The byte order of the data (which may not be the native byte order).
- 6) Whether the buffer is read-only.
- 7) Information (via the dtype object) about the interpretation of the basic
-    data element. The basic data element may be as simple as an int or a float,
-    or it may be a compound object (e.g., struct-like), a fixed character field,
-    or Python object pointers.
- 8) Whether the array is to be interpreted as C-order or Fortran-order.
-
-This arrangement allows for very flexible use of arrays. One thing that it allows
-is simple changes of the metadata to change the interpretation of the array buffer.
-Changing the byteorder of the array is a simple change involving no rearrangement
-of the data. The shape of the array can be changed very easily without changing
-anything in the data buffer and without any data copying at all.
-
-Among other things that are made possible is that one can create a new array
-metadata object that uses the same data buffer
-to create a new view of that data buffer that has a different interpretation
-of the buffer (e.g., different shape, offset, byte order, strides, etc.) but
-shares the same data bytes. Many operations in numpy do just this, such as
-slicing. Other operations, such as transpose, don't move data elements
-around in the array, but rather change the information about the shape and
-strides so that the indexing of the array changes, but the data in the buffer
-doesn't move.
-
-Typically these combinations of new array metadata with the same data buffer are
-new 'views' into the data buffer. There is a different ndarray object, but it
-uses the same data buffer. This is why it is necessary to force copies through
-use of the .copy() method if one really wants to make a new and independent
-copy of the data buffer.
-
-New views into arrays mean the object reference counts for the data buffer
-increase. Simply doing away with the original array object will not remove the
-data buffer if other views of it still exist.
-
-Multidimensional Array Indexing Order Issues
-============================================
-
-What is the right way to index
-multi-dimensional arrays? Before you jump to conclusions about the one and
-true way to index multi-dimensional arrays, it pays to understand why this is
-a confusing issue. This section will try to explain in detail how numpy
-indexing works and why we adopt the convention we do for images, and when it
-may be appropriate to adopt other conventions.
-
-The first thing to understand is
-that there are two conflicting conventions for indexing 2-dimensional arrays.
-Matrix notation uses the first index to indicate which row is being selected and -the second index to indicate which column is selected. This is opposite the -geometrically oriented-convention for images where people generally think the -first index represents x position (i.e., column) and the second represents y -position (i.e., row). This alone is the source of much confusion; -matrix-oriented users and image-oriented users expect two different things with -regard to indexing. - -The second issue to understand is how indices correspond -to the order the array is stored in memory. In Fortran the first index is the -most rapidly varying index when moving through the elements of a two -dimensional array as it is stored in memory. If you adopt the matrix -convention for indexing, then this means the matrix is stored one column at a -time (since the first index moves to the next row as it changes). Thus Fortran -is considered a Column-major language. C has just the opposite convention. In -C, the last index changes most rapidly as one moves through the array as -stored in memory. Thus C is a Row-major language. The matrix is stored by -rows. Note that in both cases it presumes that the matrix convention for -indexing is being used, i.e., for both Fortran and C, the first index is the -row. Note this convention implies that the indexing convention is invariant -and that the data order changes to keep that so. - -But that's not the only way -to look at it. Suppose one has large two-dimensional arrays (images or -matrices) stored in data files. Suppose the data are stored by rows rather than -by columns. If we are to preserve our index convention (whether matrix or -image) that means that depending on the language we use, we may be forced to -reorder the data if it is read into memory to preserve our indexing -convention. For example if we read row-ordered data into memory without -reordering, it will match the matrix indexing convention for C, but not for -Fortran. Conversely, it will match the image indexing convention for Fortran, -but not for C. For C, if one is using data stored in row order, and one wants -to preserve the image index convention, the data must be reordered when -reading into memory. - -In the end, which you do for Fortran or C depends on -which is more important, not reordering data or preserving the indexing -convention. For large images, reordering data is potentially expensive, and -often the indexing convention is inverted to avoid that. - -The situation with -numpy makes this issue yet more complicated. The internal machinery of numpy -arrays is flexible enough to accept any ordering of indices. One can simply -reorder indices by manipulating the internal stride information for arrays -without reordering the data at all. NumPy will know how to map the new index -order to the data without moving the data. - -So if this is true, why not choose -the index order that matches what you most expect? In particular, why not define -row-ordered images to use the image convention? (This is sometimes referred -to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN' -order options for array ordering in numpy.) The drawback of doing this is -potential performance penalties. It's common to access the data sequentially, -either implicitly in array operations or explicitly by looping over rows of an -image. When that is done, then the data will be accessed in non-optimal order. 
-As the first index is incremented, what is actually happening is that elements
-spaced far apart in memory are being sequentially accessed, with usually poor
-memory access speeds. For example, consider a two-dimensional image 'im'
-defined so that im[0, 10] represents the value at x=0, y=10. To be consistent
-with usual Python behavior, im[0] would then represent a column at x=0. Yet
-that data would be spread over the whole array since the data are stored in
-row order. Despite the flexibility of numpy's indexing, it can't really paper
-over the fact that basic operations are rendered inefficient because of data
-order, or that getting contiguous subarrays is still awkward (e.g., im[:,0]
-for the first row, vs im[0]). Thus one can't use an idiom such as
-``for row in im``; ``for col in im`` does work, but doesn't yield contiguous
-column data.
-
-As it turns out, numpy is
-smart enough when dealing with ufuncs to determine which index is the most
-rapidly varying one in memory and to use that for the innermost loop. Thus for
-ufuncs there is no large intrinsic advantage to either approach in most cases.
-On the other hand, use of .flat with a Fortran-ordered array will lead to
-non-optimal memory access as adjacent elements in the flattened array (iterator,
-actually) are not contiguous in memory.
-
-Indeed, the fact is that Python
-indexing on lists and other sequences naturally leads to an outside-to-inside
-ordering (the first index gets the largest grouping, the next the next largest,
-and the last gets the smallest element). Since image data are normally stored
-by rows, this corresponds to position within rows being the last item indexed.
-
-If you do want to use Fortran ordering, realize that
-there are two approaches to consider: 1) accept that the first index is just not
-the most rapidly changing in memory and have all your I/O routines reorder
-your data when going from memory to disk or vice versa, or 2) use numpy's
-mechanism for mapping the first index to the most rapidly varying data. We
-recommend the former if possible. The disadvantage of the latter is that many
-of numpy's functions will yield arrays without Fortran ordering unless you are
-careful to use the 'order' keyword. Doing this would be highly inconvenient.
-
-Otherwise we recommend simply learning to reverse the usual order of indices
-when accessing elements of an array. Granted, it goes against the grain, but
-it is more in line with Python semantics and the natural order of the data.
-
-"""
-from __future__ import division, absolute_import, print_function
diff --git a/venv/lib/python3.7/site-packages/numpy/doc/misc.py b/venv/lib/python3.7/site-packages/numpy/doc/misc.py
deleted file mode 100644
index a76abe1..0000000
--- a/venv/lib/python3.7/site-packages/numpy/doc/misc.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""
-=============
-Miscellaneous
-=============
-
-IEEE 754 Floating Point Special Values
---------------------------------------
-
-Special values defined in numpy: nan, inf
-
-NaNs can be used as a poor-man's mask (if you don't care what the
-original value was)
-
-Note: cannot use equality to test NaNs. E.g.: ::
-
- >>> myarr = np.array([1., 0., np.nan, 3.])
- >>> np.nonzero(myarr == np.nan)
- (array([], dtype=int64),)
- >>> np.nan == np.nan  # is always False! Use special numpy functions instead.
- False
- >>> myarr[myarr == np.nan] = 0. # doesn't work
- >>> myarr
- array([  1.,   0.,  NaN,   3.])
- >>> myarr[np.isnan(myarr)] = 0. # use this instead to find and replace NaNs
- >>> myarr
- array([ 1.,  0.,  0.,  3.])
-
-Other related special value functions: ::
-
-   isinf():      True if value is inf
-   isfinite():   True if not nan or inf
-   nan_to_num(): Map nan to 0, inf to max float, -inf to min float
-
-The following corresponds to the usual functions except that nans are excluded
-from the results: ::
-
-   nansum()
-   nanmax()
-   nanmin()
-   nanargmax()
-   nanargmin()
-
- >>> x = np.arange(10.)
- >>> x[3] = np.nan
- >>> x.sum()
- nan
- >>> np.nansum(x)
- 42.0
-
-How numpy handles numerical exceptions
---------------------------------------
-
-The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
-and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
-set individually for different kinds of exceptions. The different behaviors
-are:
-
-  - 'ignore' : Take no action when the exception occurs.
-  - 'warn'   : Print a `RuntimeWarning` (via the Python `warnings` module).
-  - 'raise'  : Raise a `FloatingPointError`.
-  - 'call'   : Call a function specified using the `seterrcall` function.
-  - 'print'  : Print a warning directly to ``stdout``.
-  - 'log'    : Record error in a Log object specified by `seterrcall`.
-
-These behaviors can be set for all kinds of errors or specific ones:
-
-  - all       : apply to all numeric exceptions
-  - invalid   : when NaNs are generated
-  - divide    : divide by zero (for integers as well!)
-  - overflow  : floating point overflows
-  - underflow : floating point underflows
-
-Note that integer divide-by-zero is handled by the same machinery.
-These behaviors are set on a per-thread basis.
-
-Examples
---------
-
-::
-
- >>> oldsettings = np.seterr(all='warn')
- >>> np.zeros(5,dtype=np.float32)/0.
- invalid value encountered in divide
- >>> j = np.seterr(under='ignore')
- >>> np.array([1.e-100])**10
- >>> j = np.seterr(invalid='raise')
- >>> np.sqrt(np.array([-1.]))
- FloatingPointError: invalid value encountered in sqrt
- >>> def errorhandler(errstr, errflag):
- ...      print("saw stupid error!")
- >>> np.seterrcall(errorhandler)
- <function errorhandler at 0x...>
- >>> j = np.seterr(all='call')
- >>> np.zeros(5, dtype=np.int32)/0
- FloatingPointError: invalid value encountered in divide
- saw stupid error!
- >>> j = np.seterr(**oldsettings) # restore previous
- ...                              # error-handling settings
-
-Interfacing to C
-----------------
-Only a survey of the choices; little detail on how each works.
-
-1) Bare metal, wrap your own C-code manually.
-
- - Plusses:
-
-   - Efficient
-   - No dependencies on other tools
-
- - Minuses:
-
-   - Lots of learning overhead:
-
-     - need to learn basics of Python C API
-     - need to learn basics of numpy C API
-     - need to learn how to handle reference counting and love it.
-
-   - Reference counting often difficult to get right.
-
-     - getting it wrong leads to memory leaks, and worse, segfaults
-
-   - API will change for Python 3.0!
- -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -3) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing sharable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data a.ctypes.get_strides - a.ctypes.data_as a.ctypes.shape - a.ctypes.get_as_parameter a.ctypes.shape_as - a.ctypes.get_data a.ctypes.strides - a.ctypes.get_shape a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. - -4) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -5) scipy.weave - - - Plusses: - - - can turn many numpy expressions into C code - - dynamic compiling and loading of generated C code - - can embed pure C code in Python module and have weave extract, generate - interfaces and compile, etc. - - - Minuses: - - - Future very uncertain: it's the only part of Scipy not ported to Python 3 - and is effectively deprecated in favor of Cython. - -6) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. - -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) - -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/structured_arrays.py b/venv/lib/python3.7/site-packages/numpy/doc/structured_arrays.py deleted file mode 100644 index 1343d2a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/structured_arrays.py +++ /dev/null @@ -1,647 +0,0 @@ -""" -================= -Structured Arrays -================= - -Introduction -============ - -Structured arrays are ndarrays whose datatype is a composition of simpler -datatypes organized as a sequence of named :term:`fields `. For example, -:: - - >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)], - ... 
dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')]) - >>> x - array([('Rex', 9, 81.), ('Fido', 3, 27.)], - dtype=[('name', 'U10'), ('age', '>> x[1] - ('Fido', 3, 27.0) - -You can access and modify individual fields of a structured array by indexing -with the field name:: - - >>> x['age'] - array([9, 3], dtype=int32) - >>> x['age'] = 5 - >>> x - array([('Rex', 5, 81.), ('Fido', 5, 27.)], - dtype=[('name', 'U10'), ('age', '` reference page, and in -summary they are: - -1. A list of tuples, one tuple per field - - Each tuple has the form ``(fieldname, datatype, shape)`` where shape is - optional. ``fieldname`` is a string (or tuple if titles are used, see - :ref:`Field Titles ` below), ``datatype`` may be any object - convertible to a datatype, and ``shape`` is a tuple of integers specifying - subarray shape. - - >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))]) - dtype([('x', '>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')]) - dtype([('x', '` may be used in a string and separated by - commas. The itemsize and byte offsets of the fields are determined - automatically, and the field names are given the default names ``f0``, - ``f1``, etc. :: - - >>> np.dtype('i8, f4, S3') - dtype([('f0', '>> np.dtype('3int8, float32, (2, 3)float64') - dtype([('f0', 'i1', (3,)), ('f1', '>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']}) - dtype([('col1', '>> np.dtype({'names': ['col1', 'col2'], - ... 'formats': ['i4', 'f4'], - ... 'offsets': [0, 4], - ... 'itemsize': 12}) - dtype({'names':['col1','col2'], 'formats':['` below. - -4. A dictionary of field names - - The use of this form of specification is discouraged, but documented here - because older numpy code may use it. The keys of the dictionary are the - field names and the values are tuples specifying type and offset:: - - >>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)}) - dtype([('col1', 'i1'), ('col2', '` may be - specified by using a 3-tuple, see below. - -Manipulating and Displaying Structured Datatypes ------------------------------------------------- - -The list of field names of a structured datatype can be found in the ``names`` -attribute of the dtype object:: - - >>> d = np.dtype([('x', 'i8'), ('y', 'f4')]) - >>> d.names - ('x', 'y') - -The field names may be modified by assigning to the ``names`` attribute using a -sequence of strings of the same length. - -The dtype object also has a dictionary-like attribute, ``fields``, whose keys -are the field names (and :ref:`Field Titles `, see below) and whose -values are tuples containing the dtype and byte offset of each field. :: - - >>> d.fields - mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)}) - -Both the ``names`` and ``fields`` attributes will equal ``None`` for -unstructured arrays. The recommended way to test if a dtype is structured is -with `if dt.names is not None` rather than `if dt.names`, to account for dtypes -with 0 fields. - -The string representation of a structured datatype is shown in the "list of -tuples" form if possible, otherwise numpy falls back to using the more general -dictionary form. - -.. _offsets-and-alignment: - -Automatic Byte Offsets and Alignment ------------------------------------- - -Numpy uses one of two methods to automatically determine the field byte offsets -and the overall itemsize of a structured datatype, depending on whether -``align=True`` was specified as a keyword argument to :func:`numpy.dtype`. 
- -By default (``align=False``), numpy will pack the fields together such that -each field starts at the byte offset the previous field ended, and the fields -are contiguous in memory. :: - - >>> def print_offsets(d): - ... print("offsets:", [d.fields[name][1] for name in d.names]) - ... print("itemsize:", d.itemsize) - >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2')) - offsets: [0, 1, 2, 6, 7, 15] - itemsize: 17 - -If ``align=True`` is set, numpy will pad the structure in the same way many C -compilers would pad a C-struct. Aligned structures can give a performance -improvement in some cases, at the cost of increased datatype size. Padding -bytes are inserted between fields such that each field's byte offset will be a -multiple of that field's alignment, which is usually equal to the field's size -in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The -structure will also have trailing padding added so that its itemsize is a -multiple of the largest field's alignment. :: - - >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True)) - offsets: [0, 1, 4, 8, 16, 24] - itemsize: 32 - -Note that although almost all modern C compilers pad in this way by default, -padding in C structs is C-implementation-dependent so this memory layout is not -guaranteed to exactly match that of a corresponding struct in a C program. Some -work may be needed, either on the numpy side or the C side, to obtain exact -correspondence. - -If offsets were specified using the optional ``offsets`` key in the -dictionary-based dtype specification, setting ``align=True`` will check that -each field's offset is a multiple of its size and that the itemsize is a -multiple of the largest field size, and raise an exception if not. - -If the offsets of the fields and itemsize of a structured array satisfy the -alignment conditions, the array will have the ``ALIGNED`` :attr:`flag -` set. - -A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an -aligned dtype or array to a packed one and vice versa. It takes either a dtype -or structured ndarray as an argument, and returns a copy with fields re-packed, -with or without padding bytes. - -.. _titles: - -Field Titles ------------- - -In addition to field names, fields may also have an associated :term:`title`, -an alternate name, which is sometimes used as an additional description or -alias for the field. The title may be used to index an array, just like a -field name. - -To add titles when using the list-of-tuples form of dtype specification, the -field name may be specified as a tuple of two strings instead of a single -string, which will be the field's title and field name respectively. For -example:: - - >>> np.dtype([(('my title', 'name'), 'f4')]) - dtype([(('my title', 'name'), '>> np.dtype({'name': ('i4', 0, 'my title')}) - dtype([(('my title', 'name'), '>> for name in d.names: - ... print(d.fields[name][:2]) - (dtype('int64'), 0) - (dtype('float32'), 8) - -Union types ------------ - -Structured datatypes are implemented in numpy to have base type -:class:`numpy.void` by default, but it is possible to interpret other numpy -types as structured types using the ``(base_dtype, dtype)`` form of dtype -specification described in -:ref:`Data Type Objects `. Here, ``base_dtype`` is -the desired underlying dtype, and fields and flags will be copied from -``dtype``. This dtype is similar to a 'union' in C. 
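-
-A minimal sketch of such a union-like dtype follows; the field names
-``real`` and ``imag`` and the 16-bit halves are arbitrary choices for
-illustration, and the final value assumes a little-endian machine, where
-``real`` occupies the low two bytes of each 32-bit item: ::
-
- >>> dt = np.dtype((np.int32, {'real': (np.int16, 0), 'imag': (np.int16, 2)}))
- >>> x = np.zeros(2, dtype=dt)
- >>> x['real'] = 1        # writes only bytes 0-1 of each item
- >>> x['imag'] = 2        # writes only bytes 2-3 of each item
- >>> x.view(np.int32)     # the same four bytes read back as one integer
- array([131073, 131073], dtype=int32)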
- -Indexing and Assignment to Structured arrays -============================================ - -Assigning data to a Structured Array ------------------------------------- - -There are a number of ways to assign values to a structured array: Using python -tuples, using scalar values, or using other structured arrays. - -Assignment from Python Native Types (Tuples) -```````````````````````````````````````````` - -The simplest way to assign values to a structured array is using python tuples. -Each assigned value should be a tuple of length equal to the number of fields -in the array, and not a list or array as these will trigger numpy's -broadcasting rules. The tuple's elements are assigned to the successive fields -of the array, from left to right:: - - >>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8') - >>> x[1] = (7, 8, 9) - >>> x - array([(1, 2., 3.), (7, 8., 9.)], - dtype=[('f0', '>> x = np.zeros(2, dtype='i8, f4, ?, S1') - >>> x[:] = 3 - >>> x - array([(3, 3., True, b'3'), (3, 3., True, b'3')], - dtype=[('f0', '>> x[:] = np.arange(2) - >>> x - array([(0, 0., False, b'0'), (1, 1., True, b'1')], - dtype=[('f0', '>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')]) - >>> onefield = np.zeros(2, dtype=[('A', 'i4')]) - >>> nostruct = np.zeros(2, dtype='i4') - >>> nostruct[:] = twofield - Traceback (most recent call last): - ... - TypeError: Cannot cast scalar from dtype([('A', '>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')]) - >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')]) - >>> b[:] = a - >>> b - array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')], - dtype=[('x', '>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) - >>> x['foo'] - array([1, 3]) - >>> x['foo'] = 10 - >>> x - array([(10, 2.), (10, 4.)], - dtype=[('foo', '>> y = x['bar'] - >>> y[:] = 11 - >>> x - array([(10, 11.), (10, 11.)], - dtype=[('foo', '>> y.dtype, y.shape, y.strides - (dtype('float32'), (2,), (12,)) - -If the accessed field is a subarray, the dimensions of the subarray -are appended to the shape of the result:: - - >>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))]) - >>> x['a'].shape - (2, 2) - >>> x['b'].shape - (2, 2, 3, 3) - -Accessing Multiple Fields -``````````````````````````` - -One can index and assign to a structured array with a multi-field index, where -the index is a list of field names. - -.. warning:: - The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16. - -The result of indexing with a multi-field index is a view into the original -array, as follows:: - - >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')]) - >>> a[['a', 'c']] - array([(0, 0.), (0, 0.), (0, 0.)], - dtype={'names':['a','c'], 'formats':['>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16 - Traceback (most recent call last): - File "", line 1, in - ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype - - will need to be changed. This code has raised a ``FutureWarning`` since - Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7. - - In 1.16 a number of functions have been introduced in the - :mod:`numpy.lib.recfunctions` module to help users account for this - change. These are - :func:`numpy.lib.recfunctions.repack_fields`. 
- :func:`numpy.lib.recfunctions.structured_to_unstructured`, - :func:`numpy.lib.recfunctions.unstructured_to_structured`, - :func:`numpy.lib.recfunctions.apply_along_fields`, - :func:`numpy.lib.recfunctions.assign_fields_by_name`, and - :func:`numpy.lib.recfunctions.require_fields`. - - The function :func:`numpy.lib.recfunctions.repack_fields` can always be - used to reproduce the old behavior, as it will return a packed copy of the - structured array. The code above, for example, can be replaced with: - - >>> from numpy.lib.recfunctions import repack_fields - >>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16 - array([0, 0, 0]) - - Furthermore, numpy now provides a new function - :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer - and more efficient alternative for users who wish to convert structured - arrays to unstructured arrays, as the view above is often indeded to do. - This function allows safe conversion to an unstructured type taking into - account padding, often avoids a copy, and also casts the datatypes - as needed, unlike the view. Code such as: - - >>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]) - >>> b[['x', 'z']].view('f4') - array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32) - - can be made safer by replacing with: - - >>> from numpy.lib.recfunctions import structured_to_unstructured - >>> structured_to_unstructured(b[['x', 'z']]) - array([0, 0, 0]) - - -Assignment to an array with a multi-field index modifies the original array:: - - >>> a[['a', 'c']] = (2, 3) - >>> a - array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)], - dtype=[('a', '>> a[['a', 'c']] = a[['c', 'a']] - -Indexing with an Integer to get a Structured Scalar -``````````````````````````````````````````````````` - -Indexing a single element of a structured array (with an integer index) returns -a structured scalar:: - - >>> x = np.array([(1, 2., 3.)], dtype='i, f, f') - >>> scalar = x[0] - >>> scalar - (1, 2., 3.) - >>> type(scalar) - - -Unlike other numpy scalars, structured scalars are mutable and act like views -into the original array, such that modifying the scalar will modify the -original array. Structured scalars also support access and assignment by field -name:: - - >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) - >>> s = x[0] - >>> s['bar'] = 100 - >>> x - array([(1, 100.), (3, 4.)], - dtype=[('foo', '>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0] - >>> scalar[0] - 1 - >>> scalar[1] = 4 - -Thus, tuples might be thought of as the native Python equivalent to numpy's -structured types, much like native python integers are the equivalent to -numpy's integer types. Structured scalars may be converted to a tuple by -calling :func:`ndarray.item`:: - - >>> scalar.item(), type(scalar.item()) - ((1, 4.0, 3.0), ) - -Viewing Structured Arrays Containing Objects --------------------------------------------- - -In order to prevent clobbering object pointers in fields of -:class:`numpy.object` type, numpy currently does not allow views of structured -arrays containing objects. - -Structure Comparison --------------------- - -If the dtypes of two void structured arrays are equal, testing the equality of -the arrays will result in a boolean array with the dimensions of the original -arrays, with elements set to ``True`` where all fields of the corresponding -structures are equal. 
Structured dtypes are equal if the field names, -dtypes and titles are the same, ignoring endianness, and the fields are in -the same order:: - - >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')]) - >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')]) - >>> a == b - array([False, False]) - -Currently, if the dtypes of two void structured arrays are not equivalent the -comparison fails, returning the scalar value ``False``. This behavior is -deprecated as of numpy 1.10 and will raise an error or perform elementwise -comparison in the future. - -The ``<`` and ``>`` operators always return ``False`` when comparing void -structured arrays, and arithmetic and bitwise operations are not supported. - -Record Arrays -============= - -As an optional convenience numpy provides an ndarray subclass, -:class:`numpy.recarray`, and associated helper functions in the -:mod:`numpy.rec` submodule, that allows access to fields of structured arrays -by attribute instead of only by index. Record arrays also use a special -datatype, :class:`numpy.record`, that allows field access by attribute on the -structured scalars obtained from the array. - -The simplest way to create a record array is with :func:`numpy.rec.array`:: - - >>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")], - ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')]) - >>> recordarr.bar - array([ 2., 3.], dtype=float32) - >>> recordarr[1:2] - rec.array([(2, 3., b'World')], - dtype=[('foo', '>> recordarr[1:2].foo - array([2], dtype=int32) - >>> recordarr.foo[1:2] - array([2], dtype=int32) - >>> recordarr[1].baz - b'World' - -:func:`numpy.rec.array` can convert a wide variety of arguments into record -arrays, including structured arrays:: - - >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")], - ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')]) - >>> recordarr = np.rec.array(arr) - -The :mod:`numpy.rec` module provides a number of other convenience functions for -creating record arrays, see :ref:`record array creation routines -`. - -A record array representation of a structured array can be obtained using the -appropriate `view `_:: - - >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")], - ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')]) - >>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)), - ... type=np.recarray) - -For convenience, viewing an ndarray as type :class:`np.recarray` will -automatically convert to :class:`np.record` datatype, so the dtype can be left -out of the view:: - - >>> recordarr = arr.view(np.recarray) - >>> recordarr.dtype - dtype((numpy.record, [('foo', '>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray) - -Record array fields accessed by index or by attribute are returned as a record -array if the field has a structured type but as a plain ndarray otherwise. :: - - >>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))], - ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])]) - >>> type(recordarr.foo) - - >>> type(recordarr.bar) - - -Note that if a field has the same name as an ndarray attribute, the ndarray -attribute takes precedence. Such fields will be inaccessible by attribute but -will still be accessible by index. 
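-
-A minimal sketch of that precedence rule; the field name ``shape`` is chosen
-deliberately here because it collides with an ndarray attribute of the same
-name: ::
-
- >>> r = np.rec.array([(1,), (2,)], dtype=[('shape', 'i4')])
- >>> r.shape            # attribute access: the ndarray attribute wins
- (2,)
- >>> r['shape']         # index access: the field is still reachable
- array([1, 2], dtype=int32)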
- -""" -from __future__ import division, absolute_import, print_function diff --git a/venv/lib/python3.7/site-packages/numpy/doc/subclassing.py b/venv/lib/python3.7/site-packages/numpy/doc/subclassing.py deleted file mode 100644 index d068532..0000000 --- a/venv/lib/python3.7/site-packages/numpy/doc/subclassing.py +++ /dev/null @@ -1,753 +0,0 @@ -"""============================= -Subclassing ndarray in python -============================= - -Introduction ------------- - -Subclassing ndarray is relatively simple, but it has some complications -compared to other Python objects. On this page we explain the machinery -that allows you to subclass ndarray, and the implications for -implementing a subclass. - -ndarrays and object creation -============================ - -Subclassing ndarray is complicated by the fact that new instances of -ndarray classes can come about in three different ways. These are: - -#. Explicit constructor call - as in ``MySubClass(params)``. This is - the usual route to Python instance creation. -#. View casting - casting an existing ndarray as a given subclass -#. New from template - creating a new instance from a template - instance. Examples include returning slices from a subclassed array, - creating return types from ufuncs, and copying arrays. See - :ref:`new-from-template` for more details - -The last two are characteristics of ndarrays - in order to support -things like array slicing. The complications of subclassing ndarray are -due to the mechanisms numpy has to support these latter two routes of -instance creation. - -.. _view-casting: - -View casting ------------- - -*View casting* is the standard ndarray mechanism by which you take an -ndarray of any subclass, and return a view of the array as another -(specified) subclass: - ->>> import numpy as np ->>> # create a completely useless ndarray subclass ->>> class C(np.ndarray): pass ->>> # create a standard ndarray ->>> arr = np.zeros((3,)) ->>> # take a view of it, as our useless subclass ->>> c_arr = arr.view(C) ->>> type(c_arr) - - -.. _new-from-template: - -Creating new from template --------------------------- - -New instances of an ndarray subclass can also come about by a very -similar mechanism to :ref:`view-casting`, when numpy finds it needs to -create a new instance from a template instance. The most obvious place -this has to happen is when you are taking slices of subclassed arrays. -For example: - ->>> v = c_arr[1:] ->>> type(v) # the view is of type 'C' - ->>> v is c_arr # but it's a new instance -False - -The slice is a *view* onto the original ``c_arr`` data. So, when we -take a view from the ndarray, we return a new ndarray, of the same -class, that points to the data in the original. - -There are other points in the use of ndarrays where we need such views, -such as copying arrays (``c_arr.copy()``), creating ufunc output arrays -(see also :ref:`array-wrap`), and reducing methods (like -``c_arr.mean()``. - -Relationship of view casting and new-from-template --------------------------------------------------- - -These paths both use the same machinery. We make the distinction here, -because they result in different input to your methods. Specifically, -:ref:`view-casting` means you have created a new instance of your array -type from any potential subclass of ndarray. :ref:`new-from-template` -means you have created a new instance of your class from a pre-existing -instance, allowing you - for example - to copy across attributes that -are particular to your subclass. 
- -Implications for subclassing ----------------------------- - -If we subclass ndarray, we need to deal not only with explicit -construction of our array type, but also :ref:`view-casting` or -:ref:`new-from-template`. NumPy has the machinery to do this, and this -machinery that makes subclassing slightly non-standard. - -There are two aspects to the machinery that ndarray uses to support -views and new-from-template in subclasses. - -The first is the use of the ``ndarray.__new__`` method for the main work -of object initialization, rather then the more usual ``__init__`` -method. The second is the use of the ``__array_finalize__`` method to -allow subclasses to clean up after the creation of views and new -instances from templates. - -A brief Python primer on ``__new__`` and ``__init__`` -===================================================== - -``__new__`` is a standard Python method, and, if present, is called -before ``__init__`` when we create a class instance. See the `python -__new__ documentation -`_ for more detail. - -For example, consider the following Python code: - -.. testcode:: - - class C(object): - def __new__(cls, *args): - print('Cls in __new__:', cls) - print('Args in __new__:', args) - # The `object` type __new__ method takes a single argument. - return object.__new__(cls) - - def __init__(self, *args): - print('type(self) in __init__:', type(self)) - print('Args in __init__:', args) - -meaning that we get: - ->>> c = C('hello') -Cls in __new__: -Args in __new__: ('hello',) -type(self) in __init__: -Args in __init__: ('hello',) - -When we call ``C('hello')``, the ``__new__`` method gets its own class -as first argument, and the passed argument, which is the string -``'hello'``. After python calls ``__new__``, it usually (see below) -calls our ``__init__`` method, with the output of ``__new__`` as the -first argument (now a class instance), and the passed arguments -following. - -As you can see, the object can be initialized in the ``__new__`` -method or the ``__init__`` method, or both, and in fact ndarray does -not have an ``__init__`` method, because all the initialization is -done in the ``__new__`` method. - -Why use ``__new__`` rather than just the usual ``__init__``? Because -in some cases, as for ndarray, we want to be able to return an object -of some other class. Consider the following: - -.. testcode:: - - class D(C): - def __new__(cls, *args): - print('D cls is:', cls) - print('D args in __new__:', args) - return C.__new__(C, *args) - - def __init__(self, *args): - # we never get here - print('In D __init__') - -meaning that: - ->>> obj = D('hello') -D cls is: -D args in __new__: ('hello',) -Cls in __new__: -Args in __new__: ('hello',) ->>> type(obj) - - -The definition of ``C`` is the same as before, but for ``D``, the -``__new__`` method returns an instance of class ``C`` rather than -``D``. Note that the ``__init__`` method of ``D`` does not get -called. In general, when the ``__new__`` method returns an object of -class other than the class in which it is defined, the ``__init__`` -method of that class is not called. - -This is how subclasses of the ndarray class are able to return views -that preserve the class type. When taking a view, the standard -ndarray machinery creates the new ndarray object with something -like:: - - obj = ndarray.__new__(subtype, shape, ... - -where ``subdtype`` is the subclass. Thus the returned view is of the -same class as the subclass, rather than being of class ``ndarray``. 
- -That solves the problem of returning views of the same type, but now -we have a new problem. The machinery of ndarray can set the class -this way, in its standard methods for taking views, but the ndarray -``__new__`` method knows nothing of what we have done in our own -``__new__`` method in order to set attributes, and so on. (Aside - -why not call ``obj = subdtype.__new__(...`` then? Because we may not -have a ``__new__`` method with the same call signature). - -The role of ``__array_finalize__`` -================================== - -``__array_finalize__`` is the mechanism that numpy provides to allow -subclasses to handle the various ways that new instances get created. - -Remember that subclass instances can come about in these three ways: - -#. explicit constructor call (``obj = MySubClass(params)``). This will - call the usual sequence of ``MySubClass.__new__`` then (if it exists) - ``MySubClass.__init__``. -#. :ref:`view-casting` -#. :ref:`new-from-template` - -Our ``MySubClass.__new__`` method only gets called in the case of the -explicit constructor call, so we can't rely on ``MySubClass.__new__`` or -``MySubClass.__init__`` to deal with the view casting and -new-from-template. It turns out that ``MySubClass.__array_finalize__`` -*does* get called for all three methods of object creation, so this is -where our object creation housekeeping usually goes. - -* For the explicit constructor call, our subclass will need to create a - new ndarray instance of its own class. In practice this means that - we, the authors of the code, will need to make a call to - ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to - ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an - existing array (see below) -* For view casting and new-from-template, the equivalent of - ``ndarray.__new__(MySubClass,...`` is called, at the C level. - -The arguments that ``__array_finalize__`` receives differ for the three -methods of instance creation above. - -The following code allows us to look at the call sequences and arguments: - -.. testcode:: - - import numpy as np - - class C(np.ndarray): - def __new__(cls, *args, **kwargs): - print('In __new__ with class %s' % cls) - return super(C, cls).__new__(cls, *args, **kwargs) - - def __init__(self, *args, **kwargs): - # in practice you probably will not need or want an __init__ - # method for your subclass - print('In __init__ with class %s' % self.__class__) - - def __array_finalize__(self, obj): - print('In array_finalize:') - print(' self type is %s' % type(self)) - print(' obj type is %s' % type(obj)) - - -Now: - ->>> # Explicit constructor ->>> c = C((10,)) -In __new__ with class -In array_finalize: - self type is - obj type is -In __init__ with class ->>> # View casting ->>> a = np.arange(10) ->>> cast_a = a.view(C) -In array_finalize: - self type is - obj type is ->>> # Slicing (example of new-from-template) ->>> cv = c[:1] -In array_finalize: - self type is - obj type is - -The signature of ``__array_finalize__`` is:: - - def __array_finalize__(self, obj): - -One sees that the ``super`` call, which goes to -``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our -own class (``self``) as well as the object from which the view has been -taken (``obj``). 
As you can see from the output above, the ``self`` is -always a newly created instance of our subclass, and the type of ``obj`` -differs for the three instance creation methods: - -* When called from the explicit constructor, ``obj`` is ``None`` -* When called from view casting, ``obj`` can be an instance of any - subclass of ndarray, including our own. -* When called in new-from-template, ``obj`` is another instance of our - own subclass, that we might use to update the new ``self`` instance. - -Because ``__array_finalize__`` is the only method that always sees new -instances being created, it is the sensible place to fill in instance -defaults for new object attributes, among other tasks. - -This may be clearer with an example. - -Simple example - adding an extra attribute to ndarray ------------------------------------------------------ - -.. testcode:: - - import numpy as np - - class InfoArray(np.ndarray): - - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, - strides=None, order=None, info=None): - # Create the ndarray instance of our type, given the usual - # ndarray input arguments. This will call the standard - # ndarray constructor, but return an object of our type. - # It also triggers a call to InfoArray.__array_finalize__ - obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, - buffer, offset, strides, - order) - # set the new 'info' attribute to the value passed - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # ``self`` is a new object resulting from - # ndarray.__new__(InfoArray, ...), therefore it only has - # attributes that the ndarray.__new__ constructor gave it - - # i.e. those of a standard ndarray. - # - # We could have got to the ndarray.__new__ call in 3 ways: - # From an explicit constructor - e.g. InfoArray(): - # obj is None - # (we're in the middle of the InfoArray.__new__ - # constructor, and self.info will be set when we return to - # InfoArray.__new__) - if obj is None: return - # From view casting - e.g arr.view(InfoArray): - # obj is arr - # (type(obj) can be InfoArray) - # From new-from-template - e.g infoarr[:3] - # type(obj) is InfoArray - # - # Note that it is here, rather than in the __new__ method, - # that we set the default value for 'info', because this - # method sees all creation of default objects - with the - # InfoArray.__new__ constructor, but also with - # arr.view(InfoArray). - self.info = getattr(obj, 'info', None) - # We do not need to return anything - - -Using the object looks like this: - - >>> obj = InfoArray(shape=(3,)) # explicit constructor - >>> type(obj) - - >>> obj.info is None - True - >>> obj = InfoArray(shape=(3,), info='information') - >>> obj.info - 'information' - >>> v = obj[1:] # new-from-template - here - slicing - >>> type(v) - - >>> v.info - 'information' - >>> arr = np.arange(10) - >>> cast_arr = arr.view(InfoArray) # view casting - >>> type(cast_arr) - - >>> cast_arr.info is None - True - -This class isn't very useful, because it has the same constructor as the -bare ndarray object, including passing in buffers and shapes and so on. -We would probably prefer the constructor to be able to take an already -formed ndarray from the usual numpy calls to ``np.array`` and return an -object. 
- -Slightly more realistic example - attribute added to existing array -------------------------------------------------------------------- - -Here is a class that takes a standard ndarray that already exists, casts -as our type, and adds an extra attribute. - -.. testcode:: - - import numpy as np - - class RealisticInfoArray(np.ndarray): - - def __new__(cls, input_array, info=None): - # Input array is an already formed ndarray instance - # We first cast to be our class type - obj = np.asarray(input_array).view(cls) - # add the new attribute to the created instance - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # see InfoArray.__array_finalize__ for comments - if obj is None: return - self.info = getattr(obj, 'info', None) - - -So: - - >>> arr = np.arange(5) - >>> obj = RealisticInfoArray(arr, info='information') - >>> type(obj) - - >>> obj.info - 'information' - >>> v = obj[1:] - >>> type(v) - - >>> v.info - 'information' - -.. _array-ufunc: - -``__array_ufunc__`` for ufuncs ------------------------------- - - .. versionadded:: 1.13 - -A subclass can override what happens when executing numpy ufuncs on it by -overriding the default ``ndarray.__array_ufunc__`` method. This method is -executed *instead* of the ufunc and should return either the result of the -operation, or :obj:`NotImplemented` if the operation requested is not -implemented. - -The signature of ``__array_ufunc__`` is:: - - def __array_ufunc__(ufunc, method, *inputs, **kwargs): - - - *ufunc* is the ufunc object that was called. - - *method* is a string indicating how the Ufunc was called, either - ``"__call__"`` to indicate it was called directly, or one of its - :ref:`methods`: ``"reduce"``, ``"accumulate"``, - ``"reduceat"``, ``"outer"``, or ``"at"``. - - *inputs* is a tuple of the input arguments to the ``ufunc`` - - *kwargs* contains any optional or keyword arguments passed to the - function. This includes any ``out`` arguments, which are always - contained in a tuple. - -A typical implementation would convert any inputs or outputs that are -instances of one's own class, pass everything on to a superclass using -``super()``, and finally return the results after possible -back-conversion. An example, taken from the test case -``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the -following. - -.. 
testcode::
-
-    import numpy as np
-
-    class A(np.ndarray):
-        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-            args = []
-            in_no = []
-            for i, input_ in enumerate(inputs):
-                if isinstance(input_, A):
-                    in_no.append(i)
-                    args.append(input_.view(np.ndarray))
-                else:
-                    args.append(input_)
-
-            outputs = kwargs.pop('out', None)
-            out_no = []
-            if outputs:
-                out_args = []
-                for j, output in enumerate(outputs):
-                    if isinstance(output, A):
-                        out_no.append(j)
-                        out_args.append(output.view(np.ndarray))
-                    else:
-                        out_args.append(output)
-                kwargs['out'] = tuple(out_args)
-            else:
-                outputs = (None,) * ufunc.nout
-
-            info = {}
-            if in_no:
-                info['inputs'] = in_no
-            if out_no:
-                info['outputs'] = out_no
-
-            results = super(A, self).__array_ufunc__(ufunc, method,
-                                                     *args, **kwargs)
-            if results is NotImplemented:
-                return NotImplemented
-
-            if method == 'at':
-                if isinstance(inputs[0], A):
-                    inputs[0].info = info
-                return
-
-            if ufunc.nout == 1:
-                results = (results,)
-
-            results = tuple((np.asarray(result).view(A)
-                             if output is None else output)
-                            for result, output in zip(results, outputs))
-            if results and isinstance(results[0], A):
-                results[0].info = info
-
-            return results[0] if len(results) == 1 else results
-
-So, this class does not actually do anything interesting: it just
-converts any instances of its own to regular ndarray (otherwise, we'd
-get infinite recursion!), and adds an ``info`` dictionary that tells
-which inputs and outputs it converted. Hence, e.g.,
-
->>> a = np.arange(5.).view(A)
->>> b = np.sin(a)
->>> b.info
-{'inputs': [0]}
->>> b = np.sin(np.arange(5.), out=(a,))
->>> b.info
-{'outputs': [0]}
->>> a = np.arange(5.).view(A)
->>> b = np.ones(1).view(A)
->>> c = a + b
->>> c.info
-{'inputs': [0, 1]}
->>> a += b
->>> a.info
-{'inputs': [0, 1], 'outputs': [0]}
-
-Note that another approach would be to use ``getattr(ufunc,
-method)(*inputs, **kwargs)`` instead of the ``super`` call. For this example,
-the result would be identical, but there is a difference if another operand
-also defines ``__array_ufunc__``. E.g., let's assume that we evaluate
-``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has
-an override. If you use ``super`` as in the example,
-``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
-means it cannot evaluate the result itself. Thus, it will return
-`NotImplemented` and so will our class ``A``. Then, control will be passed
-over to ``b``, which either knows how to deal with us and produces a result,
-or does not and returns `NotImplemented`, in which case Python raises a
-``TypeError``.
-
-If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we
-effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__``
-will be called, but now it sees an ``ndarray`` as the other argument. Likely,
-it will know how to handle this, and return a new instance of the ``B`` class
-to us. Our example class is not set up to handle this, but it might well be
-the best approach if, e.g., one were to re-implement ``MaskedArray`` using
-``__array_ufunc__``.
-
-As a final note: if the ``super`` route is suited to a given class, an
-advantage of using it is that it helps in constructing class hierarchies.
-E.g., suppose that our other class ``B`` also used the ``super`` in its
-``__array_ufunc__`` implementation, and we created a class ``C`` that depended
-on both, i.e., ``class C(A, B)`` (with, for simplicity, not another
-``__array_ufunc__`` override).
-
-.. _array-wrap:
-
-``__array_wrap__`` for ufuncs and other functions
--------------------------------------------------
-
-Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
-``__array_wrap__`` and ``__array_prepare__``. These two allowed one to
-change the output type of a ufunc, but, in contrast to
-``__array_ufunc__``, did not allow one to make any changes to the inputs.
-It is hoped to eventually deprecate these, but ``__array_wrap__`` is also
-used by other numpy functions and methods, such as ``squeeze``, so at the
-present time is still needed for full functionality.
-
-Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of
-allowing a subclass to set the type of the return value and update
-attributes and metadata. Let's show how this works with an example. First
-we return to the simpler example subclass, but with a different name and
-some print statements:
-
-.. testcode::
-
-  import numpy as np
-
-  class MySubClass(np.ndarray):
-
-      def __new__(cls, input_array, info=None):
-          obj = np.asarray(input_array).view(cls)
-          obj.info = info
-          return obj
-
-      def __array_finalize__(self, obj):
-          print('In __array_finalize__:')
-          print('   self is %s' % repr(self))
-          print('   obj is %s' % repr(obj))
-          if obj is None: return
-          self.info = getattr(obj, 'info', None)
-
-      def __array_wrap__(self, out_arr, context=None):
-          print('In __array_wrap__:')
-          print('   self is %s' % repr(self))
-          print('   arr is %s' % repr(out_arr))
-          # then just call the parent; the super() call is bound, so
-          # self must not be passed again
-          return super(MySubClass, self).__array_wrap__(out_arr, context)
-
-We run a ufunc on an instance of our new array:
-
->>> obj = MySubClass(np.arange(5), info='spam')
-In __array_finalize__:
-   self is MySubClass([0, 1, 2, 3, 4])
-   obj is array([0, 1, 2, 3, 4])
->>> arr2 = np.arange(5)+1
->>> ret = np.add(arr2, obj)
-In __array_wrap__:
-   self is MySubClass([0, 1, 2, 3, 4])
-   arr is array([1, 3, 5, 7, 9])
-In __array_finalize__:
-   self is MySubClass([1, 3, 5, 7, 9])
-   obj is MySubClass([0, 1, 2, 3, 4])
->>> ret
-MySubClass([1, 3, 5, 7, 9])
->>> ret.info
-'spam'
-
-Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
-with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
-of the addition. In turn, the default ``__array_wrap__``
-(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
-and called ``__array_finalize__`` - hence the copying of the ``info``
-attribute. This has all happened at the C level.
-
-But, we could do anything we wanted:
-
-.. testcode::
-
-  class SillySubClass(np.ndarray):
-
-      def __array_wrap__(self, arr, context=None):
-          return 'I lost your data'
-
->>> arr1 = np.arange(5)
->>> obj = arr1.view(SillySubClass)
->>> arr2 = np.arange(5)
->>> ret = np.multiply(obj, arr2)
->>> ret
-'I lost your data'
-
-So, by defining a specific ``__array_wrap__`` method for our subclass,
-we can tweak the output from ufuncs. The ``__array_wrap__`` method
-requires ``self``, then an argument - which is the result of the ufunc -
-and an optional parameter *context*. This parameter is passed in by
-ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc,
-domain of the ufunc), but is not set by other numpy functions.
-Though, as seen above, it is possible to do otherwise,
-``__array_wrap__`` should normally return an instance of its containing
-class; see the masked array subclass for an implementation.
-
-In addition to ``__array_wrap__``, which is called on the way out of the
-ufunc, there is also an ``__array_prepare__`` method which is called on
-the way into the ufunc, after the output arrays are created but before any
-computation has been performed. The default implementation does nothing
-but pass through the array. ``__array_prepare__`` should not attempt to
-access the array data or resize the array; it is intended for setting the
-output array type, updating attributes and metadata, and performing any
-checks based on the input that may be desired before computation begins.
-Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
-subclass thereof or raise an error.
-
-Extra gotchas - custom ``__del__`` methods and ndarray.base
------------------------------------------------------------
-
-One of the problems that ndarray solves is keeping track of memory
-ownership of ndarrays and their views. Consider the case where we have
-created an ndarray, ``arr``, and have taken a slice with ``v = arr[1:]``.
-The two objects are looking at the same memory. NumPy keeps track of
-where the data came from for a particular array or view, with the
-``base`` attribute:
-
->>> # A normal ndarray, that owns its own data
->>> arr = np.zeros((4,))
->>> # In this case, base is None
->>> arr.base is None
-True
->>> # We take a view
->>> v1 = arr[1:]
->>> # base now points to the array that it derived from
->>> v1.base is arr
-True
->>> # Take a view of a view
->>> v2 = v1[1:]
->>> # base points to the view it derived from
->>> v2.base is v1
-True
-
-In general, if the array owns its own memory, as for ``arr`` in this
-case, then ``arr.base`` will be None - there are some exceptions to this
-- see the numpy book for more details.
-
-The ``base`` attribute is useful for telling whether we have a view or
-the original array. This in turn can be useful if we need to know
-whether or not to do some specific cleanup when the subclassed array is
-deleted. For example, we may only want to do the cleanup if the original
-array is deleted, but not the views. For an example of how this can
-work, have a look at the ``memmap`` class in ``numpy.core``.
-
-Subclassing and Downstream Compatibility
-----------------------------------------
-
-When sub-classing ``ndarray`` or creating duck-types that mimic the
-``ndarray`` interface, it is your responsibility to decide how aligned
-your APIs will be with those of numpy. For convenience, many numpy
-functions that have a corresponding ``ndarray`` method (e.g., ``sum``,
-``mean``, ``take``, ``reshape``) work by checking if the first argument
-to a function has a method of the same name. If it exists, the method is
-called instead of coercing the arguments to a numpy array.
-
-For example, if you want your sub-class or duck-type to be compatible
-with numpy's ``sum`` function, the method signature for this object's
-``sum`` method should be the following:
-
-.. testcode::
-
-   def sum(self, axis=None, dtype=None, out=None, keepdims=False):
-       ...
-
-This is exactly the same method signature as that of ``np.sum``, so if a
-user calls ``np.sum`` on this object, numpy will call the object's own
-``sum`` method and pass in the arguments enumerated above, and no errors
-will be raised because the signatures are completely compatible with
-each other.
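-
-For instance, this minimal sketch (an illustration added here, with a
-hypothetical ``MySummable`` class that is not part of numpy) shows the
-dispatch in action::
-
-    import numpy as np
-
-    class MySummable:
-        # A duck-type: not an ndarray, but it offers a sum() method
-        # with a numpy-compatible signature.
-        def __init__(self, values):
-            self.values = list(values)
-
-        def sum(self, axis=None, dtype=None, out=None, keepdims=False):
-            # Python's built-in sum; the class attribute does not
-            # shadow it inside the method body.
-            return sum(self.values)
-
-    obj = MySummable([1, 2, 3])
-    print(np.sum(obj))  # numpy dispatches to MySummable.sum, printing 6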
-
-If, however, you decide to deviate from this signature and do something
-like this:
-
-.. testcode::
-
-   def sum(self, axis=None, dtype=None):
-       ...
-
-then the object is no longer compatible with ``np.sum``, because calling
-``np.sum`` will pass in the unexpected arguments ``out`` and ``keepdims``,
-causing a TypeError to be raised.
-
-If you wish to maintain compatibility with numpy and its subsequent versions
-(which might add new keyword arguments) but do not want to surface all of
-numpy's arguments, your function's signature should accept ``**kwargs``. For
-example:
-
-.. testcode::
-
-   def sum(self, axis=None, dtype=None, **unused_kwargs):
-       ...
-
-This object is now compatible with ``np.sum`` again because any extraneous
-arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden
-away in the ``**unused_kwargs`` parameter.
-
-"""
-from __future__ import division, absolute_import, print_function
diff --git a/venv/lib/python3.7/site-packages/numpy/doc/ufuncs.py b/venv/lib/python3.7/site-packages/numpy/doc/ufuncs.py
deleted file mode 100644
index df2c455..0000000
--- a/venv/lib/python3.7/site-packages/numpy/doc/ufuncs.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""
-===================
-Universal Functions
-===================
-
-Ufuncs are, generally speaking, mathematical functions or operations that are
-applied element-by-element to the contents of an array. That is, the result
-in each output array element only depends on the value in the corresponding
-input array (or arrays) and on no other array elements. NumPy comes with a
-large suite of ufuncs, and scipy extends that suite substantially. The
-simplest example is the addition operator: ::
-
- >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
- array([1, 3, 2, 6])
-
-The ufunc module lists all the available ufuncs in numpy. Documentation on
-the specific ufuncs may be found in those modules. This documentation is
-intended to address the more general aspects of ufuncs common to most of
-them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
-have equivalent functions defined (e.g. add() for +).
-
-Type coercion
-=============
-
-What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
-two different types? What is the type of the result? Typically, the result is
-the higher of the two types. For example: ::
-
- float32 + float64 -> float64
- int8 + int32 -> int32
- int16 + float32 -> float32
- float32 + complex64 -> complex64
-
-There are some less obvious cases, generally involving mixes of types
-(e.g. uints, ints and floats), where a type of equal bit size cannot
-represent all the information of the other type. Some examples are int32
-vs float32 or uint32 vs int32. Generally, the result is the higher type
-of larger size than both (if available). So: ::
-
- int32 + float32 -> float64
- uint32 + int32 -> int64
-
-Finally, the type coercion behavior when expressions involve Python
-scalars is different than that seen for arrays. Since Python has a
-limited number of types, combining a Python int with a dtype=np.int8
-array does not coerce to the higher type but instead, the type of the
-array prevails. So the rule for Python scalars combined with arrays is
-that the result will be that of the array equivalent of the Python scalar
-if the Python scalar is of a higher 'kind' than the array (e.g., float
-vs. int); otherwise the resultant type will be that of the array.
-For example: ::
-
- Python int + int8 -> int8
- Python float + int8 -> float64
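-
-As a quick check (an illustrative snippet added here, not part of the
-original text), the scalar rule can be verified directly: ::
-
- >>> import numpy as np
- >>> (np.ones(3, dtype=np.int8) + 1).dtype    # array type prevails
- dtype('int8')
- >>> (np.ones(3, dtype=np.int8) + 1.0).dtype  # float is a higher kind
- dtype('float64')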
-
-ufunc methods
-=============
-
-Binary ufuncs support 4 methods.
-
-**.reduce(arr)** applies the binary operator to elements of the array in
-  sequence. For example: ::
-
- >>> np.add.reduce(np.arange(10))  # adds all elements of array
- 45
-
-For multidimensional arrays, the first dimension is reduced by default: ::
-
- >>> np.add.reduce(np.arange(10).reshape(2,5))
- array([ 5,  7,  9, 11, 13])
-
-The axis keyword can be used to specify different axes to reduce: ::
-
- >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
- array([10, 35])
-
-**.accumulate(arr)** applies the binary operator and generates an
-equivalently shaped array that includes the accumulated amount for each
-element of the array. A couple of examples: ::
-
- >>> np.add.accumulate(np.arange(10))
- array([ 0,  1,  3,  6, 10, 15, 21, 28, 36, 45])
- >>> np.multiply.accumulate(np.arange(1,9))
- array([    1,     2,     6,    24,   120,   720,  5040, 40320])
-
-The behavior for multidimensional arrays is the same as for .reduce(),
-as is the use of the axis keyword.
-
-**.reduceat(arr,indices)** allows one to apply reduce to selected parts
-  of an array. It is a difficult method to understand; see the
-  ``reduceat`` documentation for details.
-
-**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
-  arr2. It will work on multidimensional arrays (the shape of the result is
-  the concatenation of the two input shapes): ::
-
- >>> np.multiply.outer(np.arange(3),np.arange(4))
- array([[0, 0, 0, 0],
-        [0, 1, 2, 3],
-        [0, 2, 4, 6]])
-
-Output arguments
-================
-
-All ufuncs accept an optional output array. The array must be of the expected
-output shape. Beware that if the type of the output array is of a different
-(and lower) type than the output result, the results may be silently truncated
-or otherwise corrupted in the downcast to the lower type. This usage is useful
-when one wants to avoid creating large temporary arrays and instead allows one
-to reuse the same array memory repeatedly (at the expense of not being able to
-use more convenient operator notation in expressions). Note that when the
-output argument is used, the ufunc still returns a reference to the result.
-
- >>> x = np.arange(2)
- >>> np.add(np.arange(2),np.arange(2.),x)
- array([0, 2])
- >>> x
- array([0, 2])
-
-and & or as ufuncs
-==================
-
-Invariably people try to use the python 'and' and 'or' as logical operators
-(and quite understandably). But these operators do not behave as normal
-operators since Python treats these quite differently. They cannot be
-overloaded with array equivalents. Thus using 'and' or 'or' with an array
-results in an error. There are two alternatives:
-
- 1) use the ufunc functions logical_and() and logical_or().
- 2) use the bitwise operators & and \\|. The drawback of these is that if
-    the arguments to these operators are not boolean arrays, the result is
-    likely incorrect. On the other hand, most usages of logical_and and
-    logical_or are with boolean arrays. As long as one is careful, this is
-    a convenient way to apply these operators.
-
-"""
-from __future__ import division, absolute_import, print_function
diff --git a/venv/lib/python3.7/site-packages/numpy/dual.py b/venv/lib/python3.7/site-packages/numpy/dual.py
deleted file mode 100644
index 651e845..0000000
--- a/venv/lib/python3.7/site-packages/numpy/dual.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-Aliases for functions which may be accelerated by Scipy.
- -Scipy_ can be built to use accelerated or otherwise improved libraries -for FFTs, linear algebra, and special functions. This module allows -developers to transparently support these accelerated functions when -scipy is available but still support users who have only installed -NumPy. - -.. _Scipy : https://www.scipy.org - -""" -from __future__ import division, absolute_import, print_function - -# This module should be used for functions both in numpy and scipy if -# you want to use the numpy version if available but the scipy version -# otherwise. -# Usage --- from numpy.dual import fft, inv - -__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', - 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', - 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] - -import numpy.linalg as linpkg -import numpy.fft as fftpkg -from numpy.lib import i0 -import sys - - -fft = fftpkg.fft -ifft = fftpkg.ifft -fftn = fftpkg.fftn -ifftn = fftpkg.ifftn -fft2 = fftpkg.fft2 -ifft2 = fftpkg.ifft2 - -norm = linpkg.norm -inv = linpkg.inv -svd = linpkg.svd -solve = linpkg.solve -det = linpkg.det -eig = linpkg.eig -eigvals = linpkg.eigvals -eigh = linpkg.eigh -eigvalsh = linpkg.eigvalsh -lstsq = linpkg.lstsq -pinv = linpkg.pinv -cholesky = linpkg.cholesky - -_restore_dict = {} - -def register_func(name, func): - if name not in __all__: - raise ValueError("{} not a dual function.".format(name)) - f = sys._getframe(0).f_globals - _restore_dict[name] = f[name] - f[name] = func - -def restore_func(name): - if name not in __all__: - raise ValueError("{} not a dual function.".format(name)) - try: - val = _restore_dict[name] - except KeyError: - return - else: - sys._getframe(0).f_globals[name] = val - -def restore_all(): - for name in _restore_dict.keys(): - restore_func(name) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/__init__.py b/venv/lib/python3.7/site-packages/numpy/f2py/__init__.py deleted file mode 100644 index 42e3632..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/__init__.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python -"""Fortran to Python Interface Generator. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['run_main', 'compile', 'f2py_testing'] - -import sys -import subprocess -import os - -import numpy as np - -from . import f2py2e -from . import f2py_testing -from . import diagnose - -run_main = f2py2e.run_main -main = f2py2e.main - - -def compile(source, - modulename='untitled', - extra_args='', - verbose=True, - source_fn=None, - extension='.f' - ): - """ - Build extension module from a Fortran 77 source string with f2py. - - Parameters - ---------- - source : str or bytes - Fortran source of module / subroutine to compile - - .. versionchanged:: 1.16.0 - Accept str as well as bytes - - modulename : str, optional - The name of the compiled python module - extra_args : str or list, optional - Additional parameters passed to f2py - - .. versionchanged:: 1.16.0 - A list of args may also be provided. - - verbose : bool, optional - Print f2py output to screen - source_fn : str, optional - Name of the file where the fortran source is written. - The default is to use a temporary file with the extension - provided by the `extension` parameter - extension : {'.f', '.f90'}, optional - Filename extension if `source_fn` is not provided. - The extension tells which fortran standard is used. - The default is `.f`, which implies F77 standard. - - .. 
versionadded:: 1.11.0 - - Returns - ------- - result : int - 0 on success - - Examples - -------- - .. include:: compile_session.dat - :literal: - - """ - import tempfile - import shlex - - if source_fn is None: - f, fname = tempfile.mkstemp(suffix=extension) - # f is a file descriptor so need to close it - # carefully -- not with .close() directly - os.close(f) - else: - fname = source_fn - - if not isinstance(source, str): - source = str(source, 'utf-8') - try: - with open(fname, 'w') as f: - f.write(source) - - args = ['-c', '-m', modulename, f.name] - - if isinstance(extra_args, np.compat.basestring): - is_posix = (os.name == 'posix') - extra_args = shlex.split(extra_args, posix=is_posix) - - args.extend(extra_args) - - c = [sys.executable, - '-c', - 'import numpy.f2py as f2py2e;f2py2e.main()'] + args - try: - output = subprocess.check_output(c) - except subprocess.CalledProcessError as exc: - status = exc.returncode - output = '' - except OSError: - # preserve historic status code used by exec_command() - status = 127 - output = '' - else: - status = 0 - output = output.decode() - if verbose: - print(output) - finally: - if source_fn is None: - os.remove(fname) - return status - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/__main__.py b/venv/lib/python3.7/site-packages/numpy/f2py/__main__.py deleted file mode 100644 index 708f7f3..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/__main__.py +++ /dev/null @@ -1,6 +0,0 @@ -# See http://cens.ioc.ee/projects/f2py2e/ -from __future__ import division, print_function - -from numpy.f2py.f2py2e import main - -main() diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/__version__.py b/venv/lib/python3.7/site-packages/numpy/f2py/__version__.py deleted file mode 100644 index 49a2199..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/__version__.py +++ /dev/null @@ -1,10 +0,0 @@ -from __future__ import division, absolute_import, print_function - -major = 2 - -try: - from __svn_version__ import version - version_info = (major, version) - version = '%s_%s' % version_info -except (ImportError, ValueError): - version = str(major) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/auxfuncs.py b/venv/lib/python3.7/site-packages/numpy/f2py/auxfuncs.py deleted file mode 100644 index 404bdbd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/auxfuncs.py +++ /dev/null @@ -1,854 +0,0 @@ -#!/usr/bin/env python -""" - -Auxiliary functions for f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) LICENSE. - - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/24 19:01:55 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import pprint -import sys -import types -from functools import reduce - -from . import __version__ -from . 
import cfuncs - -__all__ = [ - 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', - 'getargs2', 'getcallprotoargument', 'getcallstatement', - 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', - 'getusercode1', 'hasbody', 'hascallstatement', 'hascommon', - 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', - 'isallocatable', 'isarray', 'isarrayofstrings', 'iscomplex', - 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', - 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', - 'isfunction_wrap', 'isint1array', 'isinteger', 'isintent_aux', - 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', - 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', - 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', - 'islogicalfunction', 'islong_complex', 'islong_double', - 'islong_doublefunction', 'islong_long', 'islong_longfunction', - 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', - 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', - 'isstringarray', 'isstringfunction', 'issubroutine', - 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', - 'isunsigned_chararray', 'isunsigned_long_long', - 'isunsigned_long_longarray', 'isunsigned_short', - 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', -] - - -f2py_version = __version__.version - - -errmess = sys.stderr.write -show = pprint.pprint - -options = {} -debugoptions = [] -wrapfuncs = 1 - - -def outmess(t): - if options.get('verbose', 1): - sys.stdout.write(t) - - -def debugcapi(var): - return 'capi' in debugoptions - - -def _isstring(var): - return 'typespec' in var and var['typespec'] == 'character' and \ - not isexternal(var) - - -def isstring(var): - return _isstring(var) and not isarray(var) - - -def ischaracter(var): - return isstring(var) and 'charselector' not in var - - -def isstringarray(var): - return isarray(var) and _isstring(var) - - -def isarrayofstrings(var): - # leaving out '*' for now so that `character*(*) a(m)` and `character - # a(m,*)` are treated differently. Luckily `character**` is illegal. 
- return isstringarray(var) and var['dimension'][-1] == '(*)' - - -def isarray(var): - return 'dimension' in var and not isexternal(var) - - -def isscalar(var): - return not (isarray(var) or isstring(var) or isexternal(var)) - - -def iscomplex(var): - return isscalar(var) and \ - var.get('typespec') in ['complex', 'double complex'] - - -def islogical(var): - return isscalar(var) and var.get('typespec') == 'logical' - - -def isinteger(var): - return isscalar(var) and var.get('typespec') == 'integer' - - -def isreal(var): - return isscalar(var) and var.get('typespec') == 'real' - - -def get_kind(var): - try: - return var['kindselector']['*'] - except KeyError: - try: - return var['kindselector']['kind'] - except KeyError: - pass - - -def islong_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') not in ['integer', 'logical']: - return 0 - return get_kind(var) == '8' - - -def isunsigned_char(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-1' - - -def isunsigned_short(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-2' - - -def isunsigned(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-4' - - -def isunsigned_long_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var) == '-8' - - -def isdouble(var): - if not isscalar(var): - return 0 - if not var.get('typespec') == 'real': - return 0 - return get_kind(var) == '8' - - -def islong_double(var): - if not isscalar(var): - return 0 - if not var.get('typespec') == 'real': - return 0 - return get_kind(var) == '16' - - -def islong_complex(var): - if not iscomplex(var): - return 0 - return get_kind(var) == '32' - - -def iscomplexarray(var): - return isarray(var) and \ - var.get('typespec') in ['complex', 'double complex'] - - -def isint1array(var): - return isarray(var) and var.get('typespec') == 'integer' \ - and get_kind(var) == '1' - - -def isunsigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-1' - - -def isunsigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-2' - - -def isunsignedarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-4' - - -def isunsigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '-8' - - -def issigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '1' - - -def issigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '2' - - -def issigned_array(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '4' - - -def issigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var) == '8' - - -def isallocatable(var): - return 'attrspec' in var and 'allocatable' in var['attrspec'] - - -def ismutable(var): - return not ('dimension' not in var or isstring(var)) - - -def ismoduleroutine(rout): - return 'modulename' in rout - - -def ismodule(rout): - return 'block' in rout and 'module' == rout['block'] - - -def isfunction(rout): - return 'block' in rout and 
'function' == rout['block'] - -def isfunction_wrap(rout): - if isintent_c(rout): - return 0 - return wrapfuncs and isfunction(rout) and (not isexternal(rout)) - - -def issubroutine(rout): - return 'block' in rout and 'subroutine' == rout['block'] - - -def issubroutine_wrap(rout): - if isintent_c(rout): - return 0 - return issubroutine(rout) and hasassumedshape(rout) - - -def hasassumedshape(rout): - if rout.get('hasassumedshape'): - return True - for a in rout['args']: - for d in rout['vars'].get(a, {}).get('dimension', []): - if d == ':': - rout['hasassumedshape'] = True - return True - return False - - -def isroutine(rout): - return isfunction(rout) or issubroutine(rout) - - -def islogicalfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return islogical(rout['vars'][a]) - return 0 - - -def islong_longfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return islong_long(rout['vars'][a]) - return 0 - - -def islong_doublefunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return islong_double(rout['vars'][a]) - return 0 - - -def iscomplexfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return iscomplex(rout['vars'][a]) - return 0 - - -def iscomplexfunction_warn(rout): - if iscomplexfunction(rout): - outmess("""\ - ************************************************************** - Warning: code with a function returning complex value - may not work correctly with your Fortran compiler. - Run the following test before using it in your applications: - $(f2py install dir)/test-site/{b/runme_scalar,e/runme} - When using GNU gcc/g77 compilers, codes should work correctly. 
- **************************************************************\n""") - return 1 - return 0 - - -def isstringfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return isstring(rout['vars'][a]) - return 0 - - -def hasexternals(rout): - return 'externals' in rout and rout['externals'] - - -def isthreadsafe(rout): - return 'f2pyenhancements' in rout and \ - 'threadsafe' in rout['f2pyenhancements'] - - -def hasvariables(rout): - return 'vars' in rout and rout['vars'] - - -def isoptional(var): - return ('attrspec' in var and 'optional' in var['attrspec'] and - 'required' not in var['attrspec']) and isintent_nothide(var) - - -def isexternal(var): - return 'attrspec' in var and 'external' in var['attrspec'] - - -def isrequired(var): - return not isoptional(var) and isintent_nothide(var) - - -def isintent_in(var): - if 'intent' not in var: - return 1 - if 'hide' in var['intent']: - return 0 - if 'inplace' in var['intent']: - return 0 - if 'in' in var['intent']: - return 1 - if 'out' in var['intent']: - return 0 - if 'inout' in var['intent']: - return 0 - if 'outin' in var['intent']: - return 0 - return 1 - - -def isintent_inout(var): - return ('intent' in var and ('inout' in var['intent'] or - 'outin' in var['intent']) and 'in' not in var['intent'] and - 'hide' not in var['intent'] and 'inplace' not in var['intent']) - - -def isintent_out(var): - return 'out' in var.get('intent', []) - - -def isintent_hide(var): - return ('intent' in var and ('hide' in var['intent'] or - ('out' in var['intent'] and 'in' not in var['intent'] and - (not l_or(isintent_inout, isintent_inplace)(var))))) - -def isintent_nothide(var): - return not isintent_hide(var) - - -def isintent_c(var): - return 'c' in var.get('intent', []) - - -def isintent_cache(var): - return 'cache' in var.get('intent', []) - - -def isintent_copy(var): - return 'copy' in var.get('intent', []) - - -def isintent_overwrite(var): - return 'overwrite' in var.get('intent', []) - - -def isintent_callback(var): - return 'callback' in var.get('intent', []) - - -def isintent_inplace(var): - return 'inplace' in var.get('intent', []) - - -def isintent_aux(var): - return 'aux' in var.get('intent', []) - - -def isintent_aligned4(var): - return 'aligned4' in var.get('intent', []) - - -def isintent_aligned8(var): - return 'aligned8' in var.get('intent', []) - - -def isintent_aligned16(var): - return 'aligned16' in var.get('intent', []) - -isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', - isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', - isintent_cache: 'INTENT_CACHE', - isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', - isintent_inplace: 'INTENT_INPLACE', - isintent_aligned4: 'INTENT_ALIGNED4', - isintent_aligned8: 'INTENT_ALIGNED8', - isintent_aligned16: 'INTENT_ALIGNED16', - } - - -def isprivate(var): - return 'attrspec' in var and 'private' in var['attrspec'] - - -def hasinitvalue(var): - return '=' in var - - -def hasinitvalueasstring(var): - if not hasinitvalue(var): - return 0 - return var['='][0] in ['"', "'"] - - -def hasnote(var): - return 'note' in var - - -def hasresultnote(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if a in rout['vars']: - return hasnote(rout['vars'][a]) - return 0 - - -def hascommon(rout): - return 'common' in rout - - -def containscommon(rout): - if hascommon(rout): - return 1 - if hasbody(rout): - for b in rout['body']: - 
if containscommon(b): - return 1 - return 0 - - -def containsmodule(block): - if ismodule(block): - return 1 - if not hasbody(block): - return 0 - for b in block['body']: - if containsmodule(b): - return 1 - return 0 - - -def hasbody(rout): - return 'body' in rout - - -def hascallstatement(rout): - return getcallstatement(rout) is not None - - -def istrue(var): - return 1 - - -def isfalse(var): - return 0 - - -class F2PYError(Exception): - pass - - -class throw_error(object): - - def __init__(self, mess): - self.mess = mess - - def __call__(self, var): - mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) - raise F2PYError(mess) - - -def l_and(*f): - l, l2 = 'lambda v', [] - for i in range(len(f)): - l = '%s,f%d=f[%d]' % (l, i, i) - l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l, ' and '.join(l2))) - - -def l_or(*f): - l, l2 = 'lambda v', [] - for i in range(len(f)): - l = '%s,f%d=f[%d]' % (l, i, i) - l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l, ' or '.join(l2))) - - -def l_not(f): - return eval('lambda v,f=f:not f(v)') - - -def isdummyroutine(rout): - try: - return rout['f2pyenhancements']['fortranname'] == '' - except KeyError: - return 0 - - -def getfortranname(rout): - try: - name = rout['f2pyenhancements']['fortranname'] - if name == '': - raise KeyError - if not name: - errmess('Failed to use fortranname from %s\n' % - (rout['f2pyenhancements'])) - raise KeyError - except KeyError: - name = rout['name'] - return name - - -def getmultilineblock(rout, blockname, comment=1, counter=0): - try: - r = rout['f2pyenhancements'].get(blockname) - except KeyError: - return - if not r: - return - if counter > 0 and isinstance(r, str): - return - if isinstance(r, list): - if counter >= len(r): - return - r = r[counter] - if r[:3] == "'''": - if comment: - r = '\t/* start ' + blockname + \ - ' multiline (' + repr(counter) + ') */\n' + r[3:] - else: - r = r[3:] - if r[-3:] == "'''": - if comment: - r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' - else: - r = r[:-3] - else: - errmess("%s multiline block should end with `'''`: %s\n" - % (blockname, repr(r))) - return r - - -def getcallstatement(rout): - return getmultilineblock(rout, 'callstatement') - - -def getcallprotoargument(rout, cb_map={}): - r = getmultilineblock(rout, 'callprotoargument', comment=0) - if r: - return r - if hascallstatement(rout): - outmess( - 'warning: callstatement is defined without callprotoargument\n') - return - from .capi_maps import getctype - arg_types, arg_types2 = [], [] - if l_and(isstringfunction, l_not(isfunction_wrap))(rout): - arg_types.extend(['char*', 'size_t']) - for n in rout['args']: - var = rout['vars'][n] - if isintent_callback(var): - continue - if n in cb_map: - ctype = cb_map[n] + '_typedef' - else: - ctype = getctype(var) - if l_and(isintent_c, l_or(isscalar, iscomplex))(var): - pass - elif isstring(var): - pass - else: - ctype = ctype + '*' - if isstring(var) or isarrayofstrings(var): - arg_types2.append('size_t') - arg_types.append(ctype) - - proto_args = ','.join(arg_types + arg_types2) - if not proto_args: - proto_args = 'void' - return proto_args - - -def getusercode(rout): - return getmultilineblock(rout, 'usercode') - - -def getusercode1(rout): - return getmultilineblock(rout, 'usercode', counter=1) - - -def getpymethoddef(rout): - return getmultilineblock(rout, 'pymethoddef') - - -def getargs(rout): - sortargs, args = [], [] - if 'args' in rout: - args = rout['args'] - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: - sortargs.append(a) 
- for a in args: - if a not in sortargs: - sortargs.append(a) - else: - sortargs = rout['args'] - return args, sortargs - - -def getargs2(rout): - sortargs, args = [], rout.get('args', []) - auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) - and a not in args] - args = auxvars + args - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: - sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: - sortargs = auxvars + rout['args'] - return args, sortargs - - -def getrestdoc(rout): - if 'f2pymultilines' not in rout: - return None - k = None - if rout['block'] == 'python module': - k = rout['block'], rout['name'] - return rout['f2pymultilines'].get(k, None) - - -def gentitle(name): - l = (80 - len(name) - 6) // 2 - return '/*%s %s %s*/' % (l * '*', name, l * '*') - - -def flatlist(l): - if isinstance(l, list): - return reduce(lambda x, y, f=flatlist: x + f(y), l, []) - return [l] - - -def stripcomma(s): - if s and s[-1] == ',': - return s[:-1] - return s - - -def replace(str, d, defaultsep=''): - if isinstance(d, list): - return [replace(str, _m, defaultsep) for _m in d] - if isinstance(str, list): - return [replace(_m, d, defaultsep) for _m in str] - for k in 2 * list(d.keys()): - if k == 'separatorsfor': - continue - if 'separatorsfor' in d and k in d['separatorsfor']: - sep = d['separatorsfor'][k] - else: - sep = defaultsep - if isinstance(d[k], list): - str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) - else: - str = str.replace('#%s#' % (k), d[k]) - return str - - -def dictappend(rd, ar): - if isinstance(ar, list): - for a in ar: - rd = dictappend(rd, a) - return rd - for k in ar.keys(): - if k[0] == '_': - continue - if k in rd: - if isinstance(rd[k], str): - rd[k] = [rd[k]] - if isinstance(rd[k], list): - if isinstance(ar[k], list): - rd[k] = rd[k] + ar[k] - else: - rd[k].append(ar[k]) - elif isinstance(rd[k], dict): - if isinstance(ar[k], dict): - if k == 'separatorsfor': - for k1 in ar[k].keys(): - if k1 not in rd[k]: - rd[k][k1] = ar[k][k1] - else: - rd[k] = dictappend(rd[k], ar[k]) - else: - rd[k] = ar[k] - return rd - - -def applyrules(rules, d, var={}): - ret = {} - if isinstance(rules, list): - for r in rules: - rr = applyrules(r, d, var) - ret = dictappend(ret, rr) - if '_break' in rr: - break - return ret - if '_check' in rules and (not rules['_check'](var)): - return ret - if 'need' in rules: - res = applyrules({'needs': rules['need']}, d, var) - if 'needs' in res: - cfuncs.append_needs(res['needs']) - - for k in rules.keys(): - if k == 'separatorsfor': - ret[k] = rules[k] - continue - if isinstance(rules[k], str): - ret[k] = replace(rules[k], d) - elif isinstance(rules[k], list): - ret[k] = [] - for i in rules[k]: - ar = applyrules({k: i}, d, var) - if k in ar: - ret[k].append(ar[k]) - elif k[0] == '_': - continue - elif isinstance(rules[k], dict): - ret[k] = [] - for k1 in rules[k].keys(): - if isinstance(k1, types.FunctionType) and k1(var): - if isinstance(rules[k][k1], list): - for i in rules[k][k1]: - if isinstance(i, dict): - res = applyrules({'supertext': i}, d, var) - if 'supertext' in res: - i = res['supertext'] - else: - i = '' - ret[k].append(replace(i, d)) - else: - i = rules[k][k1] - if isinstance(i, dict): - res = applyrules({'supertext': i}, d) - if 'supertext' in res: - i = res['supertext'] - else: - i = '' - ret[k].append(replace(i, d)) - else: - errmess('applyrules: ignoring rule %s.\n' % repr(rules[k])) - if isinstance(ret[k], list): - if len(ret[k]) == 1: - ret[k] = 
ret[k][0] - if ret[k] == []: - del ret[k] - return ret diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/capi_maps.py b/venv/lib/python3.7/site-packages/numpy/f2py/capi_maps.py deleted file mode 100644 index ce79f68..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/capi_maps.py +++ /dev/null @@ -1,849 +0,0 @@ -#!/usr/bin/env python -""" - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.60 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -import copy -import re -import os -import sys -from .crackfortran import markoutercomma -from . import cb_rules - -# The eviroment provided by auxfuncs.py is needed for some calls to eval. -# As the needed functions cannot be determined by static inspection of the -# code, it is safest to use import * pending a major refactoring of f2py. -from .auxfuncs import * - -__all__ = [ - 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', - 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', - 'cb_sign2map', 'cb_routsign2map', 'common_sign2map' -] - - -# Numarray and Numeric users should set this False -using_newcore = True - -depargs = [] -lcb_map = {} -lcb2_map = {} -# forced casting: mainly caused by the fact that Python or Numeric -# C/APIs do not support the corresponding C types. -c2py_map = {'double': 'float', - 'float': 'float', # forced casting - 'long_double': 'float', # forced casting - 'char': 'int', # forced casting - 'signed_char': 'int', # forced casting - 'unsigned_char': 'int', # forced casting - 'short': 'int', # forced casting - 'unsigned_short': 'int', # forced casting - 'int': 'int', # (forced casting) - 'long': 'int', - 'long_long': 'long', - 'unsigned': 'int', # forced casting - 'complex_float': 'complex', # forced casting - 'complex_double': 'complex', - 'complex_long_double': 'complex', # forced casting - 'string': 'string', - } -c2capi_map = {'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_DOUBLE', # forced casting - 'char': 'NPY_STRING', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'long_long': 'NPY_LONG', # forced casting - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', # forced casting - 'string': 'NPY_STRING'} - -# These new maps aren't used anyhere yet, but should be by default -# unless building numeric or numarray extensions. 
-if using_newcore: - c2capi_map = {'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_LONGDOUBLE', - 'char': 'NPY_BYTE', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'unsigned_long': 'NPY_ULONG', - 'long_long': 'NPY_LONGLONG', - 'unsigned_long_long': 'NPY_ULONGLONG', - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', - 'string':'NPY_STRING' - - } -c2pycode_map = {'double': 'd', - 'float': 'f', - 'long_double': 'd', # forced casting - 'char': '1', - 'signed_char': '1', - 'unsigned_char': 'b', - 'short': 's', - 'unsigned_short': 'w', - 'int': 'i', - 'unsigned': 'u', - 'long': 'l', - 'long_long': 'L', - 'complex_float': 'F', - 'complex_double': 'D', - 'complex_long_double': 'D', # forced casting - 'string': 'c' - } -if using_newcore: - c2pycode_map = {'double': 'd', - 'float': 'f', - 'long_double': 'g', - 'char': 'b', - 'unsigned_char': 'B', - 'signed_char': 'b', - 'short': 'h', - 'unsigned_short': 'H', - 'int': 'i', - 'unsigned': 'I', - 'long': 'l', - 'unsigned_long': 'L', - 'long_long': 'q', - 'unsigned_long_long': 'Q', - 'complex_float': 'F', - 'complex_double': 'D', - 'complex_long_double': 'G', - 'string': 'S'} -c2buildvalue_map = {'double': 'd', - 'float': 'f', - 'char': 'b', - 'signed_char': 'b', - 'short': 'h', - 'int': 'i', - 'long': 'l', - 'long_long': 'L', - 'complex_float': 'N', - 'complex_double': 'N', - 'complex_long_double': 'N', - 'string': 'z'} - -if sys.version_info[0] >= 3: - # Bytes, not Unicode strings - c2buildvalue_map['string'] = 'y' - -if using_newcore: - # c2buildvalue_map=??? - pass - -f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', - '12': 'long_double', '16': 'long_double'}, - 'integer': {'': 'int', '1': 'signed_char', '2': 'short', - '4': 'int', '8': 'long_long', - '-1': 'unsigned_char', '-2': 'unsigned_short', - '-4': 'unsigned', '-8': 'unsigned_long_long'}, - 'complex': {'': 'complex_float', '8': 'complex_float', - '16': 'complex_double', '24': 'complex_long_double', - '32': 'complex_long_double'}, - 'complexkind': {'': 'complex_float', '4': 'complex_float', - '8': 'complex_double', '12': 'complex_long_double', - '16': 'complex_long_double'}, - 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', - '8': 'long_long'}, - 'double complex': {'': 'complex_double'}, - 'double precision': {'': 'double'}, - 'byte': {'': 'char'}, - 'character': {'': 'string'} - } - -f2cmap_default = copy.deepcopy(f2cmap_all) - - -def load_f2cmap_file(f2cmap_file): - global f2cmap_all - - f2cmap_all = copy.deepcopy(f2cmap_default) - - if f2cmap_file is None: - # Default value - f2cmap_file = '.f2py_f2cmap' - if not os.path.isfile(f2cmap_file): - return - - # User defined additions to f2cmap_all. - # f2cmap_file must contain a dictionary of dictionaries, only. For - # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is - # interpreted as C 'float'. This feature is useful for F90/95 users if - # they use PARAMETERSs in type specifications. 
- try: - outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) - with open(f2cmap_file, 'r') as f: - d = eval(f.read(), {}, {}) - for k, d1 in list(d.items()): - for k1 in list(d1.keys()): - d1[k1.lower()] = d1[k1] - d[k.lower()] = d[k] - for k in list(d.keys()): - if k not in f2cmap_all: - f2cmap_all[k] = {} - for k1 in list(d[k].keys()): - if d[k][k1] in c2py_map: - if k1 in f2cmap_all[k]: - outmess( - "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1])) - f2cmap_all[k][k1] = d[k][k1] - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % - (k, k1, d[k][k1])) - else: - errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % ( - k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) - outmess('Successfully applied user defined f2cmap changes\n') - except Exception as msg: - errmess( - 'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) - -cformat_map = {'double': '%g', - 'float': '%g', - 'long_double': '%Lg', - 'char': '%d', - 'signed_char': '%d', - 'unsigned_char': '%hhu', - 'short': '%hd', - 'unsigned_short': '%hu', - 'int': '%d', - 'unsigned': '%u', - 'long': '%ld', - 'unsigned_long': '%lu', - 'long_long': '%ld', - 'complex_float': '(%g,%g)', - 'complex_double': '(%g,%g)', - 'complex_long_double': '(%Lg,%Lg)', - 'string': '%s', - } - -# Auxiliary functions - - -def getctype(var): - """ - Determines C type - """ - ctype = 'void' - if isfunction(var): - if 'result' in var: - a = var['result'] - else: - a = var['name'] - if a in var['vars']: - return getctype(var['vars'][a]) - else: - errmess('getctype: function %s has no return value?!\n' % a) - elif issubroutine(var): - return ctype - elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: - typespec = var['typespec'].lower() - f2cmap = f2cmap_all[typespec] - ctype = f2cmap[''] # default type - if 'kindselector' in var: - if '*' in var['kindselector']: - try: - ctype = f2cmap[var['kindselector']['*']] - except KeyError: - errmess('getctype: "%s %s %s" not supported.\n' % - (var['typespec'], '*', var['kindselector']['*'])) - elif 'kind' in var['kindselector']: - if typespec + 'kind' in f2cmap_all: - f2cmap = f2cmap_all[typespec + 'kind'] - try: - ctype = f2cmap[var['kindselector']['kind']] - except KeyError: - if typespec in f2cmap_all: - f2cmap = f2cmap_all[typespec] - try: - ctype = f2cmap[str(var['kindselector']['kind'])] - except KeyError: - errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' - % (typespec, var['kindselector']['kind'], ctype, - typespec, var['kindselector']['kind'], os.getcwd())) - - else: - if not isexternal(var): - errmess( - 'getctype: No C-type found in "%s", assuming void.\n' % var) - return ctype - - -def getstrlength(var): - if isstringfunction(var): - if 'result' in var: - a = var['result'] - else: - a = var['name'] - if a in var['vars']: - return getstrlength(var['vars'][a]) - else: - errmess('getstrlength: function %s has no return value?!\n' % a) - if not isstring(var): - errmess( - 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) - len = '1' - if 'charselector' in var: - a = var['charselector'] - if '*' in a: - len = a['*'] - elif 'len' in a: - len = a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): - if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( - repr(var))) - len = '-1' - return len - - -def getarrdims(a, var, 
verbose=0): - global depargs - ret = {} - if isstring(var) and not isarray(var): - ret['dims'] = getstrlength(var) - ret['size'] = ret['dims'] - ret['rank'] = '1' - elif isscalar(var): - ret['size'] = '1' - ret['rank'] = '0' - ret['dims'] = '' - elif isarray(var): - dim = copy.copy(var['dimension']) - ret['size'] = '*'.join(dim) - try: - ret['size'] = repr(eval(ret['size'])) - except Exception: - pass - ret['dims'] = ','.join(dim) - ret['rank'] = repr(len(dim)) - ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] - for i in range(len(dim)): # solve dim for dependencies - v = [] - if dim[i] in depargs: - v = [dim[i]] - else: - for va in depargs: - if re.match(r'.*?\b%s\b.*' % va, dim[i]): - v.append(va) - for va in v: - if depargs.index(va) > depargs.index(a): - dim[i] = '*' - break - ret['setdims'], i = '', -1 - for d in dim: - i = i + 1 - if d not in ['*', ':', '(*)', '(:)']: - ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['setdims'], i, d) - if ret['setdims']: - ret['setdims'] = ret['setdims'][:-1] - ret['cbsetdims'], i = '', -1 - for d in var['dimension']: - i = i + 1 - if d not in ['*', ':', '(*)', '(:)']: - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, d) - elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' - % (d)) - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, 0) - elif verbose: - errmess( - 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) - if ret['cbsetdims']: - ret['cbsetdims'] = ret['cbsetdims'][:-1] -# if not isintent_c(var): -# var['dimension'].reverse() - return ret - - -def getpydocsign(a, var): - global lcb_map - if isfunction(var): - if 'result' in var: - af = var['result'] - else: - af = var['name'] - if af in var['vars']: - return getpydocsign(af, var['vars'][af]) - else: - errmess('getctype: function %s has no return value?!\n' % af) - return '', '' - sig, sigout = a, a - opt = '' - if isintent_in(var): - opt = 'input' - elif isintent_inout(var): - opt = 'in/output' - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4] == 'out=': - out_a = k[4:] - break - init = '' - ctype = getctype(var) - - if hasinitvalue(var): - init, showinit = getinit(a, var) - init = ', optional\\n Default: %s' % showinit - if isscalar(var): - if isintent_inout(var): - sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], - c2pycode_map[ctype], init) - else: - sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) - sigout = '%s : %s' % (out_a, c2py_map[ctype]) - elif isstring(var): - if isintent_inout(var): - sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( - a, opt, getstrlength(var), init) - else: - sig = '%s : %s string(len=%s)%s' % ( - a, opt, getstrlength(var), init) - sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) - elif isarray(var): - dim = var['dimension'] - rank = repr(len(dim)) - sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, - c2pycode_map[ - ctype], - ','.join(dim), init) - if a == out_a: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ - % (a, rank, c2pycode_map[ctype], ','.join(dim)) - else: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) - elif isexternal(var): - ua = '' - if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: - ua = lcb2_map[lcb_map[a]]['argname'] - if not ua == a: - ua = ' => %s' % ua - 
else: - ua = '' - sig = '%s : call-back function%s' % (a, ua) - sigout = sig - else: - errmess( - 'getpydocsign: Could not resolve docsignature for "%s".\\n' % a) - return sig, sigout - - -def getarrdocsign(a, var): - ctype = getctype(var) - if isstring(var) and (not isarray(var)): - sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, - getstrlength(var)) - elif isscalar(var): - sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], - c2pycode_map[ctype],) - elif isarray(var): - dim = var['dimension'] - rank = repr(len(dim)) - sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, - c2pycode_map[ - ctype], - ','.join(dim)) - return sig - - -def getinit(a, var): - if isstring(var): - init, showinit = '""', "''" - else: - init, showinit = '', '' - if hasinitvalue(var): - init = var['='] - showinit = init - if iscomplex(var) or iscomplexarray(var): - ret = {} - - try: - v = var["="] - if ',' in v: - ret['init.r'], ret['init.i'] = markoutercomma( - v[1:-1]).split('@,@') - else: - v = eval(v, {}, {}) - ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) - except Exception: - raise ValueError( - 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) - if isarray(var): - init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( - ret['init.r'], ret['init.i']) - elif isstring(var): - if not init: - init, showinit = '""', "''" - if init[0] == "'": - init = '"%s"' % (init[1:-1].replace('"', '\\"')) - if init[0] == '"': - showinit = "'%s'" % (init[1:-1]) - return init, showinit - - -def sign2map(a, var): - """ - varname,ctype,atype - init,init.r,init.i,pytype - vardebuginfo,vardebugshowvalue,varshowvalue - varrfromat - intent - """ - global lcb_map, cb_map - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4] == 'out=': - out_a = k[4:] - break - ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} - intent_flags = [] - for f, s in isintent_dict.items(): - if f(var): - intent_flags.append('F2PY_%s' % s) - if intent_flags: - # XXX: Evaluate intent_flags here. 
- ret['intent'] = '|'.join(intent_flags) - else: - ret['intent'] = 'F2PY_INTENT_IN' - if isarray(var): - ret['varrformat'] = 'N' - elif ret['ctype'] in c2buildvalue_map: - ret['varrformat'] = c2buildvalue_map[ret['ctype']] - else: - ret['varrformat'] = 'O' - ret['init'], ret['showinit'] = getinit(a, var) - if hasinitvalue(var) and iscomplex(var) and not isarray(var): - ret['init.r'], ret['init.i'] = markoutercomma( - ret['init'][1:-1]).split('@,@') - if isexternal(var): - ret['cbnamekey'] = a - if a in lcb_map: - ret['cbname'] = lcb_map[a] - ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] - ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] - ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] - ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] - else: - ret['cbname'] = a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( - a, list(lcb_map.keys()))) - if isstring(var): - ret['length'] = getstrlength(var) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - dim = copy.copy(var['dimension']) - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - # Debug info - if debugcapi(var): - il = [isintent_in, 'input', isintent_out, 'output', - isintent_inout, 'inoutput', isrequired, 'required', - isoptional, 'optional', isintent_hide, 'hidden', - iscomplex, 'complex scalar', - l_and(isscalar, l_not(iscomplex)), 'scalar', - isstring, 'string', isarray, 'array', - iscomplexarray, 'complex array', isstringarray, 'string array', - iscomplexfunction, 'complex function', - l_and(isfunction, l_not(iscomplexfunction)), 'function', - isexternal, 'callback', - isintent_callback, 'callback', - isintent_aux, 'auxiliary', - ] - rl = [] - for i in range(0, len(il), 2): - if il[i](var): - rl.append(il[i + 1]) - if isstring(var): - rl.append('slen(%s)=%s' % (a, ret['length'])) - if isarray(var): - ddim = ','.join( - map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) - rl.append('dims(%s)' % ddim) - if isexternal(var): - ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( - a, ret['cbname'], ','.join(rl)) - else: - ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( - ret['ctype'], a, ret['showinit'], ','.join(rl)) - if isscalar(var): - if ret['ctype'] in cformat_map: - ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) - if isstring(var): - ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) - if isexternal(var): - ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) - if ret['ctype'] in cformat_map: - ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isstring(var): - ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - return ret - - -def routsign2map(rout): - """ - name,NAME,begintitle,endtitle - rname,ctype,rformat - routdebugshowvalue - """ - global lcb_map - name = rout['name'] - fname = getfortranname(rout) - ret = {'name': name, - 'texname': name.replace('_', '\\_'), - 'name_lower': name.lower(), - 'NAME': name.upper(), - 'begintitle': gentitle(name), - 'endtitle': gentitle('end of %s' % name), - 'fortranname': fname, - 'FORTRANNAME': fname.upper(), - 'callstatement': getcallstatement(rout) or '', - 'usercode': getusercode(rout) or '', - 'usercode1': getusercode1(rout) or '', - } - if '_' in fname: - ret['F_FUNC'] = 'F_FUNC_US' - else: - ret['F_FUNC'] = 'F_FUNC' - if '_' 
in name: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' - else: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' - lcb_map = {} - if 'use' in rout: - for u in rout['use'].keys(): - if u in cb_rules.cb_map: - for un in cb_rules.cb_map[u]: - ln = un[0] - if 'map' in rout['use'][u]: - for k in rout['use'][u]['map'].keys(): - if rout['use'][u]['map'][k] == un[0]: - ln = k - break - lcb_map[ln] = un[1] - elif 'externals' in rout and rout['externals']: - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( - ret['name'], repr(rout['externals']))) - ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' - if isfunction(rout): - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - ret['rname'] = a - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) - ret['ctype'] = getctype(rout['vars'][a]) - if hasresultnote(rout): - ret['resultnote'] = rout['vars'][a]['note'] - rout['vars'][a]['note'] = ['See elsewhere.'] - if ret['ctype'] in c2buildvalue_map: - ret['rformat'] = c2buildvalue_map[ret['ctype']] - else: - ret['rformat'] = 'O' - errmess('routsign2map: no c2buildvalue key for type %s\n' % - (repr(ret['ctype']))) - if debugcapi(rout): - if ret['ctype'] in cformat_map: - ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) - if isstringfunction(rout): - ret['rlength'] = getstrlength(rout['vars'][a]) - if ret['rlength'] == '-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( - repr(rout['name']))) - ret['rlength'] = '10' - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] - return ret - - -def modsign2map(m): - """ - modulename - """ - if ismodule(m): - ret = {'f90modulename': m['name'], - 'F90MODULENAME': m['name'].upper(), - 'texf90modulename': m['name'].replace('_', '\\_')} - else: - ret = {'modulename': m['name'], - 'MODULENAME': m['name'].upper(), - 'texmodulename': m['name'].replace('_', '\\_')} - ret['restdoc'] = getrestdoc(m) or [] - if hasnote(m): - ret['note'] = m['note'] - ret['usercode'] = getusercode(m) or '' - ret['usercode1'] = getusercode1(m) or '' - if m['body']: - ret['interface_usercode'] = getusercode(m['body'][0]) or '' - else: - ret['interface_usercode'] = '' - ret['pymethoddef'] = getpymethoddef(m) or '' - if 'coutput' in m: - ret['coutput'] = m['coutput'] - if 'f2py_wrapper_output' in m: - ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] - return ret - - -def cb_sign2map(a, var, index=None): - ret = {'varname': a} - ret['varname_i'] = ret['varname'] - ret['ctype'] = getctype(var) - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - var['note'] = ['See elsewhere.'] - return ret - - -def cb_routsign2map(rout, um): - """ - name,begintitle,endtitle,argname - ctype,rctype,maxnofargs,nofoptargs,returncptr - """ - ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), - 'returncptr': ''} - if isintent_callback(rout): - if '_' in rout['name']: - F_FUNC = 'F_FUNC_US' - else: - F_FUNC = 'F_FUNC' - ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), 
- rout['name'].upper(), - ) - ret['static'] = 'extern' - else: - ret['callbackname'] = ret['name'] - ret['static'] = 'static' - ret['argname'] = rout['name'] - ret['begintitle'] = gentitle(ret['name']) - ret['endtitle'] = gentitle('end of %s' % ret['name']) - ret['ctype'] = getctype(rout) - ret['rctype'] = 'void' - if ret['ctype'] == 'string': - ret['rctype'] = 'void' - else: - ret['rctype'] = ret['ctype'] - if ret['rctype'] != 'void': - if iscomplexfunction(rout): - ret['returncptr'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -return_value= -#endif -""" - else: - ret['returncptr'] = 'return_value=' - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['strlength'] = getstrlength(rout) - if isfunction(rout): - if 'result' in rout: - a = rout['result'] - else: - a = rout['name'] - if hasnote(rout['vars'][a]): - ret['note'] = rout['vars'][a]['note'] - rout['vars'][a]['note'] = ['See elsewhere.'] - ret['rname'] = a - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) - if iscomplexfunction(rout): - ret['rctype'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -#ctype# -#else -void -#endif -""" - else: - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] - nofargs = 0 - nofoptargs = 0 - if 'args' in rout and 'vars' in rout: - for a in rout['args']: - var = rout['vars'][a] - if l_or(isintent_in, isintent_inout)(var): - nofargs = nofargs + 1 - if isoptional(var): - nofoptargs = nofoptargs + 1 - ret['maxnofargs'] = repr(nofargs) - ret['nofoptargs'] = repr(nofoptargs) - if hasnote(rout) and isfunction(rout) and 'result' in rout: - ret['routnote'] = rout['note'] - rout['note'] = ['See elsewhere.'] - return ret - - -def common_sign2map(a, var): # obsolete - ret = {'varname': a, 'ctype': getctype(var)} - if isstringarray(var): - ret['ctype'] = 'char' - if ret['ctype'] in c2capi_map: - ret['atype'] = c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) - if isarray(var): - ret = dictappend(ret, getarrdims(a, var)) - elif isstring(var): - ret['size'] = getstrlength(var) - ret['rank'] = '1' - ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) - if hasnote(var): - ret['note'] = var['note'] - var['note'] = ['See elsewhere.'] - # for strings this returns 0-rank but actually is 1-rank - ret['arrdocstr'] = getarrdocsign(a, var) - return ret diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/cb_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/cb_rules.py deleted file mode 100644 index 183d7c2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/cb_rules.py +++ /dev/null @@ -1,578 +0,0 @@ -#!/usr/bin/env python -""" - -Build call-back mechanism for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/20 11:27:58 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -from . 
import __version__ -from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, - iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, - isintent_hide, isintent_in, isintent_inout, isintent_nothide, - isintent_out, isoptional, isrequired, isscalar, isstring, - isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, - stripcomma, throw_error -) -from . import cfuncs - -f2py_version = __version__.version - - -################## Rules for callback function ############## - -cb_routine_rules = { - 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', - 'body': """ -#begintitle# -PyObject *#name#_capi = NULL;/*was Py_None*/ -PyTupleObject *#name#_args_capi = NULL; -int #name#_nofargs = 0; -jmp_buf #name#_jmpbuf; -/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ -#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { -\tPyTupleObject *capi_arglist = #name#_args_capi; -\tPyObject *capi_return = NULL; -\tPyObject *capi_tmp = NULL; -\tPyObject *capi_arglist_list = NULL; -\tint capi_j,capi_i = 0; -\tint capi_longjmp_ok = 1; -#decl# -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_clock(); -#endif -\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); -\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi); -\tif (#name#_capi==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); -\t} -\tif (#name#_capi==NULL) { -\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); -\t\tgoto capi_fail; -\t} -\tif (F2PyCapsule_Check(#name#_capi)) { -\t#name#_typedef #name#_cptr; -\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi); -\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); -\t#return# -\t} -\tif (capi_arglist==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); -\t\tif (capi_tmp) { -\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); -\t\t\tif (capi_arglist==NULL) { -\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t} else { -\t\t\tPyErr_Clear(); -\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); -\t\t} -\t} -\tif (capi_arglist == NULL) { -\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); -\t\tgoto capi_fail; -\t} -#setdims# -#ifdef PYPY_VERSION -#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) -\tcapi_arglist_list = PySequence_List(capi_arglist); -\tif (capi_arglist_list == NULL) goto capi_fail; -#else -#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) -#endif -#pyobjfrom# -#undef CAPI_ARGLIST_SETITEM -#ifdef PYPY_VERSION -\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); -#else -\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -#endif -\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_call_clock(); -#endif -#ifdef PYPY_VERSION -\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist_list); -\tPy_DECREF(capi_arglist_list); -\tcapi_arglist_list = NULL; -#else -\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); -#endif -#ifdef 
F2PY_REPORT_ATEXIT -f2py_cb_stop_call_clock(); -#endif -\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); -\tif (capi_return == NULL) { -\t\tfprintf(stderr,\"capi_return is NULL\\n\"); -\t\tgoto capi_fail; -\t} -\tif (capi_return == Py_None) { -\t\tPy_DECREF(capi_return); -\t\tcapi_return = Py_BuildValue(\"()\"); -\t} -\telse if (!PyTuple_Check(capi_return)) { -\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); -\t} -\tcapi_j = PyTuple_Size(capi_return); -\tcapi_i = 0; -#frompyobj# -\tCFUNCSMESS(\"cb:#name#:successful\\n\"); -\tPy_DECREF(capi_return); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_clock(); -#endif -\tgoto capi_return_pt; -capi_fail: -\tfprintf(stderr,\"Call-back #name# failed.\\n\"); -\tPy_XDECREF(capi_return); -\tPy_XDECREF(capi_arglist_list); -\tif (capi_longjmp_ok) -\t\tlongjmp(#name#_jmpbuf,-1); -capi_return_pt: -\t; -#return# -} -#endtitle# -""", - 'need': ['setjmp.h', 'CFUNCSMESS'], - 'maxnofargs': '#maxnofargs#', - 'nofoptargs': '#nofoptargs#', - 'docstr': """\ -\tdef #argname#(#docsignature#): return #docreturn#\\n\\ -#docstrsigns#""", - 'latexdocstr': """ -{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} -#routnote# - -#latexdocstrsigns#""", - 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' -} -cb_rout_rules = [ - { # Init - 'separatorsfor': {'decl': '\n', - 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', - 'args_td': ',', 'optargs_td': '', - 'args_nm': ',', 'optargs_nm': '', - 'frompyobj': '\n', 'setdims': '\n', - 'docstrsigns': '\\n"\n"', - 'latexdocstrsigns': '\n', - 'latexdocstrreq': '\n', 'latexdocstropt': '\n', - 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', - }, - 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', - 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', - 'args_td': [], 'optargs_td': '', 'strarglens_td': '', - 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', - 'noargs': '', - 'setdims': '/*setdims*/', - 'docstrsigns': '', 'latexdocstrsigns': '', - 'docstrreq': '\tRequired arguments:', - 'docstropt': '\tOptional arguments:', - 'docstrout': '\tReturn objects:', - 'docstrcbs': '\tCall-back functions:', - 'docreturn': '', 'docsign': '', 'docsignopt': '', - 'latexdocstrreq': '\\noindent Required arguments:', - 'latexdocstropt': '\\noindent Optional arguments:', - 'latexdocstrout': '\\noindent Return objects:', - 'latexdocstrcbs': '\\noindent Call-back functions:', - 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, - }, { # Function - 'decl': '\t#ctype# return_value;', - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'}, - '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', - {debugcapi: - '\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} - ], - 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], - 'return': '\treturn return_value;', - '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) - }, - { # String function - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, - 'args': '#ctype# return_value,int return_value_len', - 'args_nm': 'return_value,&return_value_len', - 'args_td': '#ctype# ,int', - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->\\"");'}, - """\tif (capi_j>capi_i) 
-\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", - {debugcapi: - '\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} - ], - 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, - 'string.h', 'GETSTRFROMPYTUPLE'], - 'return': 'return;', - '_check': isstringfunction - }, - { # Complex function - 'optargs': """ -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# *return_value -#endif -""", - 'optargs_nm': """ -#ifndef F2PY_CB_RETURNCOMPLEX -return_value -#endif -""", - 'optargs_td': """ -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# * -#endif -""", - 'decl': """ -#ifdef F2PY_CB_RETURNCOMPLEX -\t#ctype# return_value; -#endif -""", - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'}, - """\ -\tif (capi_j>capi_i) -#ifdef F2PY_CB_RETURNCOMPLEX -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#else -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#endif -""", - {debugcapi: """ -#ifdef F2PY_CB_RETURNCOMPLEX -\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); -#else -\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); -#endif - -"""} - ], - 'return': """ -#ifdef F2PY_CB_RETURNCOMPLEX -\treturn return_value; -#else -\treturn; -#endif -""", - 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, - 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], - '_check': iscomplexfunction - }, - {'docstrout': '\t\t#pydocsignout#', - 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasnote: '--- #note#'}], - 'docreturn': '#rname#,', - '_check': isfunction}, - {'_check': issubroutine, 'return': 'return;'} -] - -cb_arg_rules = [ - { # Doc - 'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'}, - 'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'}, - 'docstrout': {isintent_out: '\t\t#pydocsignout#'}, - 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote, isintent_hide): '--- #note#', - l_and(hasnote, isintent_nothide): '--- See above.'}]}, - 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, - 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, - 'depend': '' - }, - { - 'args': { - l_and(isscalar, isintent_c): '#ctype# #varname_i#', - l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', - isarray: '#ctype# *#varname_i#', - isstring: '#ctype# #varname_i#' - }, - 'args_nm': { - l_and(isscalar, isintent_c): '#varname_i#', - l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', - isarray: '#varname_i#', - isstring: '#varname_i#' - }, - 'args_td': { - l_and(isscalar, isintent_c): '#ctype#', - l_and(isscalar, l_not(isintent_c)): '#ctype# *', - isarray: '#ctype# *', - isstring: '#ctype#' - }, - # untested with multiple args - 'strarglens': {isstring: ',int #varname_i#_cb_len'}, - 'strarglens_td': {isstring: ',int'}, # untested with multiple args - # untested with multiple args - 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, - }, - { # Scalars - 'decl': {l_not(isintent_c): '\t#ctype# 
#varname_i#=(*#varname_i#_cb_capi);'}, - 'error': {l_and(isintent_c, isintent_out, - throw_error('intent(c,out) is forbidden for callback scalar arguments')): - ''}, - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'}, - {isintent_out: - '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, - {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): - '\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, - {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): - '\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, - {l_and(debugcapi, l_and(iscomplex, isintent_c)): - '\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, - {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))): - '\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, - ], - 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, - {debugcapi: 'CFUNCSMESS'}], - '_check': isscalar - }, { - 'pyobjfrom': [{isintent_in: """\ -\tif (#name#_nofargs>capi_i) -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout: """\ -\tif (#name#_nofargs>capi_i) -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) -\t\t\tgoto capi_fail;"""}], - 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, - {isintent_inout: 'pyarr_from_p_#ctype#1'}, - {iscomplex: '#ctype#'}], - '_check': l_and(isscalar, isintent_nothide), - '_optional': '' - }, { # String - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", - {debugcapi: - '\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, - ], - 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', - {debugcapi: 'CFUNCSMESS'}, 'string.h'], - '_check': l_and(isstring, isintent_out) - }, { - 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, - {isintent_in: """\ -\tif (#name#_nofargs>capi_i) -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout: """\ -\tif (#name#_nofargs>capi_i) { -\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) -\t\t\tgoto capi_fail; -\t}"""}], - 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, - {isintent_inout: 'pyarr_from_p_#ctype#1'}], - '_check': l_and(isstring, isintent_nothide), - '_optional': '' - }, - # Array ... - { - 'decl': '\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', - 'setdims': '\t#cbsetdims#;', - '_check': isarray, - '_depend': '' - }, - { - 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, - {isintent_c: """\ -\tif (#name#_nofargs>capi_i) { -\t\tint itemsize_ = #atype# == NPY_STRING ? 1 : 0; -\t\t/*XXX: Hmm, what will destroy this array??? */ -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_CARRAY,NULL); -""", - l_not(isintent_c): """\ -\tif (#name#_nofargs>capi_i) { -\t\tint itemsize_ = #atype# == NPY_STRING ? 
1 : 0; -\t\t/*XXX: Hmm, what will destroy this array??? */ -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_FARRAY,NULL); -""", - }, - """ -\t\tif (tmp_arr==NULL) -\t\t\tgoto capi_fail; -\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) -\t\t\tgoto capi_fail; -}"""], - '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), - '_optional': '', - }, { - 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'}, - """\tif (capi_j>capi_i) { -\t\tPyArrayObject *rv_cb_arr = NULL; -\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; -\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", - {isintent_c: '|F2PY_INTENT_C'}, - """,capi_tmp); -\t\tif (rv_cb_arr == NULL) { -\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tMEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); -\t\tif (capi_tmp != (PyObject *)rv_cb_arr) { -\t\t\tPy_DECREF(rv_cb_arr); -\t\t} -\t}""", - {debugcapi: '\tfprintf(stderr,"<-.\\n");'}, - ], - 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], - '_check': l_and(isarray, isintent_out) - }, { - 'docreturn': '#varname#,', - '_check': isintent_out - } -] - -################## Build call-back module ############# -cb_map = {} - - -def buildcallbacks(m): - global cb_map - cb_map[m['name']] = [] - for bi in m['body']: - if bi['block'] == 'interface': - for b in bi['body']: - if b: - buildcallback(b, m['name']) - else: - errmess('warning: empty body for %s\n' % (m['name'])) - - -def buildcallback(rout, um): - global cb_map - from . import capi_maps - - outmess('\tConstructing call-back function "cb_%s_in_%s"\n' % - (rout['name'], um)) - args, depargs = getargs(rout) - capi_maps.depargs = depargs - var = rout['vars'] - vrd = capi_maps.cb_routsign2map(rout, um) - rd = dictappend({}, vrd) - cb_map[um].append([rout['name'], rd['name']]) - for r in cb_rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar = applyrules(r, vrd, rout) - rd = dictappend(rd, ar) - savevrd = {} - for i, a in enumerate(args): - vrd = capi_maps.cb_sign2map(a, var[a], index=i) - savevrd[a] = vrd - for r in cb_arg_rules: - if '_depend' in r: - continue - if '_optional' in r and isoptional(var[a]): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - for a in args: - vrd = savevrd[a] - for r in cb_arg_rules: - if '_depend' in r: - continue - if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - for a in depargs: - vrd = savevrd[a] - for r in cb_arg_rules: - if '_depend' not in r: - continue - if '_optional' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - if 'args' in rd and 'optargs' in rd: - if isinstance(rd['optargs'], list): - rd['optargs'] = rd['optargs'] + [""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_nm'] = rd['optargs_nm'] + [""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_td'] = rd['optargs_td'] + [""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - if 
isinstance(rd['docreturn'], list): - rd['docreturn'] = stripcomma( - replace('#docreturn#', {'docreturn': rd['docreturn']})) - optargs = stripcomma(replace('#docsignopt#', - {'docsignopt': rd['docsignopt']} - )) - if optargs == '': - rd['docsignature'] = stripcomma( - replace('#docsign#', {'docsign': rd['docsign']})) - else: - rd['docsignature'] = replace('#docsign#[#docsignopt#]', - {'docsign': rd['docsign'], - 'docsignopt': optargs, - }) - rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') - rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') - rd['docstrsigns'] = [] - rd['latexdocstrsigns'] = [] - for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: - if k in rd and isinstance(rd[k], list): - rd['docstrsigns'] = rd['docstrsigns'] + rd[k] - k = 'latex' + k - if k in rd and isinstance(rd[k], list): - rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ - ['\\begin{description}'] + rd[k][1:] +\ - ['\\end{description}'] - if 'args' not in rd: - rd['args'] = '' - rd['args_td'] = '' - rd['args_nm'] = '' - if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): - rd['noargs'] = 'void' - - ar = applyrules(cb_routine_rules, rd) - cfuncs.callbacks[rd['name']] = ar['body'] - if isinstance(ar['need'], str): - ar['need'] = [ar['need']] - - if 'need' in rd: - for t in cfuncs.typedefs.keys(): - if t in rd['need']: - ar['need'].append(t) - - cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] - ar['need'].append(rd['name'] + '_typedef') - cfuncs.needs[rd['name']] = ar['need'] - - capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], - 'nofoptargs': ar['nofoptargs'], - 'docstr': ar['docstr'], - 'latexdocstr': ar['latexdocstr'], - 'argname': rd['argname'] - } - outmess('\t %s\n' % (ar['docstrshort'])) - return -################## Build call-back function ############# diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/cfuncs.py b/venv/lib/python3.7/site-packages/numpy/f2py/cfuncs.py deleted file mode 100644 index ccb7b3a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/cfuncs.py +++ /dev/null @@ -1,1275 +0,0 @@ -#!/usr/bin/env python -""" - -C declarations, CPP macros, and C functions for f2py2e. -Only required declarations/macros/functions will be used. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 11:42:34 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import sys -import copy - -from . 
import __version__ - -f2py_version = __version__.version -errmess = sys.stderr.write - -##################### Definitions ################## - -outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], - 'userincludes': [], - 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], - 'commonhooks': []} -needs = {} -includes0 = {'includes0': '/*need_includes0*/'} -includes = {'includes': '/*need_includes*/'} -userincludes = {'userincludes': '/*need_userincludes*/'} -typedefs = {'typedefs': '/*need_typedefs*/'} -typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} -cppmacros = {'cppmacros': '/*need_cppmacros*/'} -cfuncs = {'cfuncs': '/*need_cfuncs*/'} -callbacks = {'callbacks': '/*need_callbacks*/'} -f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', - 'initf90modhooksstatic': '/*initf90modhooksstatic*/', - 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', - } -commonhooks = {'commonhooks': '/*need_commonhooks*/', - 'initcommonhooks': '/*need_initcommonhooks*/', - } - -############ Includes ################### - -includes0['math.h'] = '#include ' -includes0['string.h'] = '#include ' -includes0['setjmp.h'] = '#include ' - -includes['Python.h'] = '#include "Python.h"' -needs['arrayobject.h'] = ['Python.h'] -includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API -#include "arrayobject.h"''' - -includes['arrayobject.h'] = '#include "fortranobject.h"' -includes['stdarg.h'] = '#include ' - -############# Type definitions ############### - -typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' -typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' -typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' -typedefs['signed_char'] = 'typedef signed char signed_char;' -typedefs['long_long'] = """\ -#ifdef _WIN32 -typedef __int64 long_long; -#else -typedef long long long_long; -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['unsigned_long_long'] = """\ -#ifdef _WIN32 -typedef __uint64 long_long; -#else -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['long_double'] = """\ -#ifndef _LONG_DOUBLE -typedef long double long_double; -#endif -""" -typedefs[ - 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' -typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' -typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' -typedefs['string'] = """typedef char * string;""" - - -############### CPP macros #################### -cppmacros['CFUNCSMESS'] = """\ -#ifdef DEBUGCFUNCS -#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); -#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ - PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ - fprintf(stderr,\"\\n\"); -#else -#define CFUNCSMESS(mess) -#define CFUNCSMESSPY(mess,obj) -#endif -""" -cppmacros['F_FUNC'] = """\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F -#else -#define F_FUNC(f,F) _##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F##_ -#else -#define F_FUNC(f,F) _##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) 
-#else -#define F_FUNC_US(f,F) F_FUNC(f,F) -#endif -""" -cppmacros['F_WRAPPEDFUNC'] = """\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) -#else -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) -#endif -""" -cppmacros['F_MODFUNC'] = """\ -#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f -#else -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f -#else -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) f ## .in. ## m -#else -#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ -#endif -#endif -/* -#if defined(UPPERCASE_FORTRAN) -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) -#else -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) -#endif -*/ - -#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) -""" -cppmacros['SWAPUNSAFE'] = """\ -#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) -""" -cppmacros['SWAP'] = """\ -#define SWAP(a,b,t) {\\ - t *c;\\ - c = a;\\ - a = b;\\ - b = c;} -""" -# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & -# NPY_ARRAY_C_CONTIGUOUS)' -cppmacros['PRINTPYOBJERR'] = """\ -#define PRINTPYOBJERR(obj)\\ - fprintf(stderr,\"#modulename#.error is related to \");\\ - PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ - fprintf(stderr,\"\\n\"); -""" -cppmacros['MINMAX'] = """\ -#ifndef max -#define max(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef min -#define min(a,b) ((a < b) ? (a) : (b)) -#endif -#ifndef MAX -#define MAX(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef MIN -#define MIN(a,b) ((a < b) ? (a) : (b)) -#endif -""" -needs['len..'] = ['f2py_size'] -cppmacros['len..'] = """\ -#define rank(var) var ## _Rank -#define shape(var,dim) var ## _Dims[dim] -#define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp))) -#define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim) -#define fshape(var,dim) shape(var,rank(var)-dim-1) -#define len(var) shape(var,0) -#define flen(var) fshape(var,0) -#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) -/* #define index(i) capi_i ## i */ -#define slen(var) capi_ ## var ## _len -#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) -""" -needs['f2py_size'] = ['stdarg.h'] -cfuncs['f2py_size'] = """\ -static int f2py_size(PyArrayObject* var, ...) 
-{ - npy_int sz = 0; - npy_int dim; - npy_int rank; - va_list argp; - va_start(argp, var); - dim = va_arg(argp, npy_int); - if (dim==-1) - { - sz = PyArray_SIZE(var); - } - else - { - rank = PyArray_NDIM(var); - if (dim>=1 && dim<=rank) - sz = PyArray_DIM(var, dim-1); - else - fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); - } - va_end(argp); - return sz; -} -""" - -cppmacros[ - 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyInt_FromLong(v))' -cppmacros[ - 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyInt_FromLong(v))' -needs['pyobj_from_int1'] = ['signed_char'] -cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyInt_FromLong(v))' -cppmacros[ - 'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))' -needs['pyobj_from_long_long1'] = ['long_long'] -cppmacros['pyobj_from_long_long1'] = """\ -#ifdef HAVE_LONG_LONG -#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) -#else -#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. -#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) -#endif -""" -needs['pyobj_from_long_double1'] = ['long_double'] -cppmacros[ - 'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' -cppmacros[ - 'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' -cppmacros[ - 'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' -needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] -cppmacros[ - 'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_double1'] = ['complex_double'] -cppmacros[ - 'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_float1'] = ['complex_float'] -cppmacros[ - 'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_string1'] = ['string'] -cppmacros[ - 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyString_FromString((char *)v))' -needs['pyobj_from_string1size'] = ['string'] -cppmacros[ - 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))' -needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] -cppmacros['TRYPYARRAYTEMPLATE'] = """\ -/* New SciPy */ -#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; -#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; -#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; - -#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ - switch (PyArray_TYPE(arr)) {\\ - case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_BOOL: *(npy_bool 
*)(PyArray_DATA(arr))=(*v!=0); break;\\ - case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ - default: return -2;\\ - };\\ - return 1 -""" - -needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] -cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ -#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; -#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (PyArray_DESCR(arr)->type==typecode) {\\ - *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ - *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ - return 1;\\ - }\\ - switch (PyArray_TYPE(arr)) {\\ - case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\ - case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\ - case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ - case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ - case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ - default: return -2;\\ - };\\ - return -1; -""" -# cppmacros['NUMFROMARROBJ']="""\ -# define NUMFROMARROBJ(typenum,ctype) \\ -# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -# if (arr) {\\ -# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ -# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ -# goto capi_fail;\\ -# } else {\\ -# 
(PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ -# }\\ -# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -# return 1;\\ -# } -# """ -# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ -# cppmacros['CNUMFROMARROBJ']="""\ -# define CNUMFROMARROBJ(typenum,ctype) \\ -# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -# if (arr) {\\ -# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ -# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ -# goto capi_fail;\\ -# } else {\\ -# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ -# }\\ -# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -# return 1;\\ -# } -# """ - - -needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] -cppmacros['GETSTRFROMPYTUPLE'] = """\ -#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ - PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ - if (rv_cb_str == NULL)\\ - goto capi_fail;\\ - if (PyString_Check(rv_cb_str)) {\\ - str[len-1]='\\0';\\ - STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ - } else {\\ - PRINTPYOBJERR(rv_cb_str);\\ - PyErr_SetString(#modulename#_error,\"string object expected\");\\ - goto capi_fail;\\ - }\\ - } -""" -cppmacros['GETSCALARFROMPYTUPLE'] = """\ -#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ - if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ - if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ - goto capi_fail;\\ - } -""" - -cppmacros['FAILNULL'] = """\\ -#define FAILNULL(p) do { \\ - if ((p) == NULL) { \\ - PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ - goto capi_fail; \\ - } \\ -} while (0) -""" -needs['MEMCOPY'] = ['string.h', 'FAILNULL'] -cppmacros['MEMCOPY'] = """\ -#define MEMCOPY(to,from,n)\\ - do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) -""" -cppmacros['STRINGMALLOC'] = """\ -#define STRINGMALLOC(str,len)\\ - if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ - PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ - goto capi_fail;\\ - } else {\\ - (str)[len] = '\\0';\\ - } -""" -cppmacros['STRINGFREE'] = """\ -#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) -""" -needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] -cppmacros['STRINGCOPYN'] = """\ -#define STRINGCOPYN(to,from,buf_size) \\ - do { \\ - int _m = (buf_size); \\ - char *_to = (to); \\ - char *_from = (from); \\ - FAILNULL(_to); FAILNULL(_from); \\ - (void)strncpy(_to, _from, sizeof(char)*_m); \\ - _to[_m-1] = '\\0'; \\ - /* Padding with spaces instead of nulls */ \\ - for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ - _to[_m] = ' '; \\ - } \\ - } while (0) -""" -needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] -cppmacros['STRINGCOPY'] = """\ -#define STRINGCOPY(to,from)\\ - do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) -""" -cppmacros['CHECKGENERIC'] = """\ -#define CHECKGENERIC(check,tcheck,name) \\ - if (!(check)) {\\ - PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ - /*goto capi_fail;*/\\ - } else """ -cppmacros['CHECKARRAY'] = """\ -#define CHECKARRAY(check,tcheck,name) \\ - if (!(check)) {\\ - PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ - /*goto capi_fail;*/\\ - } else """ -cppmacros['CHECKSTRING'] = """\ -#define CHECKSTRING(check,tcheck,name,show,var)\\ - if (!(check)) {\\ - char errstring[256];\\ - 
sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ - PyErr_SetString(#modulename#_error, errstring);\\ - /*goto capi_fail;*/\\ - } else """ -cppmacros['CHECKSCALAR'] = """\ -#define CHECKSCALAR(check,tcheck,name,show,var)\\ - if (!(check)) {\\ - char errstring[256];\\ - sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ - PyErr_SetString(#modulename#_error,errstring);\\ - /*goto capi_fail;*/\\ - } else """ -# cppmacros['CHECKDIMS']="""\ -# define CHECKDIMS(dims,rank) \\ -# for (int i=0;i<(rank);i++)\\ -# if (dims[i]<0) {\\ -# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ -# goto capi_fail;\\ -# } -# """ -cppmacros[ - 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' -cppmacros['OLDPYNUM'] = """\ -#ifdef OLDPYNUM -#error You need to install NumPy version 13 or higher. See https://scipy.org/install.html -#endif -""" -################# C functions ############### - -cfuncs['calcarrindex'] = """\ -static int calcarrindex(int *i,PyArrayObject *arr) { - int k,ii = i[0]; - for (k=1; k < PyArray_NDIM(arr); k++) - ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ - return ii; -}""" -cfuncs['calcarrindextr'] = """\ -static int calcarrindextr(int *i,PyArrayObject *arr) { - int k,ii = i[PyArray_NDIM(arr)-1]; - for (k=1; k < PyArray_NDIM(arr); k++) - ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ - return ii; -}""" -cfuncs['forcomb'] = """\ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { - int k; - if (dims==NULL) return 0; - if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - for (k=1;k= 0x03000000 - else if (PyUnicode_Check(obj)) { - tmp = PyUnicode_AsASCIIString(obj); - } - else { - PyObject *tmp2; - tmp2 = PyObject_Str(obj); - if (tmp2) { - tmp = PyUnicode_AsASCIIString(tmp2); - Py_DECREF(tmp2); - } - else { - tmp = NULL; - } - } -#else - else { - tmp = PyObject_Str(obj); - } -#endif - if (tmp == NULL) goto capi_fail; - if (*len == -1) - *len = PyString_GET_SIZE(tmp); - STRINGMALLOC(*str,*len); - STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); - Py_DECREF(tmp); - return 1; -capi_fail: - Py_XDECREF(tmp); - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['char_from_pyobj'] = ['int_from_pyobj'] -cfuncs['char_from_pyobj'] = """\ -static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { - int i=0; - if (int_from_pyobj(&i,obj,errmess)) { - *v = (char)i; - return 1; - } - return 0; -} -""" -needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] -cfuncs['signed_char_from_pyobj'] = """\ -static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { - int i=0; - if (int_from_pyobj(&i,obj,errmess)) { - *v = (signed_char)i; - return 1; - } - return 0; -} -""" -needs['short_from_pyobj'] = ['int_from_pyobj'] -cfuncs['short_from_pyobj'] = """\ -static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { - int i=0; - if (int_from_pyobj(&i,obj,errmess)) { - *v = (short)i; - return 1; - } - return 0; -} -""" -cfuncs['int_from_pyobj'] = """\ -static int int_from_pyobj(int* 
v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if (PyInt_Check(obj)) { - *v = (int)PyInt_AS_LONG(obj); - return 1; - } - tmp = PyNumber_Int(obj); - if (tmp) { - *v = PyInt_AS_LONG(tmp); - Py_DECREF(tmp); - return 1; - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -cfuncs['long_from_pyobj'] = """\ -static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if (PyInt_Check(obj)) { - *v = PyInt_AS_LONG(obj); - return 1; - } - tmp = PyNumber_Int(obj); - if (tmp) { - *v = PyInt_AS_LONG(tmp); - Py_DECREF(tmp); - return 1; - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['long_long_from_pyobj'] = ['long_long'] -cfuncs['long_long_from_pyobj'] = """\ -static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if (PyLong_Check(obj)) { - *v = PyLong_AsLongLong(obj); - return (!PyErr_Occurred()); - } - if (PyInt_Check(obj)) { - *v = (long_long)PyInt_AS_LONG(obj); - return 1; - } - tmp = PyNumber_Long(obj); - if (tmp) { - *v = PyLong_AsLongLong(tmp); - Py_DECREF(tmp); - return (!PyErr_Occurred()); - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if (PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] -cfuncs['long_double_from_pyobj'] = """\ -static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { - double d=0; - if (PyArray_CheckScalar(obj)){ - if PyArray_IsScalar(obj, LongDouble) { - PyArray_ScalarAsCtype(obj, v); - return 1; - } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; - } - } - if (double_from_pyobj(&d,obj,errmess)) { - *v = (long_double)d; - return 1; - } - return 0; -} -""" -cfuncs['double_from_pyobj'] = """\ -static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { - PyObject* tmp = NULL; - if (PyFloat_Check(obj)) { -#ifdef __sgi - *v = PyFloat_AsDouble(obj); -#else - *v = PyFloat_AS_DOUBLE(obj); -#endif - return 1; - } - tmp = PyNumber_Float(obj); - if (tmp) { -#ifdef __sgi - *v = PyFloat_AsDouble(tmp); -#else - *v = PyFloat_AS_DOUBLE(tmp); -#endif - Py_DECREF(tmp); - return 1; - } - if (PyComplex_Check(obj)) - tmp = PyObject_GetAttrString(obj,\"real\"); - else if 
(PyString_Check(obj) || PyUnicode_Check(obj)) - /*pass*/; - else if (PySequence_Check(obj)) - tmp = PySequence_GetItem(obj,0); - if (tmp) { - PyErr_Clear(); - if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} - Py_DECREF(tmp); - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) err = #modulename#_error; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['float_from_pyobj'] = ['double_from_pyobj'] -cfuncs['float_from_pyobj'] = """\ -static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { - double d=0.0; - if (double_from_pyobj(&d,obj,errmess)) { - *v = (float)d; - return 1; - } - return 0; -} -""" -needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', - 'complex_double_from_pyobj'] -cfuncs['complex_long_double_from_pyobj'] = """\ -static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { - complex_double cd={0.0,0.0}; - if (PyArray_CheckScalar(obj)){ - if PyArray_IsScalar(obj, CLongDouble) { - PyArray_ScalarAsCtype(obj, v); - return 1; - } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; - (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; - return 1; - } - } - if (complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (long_double)cd.r; - (*v).i = (long_double)cd.i; - return 1; - } - return 0; -} -""" -needs['complex_double_from_pyobj'] = ['complex_double'] -cfuncs['complex_double_from_pyobj'] = """\ -static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { - Py_complex c; - if (PyComplex_Check(obj)) { - c=PyComplex_AsCComplex(obj); - (*v).r=c.real, (*v).i=c.imag; - return 1; - } - if (PyArray_IsScalar(obj, ComplexFloating)) { - if (PyArray_IsScalar(obj, CFloat)) { - npy_cfloat new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)new.real; - (*v).i = (double)new.imag; - } - else if (PyArray_IsScalar(obj, CLongDouble)) { - npy_clongdouble new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)new.real; - (*v).i = (double)new.imag; - } - else { /* if (PyArray_IsScalar(obj, CDouble)) */ - PyArray_ScalarAsCtype(obj, v); - } - return 1; - } - if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ - PyObject *arr; - if (PyArray_Check(obj)) { - arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); - } - else { - arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); - } - if (arr==NULL) return 0; - (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; - (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; - return 1; - } - /* Python does not provide PyNumber_Complex function :-( */ - (*v).i=0.0; - if (PyFloat_Check(obj)) { -#ifdef __sgi - (*v).r = PyFloat_AsDouble(obj); -#else - (*v).r = PyFloat_AS_DOUBLE(obj); -#endif - return 1; - } - if (PyInt_Check(obj)) { - (*v).r = (double)PyInt_AS_LONG(obj); - return 1; - } - if (PyLong_Check(obj)) { - (*v).r = PyLong_AsDouble(obj); - return (!PyErr_Occurred()); - } - if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { - PyObject *tmp = PySequence_GetItem(obj,0); - if (tmp) { - if (complex_double_from_pyobj(v,tmp,errmess)) { - Py_DECREF(tmp); - return 1; - } - Py_DECREF(tmp); - } - } - { - PyObject* err = PyErr_Occurred(); - if (err==NULL) - err = PyExc_TypeError; - PyErr_SetString(err,errmess); - } - return 0; -} -""" -needs['complex_float_from_pyobj'] = [ - 'complex_float', 'complex_double_from_pyobj'] -cfuncs['complex_float_from_pyobj'] = """\ 
-static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { - complex_double cd={0.0,0.0}; - if (complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (float)cd.r; - (*v).i = (float)cd.i; - return 1; - } - return 0; -} -""" -needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' -needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] -cfuncs[ - 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' -needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] -cfuncs[ - 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' -needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' -needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' -needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' -needs['try_pyarr_from_long_long'] = [ - 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] -cfuncs[ - 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' -needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' -needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] -cfuncs[ - 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' -needs['try_pyarr_from_complex_float'] = [ - 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] -cfuncs[ - 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' -needs['try_pyarr_from_complex_double'] = [ - 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] -cfuncs[ - 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' - -needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] -cfuncs['create_cb_arglist'] = """\ -static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { - PyObject *tmp = NULL; - PyObject *tmp_fun = NULL; - int tot,opt,ext,siz,i,di=0; - CFUNCSMESS(\"create_cb_arglist\\n\"); - tot=opt=ext=siz=0; - /* Get the total number of arguments */ - if (PyFunction_Check(fun)) { - tmp_fun = fun; - Py_INCREF(tmp_fun); - } - else { - di = 1; - if (PyObject_HasAttrString(fun,\"im_func\")) { - tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); - } - else if (PyObject_HasAttrString(fun,\"__call__\")) 
{ - tmp = PyObject_GetAttrString(fun,\"__call__\"); - if (PyObject_HasAttrString(tmp,\"im_func\")) - tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); - else { - tmp_fun = fun; /* built-in function */ - Py_INCREF(tmp_fun); - tot = maxnofargs; - if (xa != NULL) - tot += PyTuple_Size((PyObject *)xa); - } - Py_XDECREF(tmp); - } - else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { - tot = maxnofargs; - if (xa != NULL) - tot += PyTuple_Size((PyObject *)xa); - tmp_fun = fun; - Py_INCREF(tmp_fun); - } - else if (F2PyCapsule_Check(fun)) { - tot = maxnofargs; - if (xa != NULL) - ext = PyTuple_Size((PyObject *)xa); - if(ext>0) { - fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); - goto capi_fail; - } - tmp_fun = fun; - Py_INCREF(tmp_fun); - } - } -if (tmp_fun==NULL) { -fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); -goto capi_fail; -} -#if PY_VERSION_HEX >= 0x03000000 - if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { - if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) { -#else - if (PyObject_HasAttrString(tmp_fun,\"func_code\")) { - if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) { -#endif - PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\"); - Py_DECREF(tmp); - if (tmp_argcount == NULL) { - goto capi_fail; - } - tot = PyInt_AsLong(tmp_argcount) - di; - Py_DECREF(tmp_argcount); - } - } - /* Get the number of optional arguments */ -#if PY_VERSION_HEX >= 0x03000000 - if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { - if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) -#else - if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { - if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) -#endif - opt = PyTuple_Size(tmp); - Py_XDECREF(tmp); - } - /* Get the number of extra arguments */ - if (xa != NULL) - ext = PyTuple_Size((PyObject *)xa); - /* Calculate the size of call-backs argument list */ - siz = MIN(maxnofargs+ext,tot); - *nofargs = MAX(0,siz-ext); -#ifdef DEBUGCFUNCS - fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); -#endif - if (siz 0: - if outneeds[n][0] not in needs: - out.append(outneeds[n][0]) - del outneeds[n][0] - else: - flag = 0 - for k in outneeds[n][1:]: - if k in needs[outneeds[n][0]]: - flag = 1 - break - if flag: - outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] - else: - out.append(outneeds[n][0]) - del outneeds[n][0] - if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ - and outneeds[n] != []: - print(n, saveout) - errmess( - 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') - out = out + saveout - break - saveout = copy.copy(outneeds[n]) - if out == []: - out = [n] - res[n] = out - return res diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/common_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/common_rules.py deleted file mode 100644 index f61d881..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/common_rules.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python -""" - -Build common block mechanism for f2py2e. 
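The get_needs() routine that closes cfuncs.py above orders helper snippets so that every entry of the cfuncs table is emitted after its prerequisites, rotating a name to the back of its queue while any dependency is still pending and bailing out when a whole pass makes no progress (the "circular dependence" message). A minimal standalone sketch of that strategy (order_needs and its argument shape are illustrative, not f2py API):

def order_needs(needs):
    # needs maps each helper name to the helpers that must be emitted before it.
    out = []
    pending = list(needs)
    stalled = 0
    while pending:
        name = pending.pop(0)
        if any(dep in pending for dep in needs[name]):
            pending.append(name)           # unmet dependency: retry after the rest
            stalled += 1
            if stalled > len(pending):     # a full rotation without progress: cycle
                raise ValueError('circular dependence involving %r' % (name,))
        else:
            out.append(name)               # every dependency already emitted
            stalled = 0
    return out

print(order_needs({'a': ['b'], 'b': [], 'c': ['a', 'b']}))   # ['b', 'a', 'c']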
- -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.19 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -from .auxfuncs import ( - hasbody, hascommon, hasnote, isintent_hide, outmess -) -from . import capi_maps -from . import func2subr -from .crackfortran import rmbadname - - -def findcommonblocks(block, top=1): - ret = [] - if hascommon(block): - for key, value in block['common'].items(): - vars_ = {v: block['vars'][v] for v in value} - ret.append((key, value, vars_)) - elif hasbody(block): - for b in block['body']: - ret = ret + findcommonblocks(b, 0) - if top: - tret = [] - names = [] - for t in ret: - if t[0] not in names: - names.append(t[0]) - tret.append(t) - return tret - return ret - - -def buildhooks(m): - ret = {'commonhooks': [], 'initcommonhooks': [], - 'docs': ['"COMMON blocks:\\n"']} - fwrap = [''] - - def fadd(line, s=fwrap): - s[0] = '%s\n %s' % (s[0], line) - chooks = [''] - - def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) - ihooks = [''] - - def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) - doc = [''] - - def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) - for (name, vnames, vars) in findcommonblocks(m): - lower_name = name.lower() - hnames, inames = [], [] - for n in vnames: - if isintent_hide(vars[n]): - hnames.append(n) - else: - inames.append(n) - if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( - name, ','.join(inames), ','.join(hnames))) - else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( - name, ','.join(inames))) - fadd('subroutine f2pyinit%s(setupfunc)' % name) - fadd('external setupfunc') - for n in vnames: - fadd(func2subr.var2fixfortran(vars, n)) - if name == '_BLNK_': - fadd('common %s' % (','.join(vnames))) - else: - fadd('common /%s/ %s' % (name, ','.join(vnames))) - fadd('call setupfunc(%s)' % (','.join(inames))) - fadd('end\n') - cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) - idims = [] - for n in inames: - ct = capi_maps.getctype(vars[n]) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n, vars[n]) - if dm['dims']: - idims.append('(%s)' % (dm['dims'])) - else: - idims.append('') - dms = dm['dims'].strip() - if not dms: - dms = '-1' - cadd('\t{\"%s\",%s,{{%s}},%s},' % (n, dm['rank'], dms, at)) - cadd('\t{NULL}\n};') - inames1 = rmbadname(inames) - inames1_tps = ','.join(['char *' + s for s in inames1]) - cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) - cadd('\tint i_f2py=0;') - for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) - cadd('}') - if '_' in lower_name: - F_FUNC = 'F_FUNC_US' - else: - F_FUNC = 'F_FUNC' - cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' - % (F_FUNC, lower_name, name.upper(), - ','.join(['char*'] * len(inames1)))) - cadd('static void f2py_init_%s(void) {' % name) - cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, lower_name, name.upper(), name)) - cadd('}\n') - iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) - iadd('\tF2PyDict_SetItemString(d, \"%s\", tmp);' % name) - iadd('\tPy_DECREF(tmp);') - tname = name.replace('_', '\\_') - 
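# (Note on the helpers above: fadd/cadd/iadd/dadd are accumulator closures over
# one-element lists -- the default argument s=fwrap/chooks/ihooks/doc binds the
# shared list so every call extends s[0] in place, a pre-`nonlocal` idiom for
# building the Fortran wrapper, C hooks, init hooks and LaTeX doc side by side;
# name.replace('_', '\\_') just above escapes underscores ('_' -> '\_') for the
# LaTeX \subsection line emitted through dadd below.)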
dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) - dadd('\\begin{description}') - for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, vars[n]))) - if hasnote(vars[n]): - note = vars[n]['note'] - if isinstance(note, list): - note = '\n'.join(note) - dadd('--- %s' % (note)) - dadd('\\end{description}') - ret['docs'].append( - '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims)))) - ret['commonhooks'] = chooks - ret['initcommonhooks'] = ihooks - ret['latexdoc'] = doc[0] - if len(ret['docs']) <= 1: - ret['docs'] = '' - return ret, fwrap[0] diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/crackfortran.py b/venv/lib/python3.7/site-packages/numpy/f2py/crackfortran.py deleted file mode 100644 index 2aaf5d7..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/crackfortran.py +++ /dev/null @@ -1,3345 +0,0 @@ -#!/usr/bin/env python -""" -crackfortran --- read fortran (77,90) code and extract declaration information. - -Copyright 1999-2004 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/09/27 07:13:49 $ -Pearu Peterson - - -Usage of crackfortran: -====================== -Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h - -m ,--ignore-contains -Functions: crackfortran, crack2fortran -The following Fortran statements/constructions are supported -(or will be if needed): - block data,byte,call,character,common,complex,contains,data, - dimension,double complex,double precision,end,external,function, - implicit,integer,intent,interface,intrinsic, - logical,module,optional,parameter,private,public, - program,real,(sequence?),subroutine,type,use,virtual, - include,pythonmodule -Note: 'virtual' is mapped to 'dimension'. -Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). -Note: code after 'contains' will be ignored until its scope ends. -Note: 'common' statement is extended: dimensions are moved to variable definitions -Note: f2py directive: f2py is read as -Note: pythonmodule is introduced to represent Python module - -Usage: - `postlist=crackfortran(files)` - `postlist` contains declaration information read from the list of files `files`. 
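As a usage illustration of the structure just described (a sketch; 'foo.f' stands in for any Fortran source file):

from numpy.f2py.crackfortran import crackfortran

postlist = crackfortran(['foo.f'])
for block in postlist:
    print(block['block'], block.get('name'))          # e.g. 'subroutine', 'foo'
    for varname, decl in block.get('vars', {}).items():
        print('   ', varname, decl.get('typespec'), decl.get('attrspec', []))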
- `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file - - `postlist` has the following structure: - *** it is a list of dictionaries containing `blocks': - B = {'block','body','vars','parent_block'[,'name','prefix','args','result', - 'implicit','externals','interfaced','common','sortvars', - 'commonvars','note']} - B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | - 'program' | 'block data' | 'type' | 'pythonmodule' - B['body'] --- list containing `subblocks' with the same structure as `blocks' - B['parent_block'] --- dictionary of a parent block: - C['body'][<index>]['parent_block'] is C - B['vars'] --- dictionary of variable definitions - B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) - B['name'] --- name of the block (not if B['block']=='interface') - B['prefix'] --- prefix string (only if B['block']=='function') - B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' - B['result'] --- name of the return value (only if B['block']=='function') - B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None - B['externals'] --- list of variables being external - B['interfaced'] --- list of variables being external and defined - B['common'] --- dictionary of common blocks (list of objects) - B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) - B['from'] --- string showing the 'parents' of the current block - B['use'] --- dictionary of modules used in current block: - {<modulename>:{['only':<0|1>],['map':{<local_name>:<use_name>,...}]}} - B['note'] --- list of LaTeX comments on the block - B['f2pyenhancements'] --- optional dictionary - {'threadsafe':'','fortranname':<name>, - 'callstatement':<C-expr>|<multi-line block>, - 'callprotoargument':<C-typespecs>, - 'usercode':<multi-line block>|<list of multi-line blocks>, - 'pymethoddef:<multi-line block>' - } - B['entry'] --- dictionary {entryname:argslist,..} - B['varnames'] --- list of variable names given in the order of reading the - Fortran code, useful for derived types. - B['saved_interface'] --- a string of scanned routine signature, defines explicit interface - *** Variable definition is a dictionary - D = B['vars'][<name>] = - {'typespec'[,'attrspec','kindselector','charselector','=','typename']} - D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | - 'double precision' | 'integer' | 'logical' | 'real' | 'type' - D['attrspec'] --- list of attributes (e.g.
'dimension(<arrayspec>)', - 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', - 'optional','required', etc) - K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = - 'complex' | 'integer' | 'logical' | 'real' ) - C = D['charselector'] = {['*','len','kind']} - (only if D['typespec']=='character') - D['='] --- initialization expression string - D['typename'] --- name of the type if D['typespec']=='type' - D['dimension'] --- list of dimension bounds - D['intent'] --- list of intent specifications - D['depend'] --- list of variable names on which current variable depends on - D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised - D['note'] --- list of LaTeX comments on the variable - *** Meaning of kind/char selectors (few examples): - D['typespec']*K['*'] - D['typespec'](kind=K['kind']) - character*C['*'] - character(len=C['len'],kind=C['kind']) - (see also fortran type declaration statement formats below) - -Fortran 90 type declaration statement format (F77 is subset of F90) -==================================================================== -(Main source: IBM XL Fortran 5.1 Language Reference Manual) -type declaration = <typespec> [[<attrspec>]::] <entitydecl> -<typespec> = byte | - character[<charselector>] | - complex[<kindselector>] | - double complex | - double precision | - integer[<kindselector>] | - logical[<kindselector>] | - real[<kindselector>] | - type(<typename>) -<charselector> = * <charlen> | - ([len=]<len>[,[kind=]<kind>]) | - (kind=<kind>[,len=<len>]) -<kindselector> = * <intlen> | - ([kind=]<kind>) -<attrspec> = comma separated list of attributes. - Only the following attributes are used in - building up the interface: - external - (parameter --- affects '=' key) - optional - intent - Other attributes are ignored. -<intentspec> = in | out | inout -<arrayspec> = comma separated list of dimension bounds. -<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>] - [/<init_expr>/ | =<init_expr>] [,<entitydecl>] - -In addition, the following attributes are used: check,depend,note - -TODO: - * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' - -> 'real x(2)') - The above may be solved by creating appropriate preprocessor program, for example. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import string -import fileinput -import re -import os -import copy -import platform - -from . import __version__ - -# The environment provided by auxfuncs.py is needed for some calls to eval. -# As the needed functions cannot be determined by static inspection of the -# code, it is safest to use import * pending a major refactoring of f2py. -from .auxfuncs import * - - -f2py_version = __version__.version - -# Global flags: -strictf77 = 1 # Ignore `!' comments unless line[0]=='!' -sourcecodeform = 'fix' # 'fix','free' -quiet = 0 # Be verbose if 0 (Obsolete: not used any more) -verbose = 1 # Be quiet if 0, extra verbose if > 1.
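# For example (illustrative of the declaration grammar documented above; these
# are the dictionaries cracktypespec() further below derives from each selector):
#   'integer(kind=8) :: n' -> typespec='integer',   kindselector={'kind': '8'}
#   'real*8 x'             -> typespec='real',      kindselector={'*': '8'}
#   'character*20 s'       -> typespec='character', charselector={'*': '20'}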
-tabchar = 4 * ' ' -pyffilename = '' -f77modulename = '' -skipemptyends = 0 # for old F77 programs without 'program' statement -ignorecontains = 1 -dolowercase = 1 -debug = [] - -# Global variables -beginpattern = '' -currentfilename = '' -expectbegin = 1 -f90modulevars = {} -filepositiontext = '' -gotnextfile = 1 -groupcache = None -groupcounter = 0 -grouplist = {groupcounter: []} -groupname = '' -include_paths = [] -neededmodule = -1 -onlyfuncs = [] -previous_context = None -skipblocksuntil = -1 -skipfuncs = [] -skipfunctions = [] -usermodules = [] - - -def reset_global_f2py_vars(): - global groupcounter, grouplist, neededmodule, expectbegin - global skipblocksuntil, usermodules, f90modulevars, gotnextfile - global filepositiontext, currentfilename, skipfunctions, skipfuncs - global onlyfuncs, include_paths, previous_context - global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename - global f77modulename, skipemptyends, ignorecontains, dolowercase, debug - - # flags - strictf77 = 1 - sourcecodeform = 'fix' - quiet = 0 - verbose = 1 - tabchar = 4 * ' ' - pyffilename = '' - f77modulename = '' - skipemptyends = 0 - ignorecontains = 1 - dolowercase = 1 - debug = [] - # variables - groupcounter = 0 - grouplist = {groupcounter: []} - neededmodule = -1 - expectbegin = 1 - skipblocksuntil = -1 - usermodules = [] - f90modulevars = {} - gotnextfile = 1 - filepositiontext = '' - currentfilename = '' - skipfunctions = [] - skipfuncs = [] - onlyfuncs = [] - include_paths = [] - previous_context = None - - -def outmess(line, flag=1): - global filepositiontext - - if not verbose: - return - if not quiet: - if flag: - sys.stdout.write(filepositiontext) - sys.stdout.write(line) - -re._MAXCACHE = 50 -defaultimplicitrules = {} -for c in "abcdefghopqrstuvwxyz$_": - defaultimplicitrules[c] = {'typespec': 'real'} -for c in "ijklmn": - defaultimplicitrules[c] = {'typespec': 'integer'} -del c -badnames = {} -invbadnames = {} -for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', - 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', - 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', - 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', - 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', - 'max', 'min', - 'flen', 'fshape', - 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', - 'type', 'default']: - badnames[n] = n + '_bn' - invbadnames[n + '_bn'] = n - - -def rmbadname1(name): - if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n' % - (name, badnames[name])) - return badnames[name] - return name - - -def rmbadname(names): - return [rmbadname1(_m) for _m in names] - - -def undo_rmbadname1(name): - if name in invbadnames: - errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' - % (name, invbadnames[name])) - return invbadnames[name] - return name - - -def undo_rmbadname(names): - return [undo_rmbadname1(_m) for _m in names] - - -def getextension(name): - i = name.rfind('.') - if i == -1: - return '' - if '\\' in name[i:]: - return '' - if '/' in name[i:]: - return '' - return name[i + 1:] - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search -_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match - - -def is_free_format(file): - """Check if 
file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - with open(file, 'r') as f: - line = f.readline() - n = 15 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n > 0 and line: - if line[0] != '!' and line.strip(): - n -= 1 - if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': - result = 1 - break - line = f.readline() - return result - - -# Read fortran (77,90) code -def readfortrancode(ffile, dowithline=show, istop=1): - """ - Read fortran codes from files and - 1) Get rid of comments, line continuations, and empty lines; lower cases. - 2) Call dowithline(line) on every line. - 3) Recursively call itself when statement \"include ''\" is met. - """ - global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 - global beginpattern, quiet, verbose, dolowercase, include_paths - - if not istop: - saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ - beginpattern, quiet, verbose, dolowercase - if ffile == []: - return - localdolowercase = dolowercase - cont = 0 - finalline = '' - ll = '' - includeline = re.compile( - r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) - cont1 = re.compile(r'(?P.*)&\s*\Z') - cont2 = re.compile(r'(\s*&|)(?P.*)') - mline_mark = re.compile(r".*?'''") - if istop: - dowithline('', -1) - ll, l1 = '', '' - spacedigits = [' '] + [str(_m) for _m in range(10)] - filepositiontext = '' - fin = fileinput.FileInput(ffile) - while True: - l = fin.readline() - if not l: - break - if fin.isfirstline(): - filepositiontext = '' - currentfilename = fin.filename() - gotnextfile = 1 - l1 = l - strictf77 = 0 - sourcecodeform = 'fix' - ext = os.path.splitext(currentfilename)[1] - if is_f_file(currentfilename) and \ - not (_has_f90_header(l) or _has_fix_header(l)): - strictf77 = 1 - elif is_free_format(currentfilename) and not _has_fix_header(l): - sourcecodeform = 'free' - if strictf77: - beginpattern = beginpattern77 - else: - beginpattern = beginpattern90 - outmess('\tReading file %s (format:%s%s)\n' - % (repr(currentfilename), sourcecodeform, - strictf77 and ',strict' or '')) - - l = l.expandtabs().replace('\xa0', ' ') - # Get rid of newline characters - while not l == '': - if l[-1] not in "\n\r\f": - break - l = l[:-1] - if not strictf77: - (l, rl) = split_by_unquoted(l, '!') - l += ' ' - if rl[:5].lower() == '!f2py': # f2py directive - l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') - if l.strip() == '': # Skip empty line - cont = 0 - continue - if sourcecodeform == 'fix': - if l[0] in ['*', 'c', '!', 'C', '#']: - if l[1:5].lower() == 'f2py': # f2py directive - l = ' ' + l[5:] - else: # Skip comment line - cont = 0 - continue - elif strictf77: - if len(l) > 72: - l = l[:72] - if not (l[0] in spacedigits): - raise Exception('readfortrancode: Found non-(space,digit) char ' - 'in the first column.\n\tAre you sure that ' - 'this code is in fix form?\n\tline=%s' % repr(l)) - - if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): - # Continuation of a previous line - ll = ll + l[6:] - finalline = '' - origfinalline = '' - else: - if not strictf77: - # F90 continuation - r = cont1.match(l) - if r: - l = r.group('line') # Continuation follows .. - if cont: - ll = ll + cont2.match(l).group('line') - finalline = '' - origfinalline = '' - else: - # clean up line beginning from possible digits. 
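# (Context for the fixed-form branches above: columns 1-5 of a fixed-form line
# may hold only a statement label, which is why the code resets the line to
# ' ' + l[5:]; any non-blank character in column 6 -- index 5 -- is treated as
# a continuation marker here, so l[6:] is glued onto the buffered statement ll.
# Free-form sources instead continue with a trailing '&', matched by cont1/cont2.)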
- l = ' ' + l[5:] - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - cont = (r is not None) - else: - # clean up line beginning from possible digits. - l = ' ' + l[5:] - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - - elif sourcecodeform == 'free': - if not cont and ext == '.pyf' and mline_mark.match(l): - l = l + '\n' - while True: - lc = fin.readline() - if not lc: - errmess( - 'Unexpected end of file when reading multiline\n') - break - l = l + lc - if mline_mark.match(lc): - break - l = l.rstrip() - r = cont1.match(l) - if r: - l = r.group('line') # Continuation follows .. - if cont: - ll = ll + cont2.match(l).group('line') - finalline = '' - origfinalline = '' - else: - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - cont = (r is not None) - else: - raise ValueError( - "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) - m = includeline.match(origfinalline) - if m: - fn = m.group('name') - if os.path.isfile(fn): - readfortrancode(fn, dowithline=dowithline, istop=0) - else: - include_dirs = [ - os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir, fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1, dowithline=dowithline, istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) - else: - dowithline(finalline) - l1 = ll - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) - m = includeline.match(origfinalline) - if m: - fn = m.group('name') - if os.path.isfile(fn): - readfortrancode(fn, dowithline=dowithline, istop=0) - else: - include_dirs = [os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir, fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1, dowithline=dowithline, istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) - else: - dowithline(finalline) - filepositiontext = '' - fin.close() - if istop: - dowithline('', 1) - else: - gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ - beginpattern, quiet, verbose, dolowercase = saveglobals - -# Crack line -beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))' + \ - r'\s*(?P(\b(%s)\b))' + \ - r'\s*(?P%s)\s*\Z' -## -fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' -typespattern = re.compile( - beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type' -typespattern4implicit = re.compile(beforethisafter % ( - '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I) -# -functionpattern = re.compile(beforethisafter % ( - r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' -subroutinepattern = re.compile(beforethisafter % ( - r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' -# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' -# -groupbegins77 = r'program|block\s*data' -beginpattern77 = re.compile( - beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' -groupbegins90 = groupbegins77 + \ - r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' -beginpattern90 = re.compile( - beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' -groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface' -endpattern = re.compile( - beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end' -# endifs='end\s*(if|do|where|select|while|forall)' -endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)' -endifpattern = re.compile( - beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif' -# -implicitpattern = re.compile( - beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' -dimensionpattern = re.compile(beforethisafter % ( - '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' -externalpattern = re.compile( - beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external' -optionalpattern = re.compile( - beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional' -requiredpattern = re.compile( - beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required' -publicpattern = re.compile( - beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public' -privatepattern = re.compile( - beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private' -intrisicpattern = re.compile( - beforethisafter % ('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic' -intentpattern = re.compile(beforethisafter % ( - '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent' -parameterpattern = re.compile( - beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter' -datapattern = re.compile( - beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data' -callpattern = re.compile( - beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call' -entrypattern = re.compile( - beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry' -callfunpattern = re.compile( - beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun' -commonpattern = re.compile( - beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common' -usepattern = re.compile( - 
beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use' -containspattern = re.compile( - beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains' -formatpattern = re.compile( - beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format' -# Non-fortran and f2py-specific statements -f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', - 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements' -multilinepattern = re.compile( - r"\s*(?P''')(?P.*?)(?P''')\s*\Z", re.S), 'multiline' -## - -def split_by_unquoted(line, characters): - """ - Splits the line into (line[:i], line[i:]), - where i is the index of first occurrence of one of the characters - not within quotes, or len(line) if no such index exists - """ - assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes" - r = re.compile( - r"\A(?P({single_quoted}|{double_quoted}|{not_quoted})*)" - r"(?P{char}.*)\Z".format( - not_quoted="[^\"'{}]".format(re.escape(characters)), - char="[{}]".format(re.escape(characters)), - single_quoted=r"('([^'\\]|(\\.))*')", - double_quoted=r'("([^"\\]|(\\.))*")')) - m = r.match(line) - if m: - d = m.groupdict() - return (d["before"], d["after"]) - return (line, "") - -def _simplifyargs(argsline): - a = [] - for n in markoutercomma(argsline).split('@,@'): - for r in '(),': - n = n.replace(r, '_') - a.append(n) - return ','.join(a) - -crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+[\w]*\b)\s*[=].*', re.I) - - -def crackline(line, reset=0): - """ - reset=-1 --- initialize - reset=0 --- crack the line - reset=1 --- final check if mismatch of blocks occurred - - Cracked data is saved in grouplist[0]. - """ - global beginpattern, groupcounter, groupname, groupcache, grouplist - global filepositiontext, currentfilename, neededmodule, expectbegin - global skipblocksuntil, skipemptyends, previous_context, gotnextfile - - _, has_semicolon = split_by_unquoted(line, ";") - if has_semicolon and not (f2pyenhancementspattern[0].match(line) or - multilinepattern[0].match(line)): - # XXX: non-zero reset values need testing - assert reset == 0, repr(reset) - # split line on unquoted semicolons - line, semicolon_line = split_by_unquoted(line, ";") - while semicolon_line: - crackline(line, reset) - line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";") - crackline(line, reset) - return - if reset < 0: - groupcounter = 0 - groupname = {groupcounter: ''} - groupcache = {groupcounter: {}} - grouplist = {groupcounter: []} - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['vars'] = {} - groupcache[groupcounter]['block'] = '' - groupcache[groupcounter]['name'] = '' - neededmodule = -1 - skipblocksuntil = -1 - return - if reset > 0: - fl = 0 - if f77modulename and neededmodule == groupcounter: - fl = 2 - while groupcounter > fl: - outmess('crackline: groupcounter=%s groupname=%s\n' % - (repr(groupcounter), repr(groupname))) - outmess( - 'crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 - if f77modulename and neededmodule == groupcounter: - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end interface - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end module - neededmodule = -1 - return - if line == '': - return - flag = 0 - for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, - requiredpattern, - parameterpattern, datapattern, publicpattern, privatepattern, - intrisicpattern, - endifpattern, endpattern, - formatpattern, - beginpattern, functionpattern, subroutinepattern, - implicitpattern, typespattern, commonpattern, - callpattern, usepattern, containspattern, - entrypattern, - f2pyenhancementspattern, - multilinepattern - ]: - m = pat[0].match(line) - if m: - break - flag = flag + 1 - if not m: - re_1 = crackline_re_1 - if 0 <= skipblocksuntil <= groupcounter: - return - if 'externals' in groupcache[groupcounter]: - for name in groupcache[groupcounter]['externals']: - if name in invbadnames: - name = invbadnames[name] - if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: - continue - m1 = re.match( - r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) - if m1: - m2 = re_1.match(m1.group('before')) - a = _simplifyargs(m1.group('args')) - if m2: - line = 'callfun %s(%s) result (%s)' % ( - name, a, m2.group('result')) - else: - line = 'callfun %s(%s)' % (name, a) - m = callfunpattern[0].match(line) - if not m: - outmess( - 'crackline: could not resolve function call for line=%s.\n' % repr(line)) - return - analyzeline(m, 'callfun', line) - return - if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): - previous_context = None - outmess('crackline:%d: No pattern for line\n' % (groupcounter)) - return - elif pat[1] == 'end': - if 0 <= skipblocksuntil < groupcounter: - groupcounter = groupcounter - 1 - if skipblocksuntil <= groupcounter: - return - if groupcounter <= 0: - raise Exception('crackline: groupcounter(=%s) is nonpositive. ' - 'Check the blocks.' 
- % (groupcounter)) - m1 = beginpattern[0].match((line)) - if (m1) and (not m1.group('this') == groupname[groupcounter]): - raise Exception('crackline: End group %s does not match with ' - 'previous Begin group %s\n\t%s' % - (repr(m1.group('this')), repr(groupname[groupcounter]), - filepositiontext) - ) - if skipblocksuntil == groupcounter: - skipblocksuntil = -1 - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 - if not skipemptyends: - expectbegin = 1 - elif pat[1] == 'begin': - if 0 <= skipblocksuntil <= groupcounter: - groupcounter = groupcounter + 1 - return - gotnextfile = 0 - analyzeline(m, pat[1], line) - expectbegin = 0 - elif pat[1] == 'endif': - pass - elif pat[1] == 'contains': - if ignorecontains: - return - if 0 <= skipblocksuntil <= groupcounter: - return - skipblocksuntil = groupcounter - else: - if 0 <= skipblocksuntil <= groupcounter: - return - analyzeline(m, pat[1], line) - - -def markouterparen(line): - l = '' - f = 0 - for c in line: - if c == '(': - f = f + 1 - if f == 1: - l = l + '@(@' - continue - elif c == ')': - f = f - 1 - if f == 0: - l = l + '@)@' - continue - l = l + c - return l - - -def markoutercomma(line, comma=','): - l = '' - f = 0 - before, after = split_by_unquoted(line, comma + '()') - l += before - while after: - if (after[0] == comma) and (f == 0): - l += '@' + comma + '@' - else: - l += after[0] - if after[0] == '(': - f += 1 - elif after[0] == ')': - f -= 1 - before, after = split_by_unquoted(after[1:], comma + '()') - l += before - assert not f, repr((f, line, l)) - return l - -def unmarkouterparen(line): - r = line.replace('@(@', '(').replace('@)@', ')') - return r - - -def appenddecl(decl, decl2, force=1): - if not decl: - decl = {} - if not decl2: - return decl - if decl is decl2: - return decl - for k in list(decl2.keys()): - if k == 'typespec': - if force or k not in decl: - decl[k] = decl2[k] - elif k == 'attrspec': - for l in decl2[k]: - decl = setattrspec(decl, l, force) - elif k == 'kindselector': - decl = setkindselector(decl, decl2[k], force) - elif k == 'charselector': - decl = setcharselector(decl, decl2[k], force) - elif k in ['=', 'typename']: - if force or k not in decl: - decl[k] = decl2[k] - elif k == 'note': - pass - elif k in ['intent', 'check', 'dimension', 'optional', 'required']: - errmess('appenddecl: "%s" not implemented.\n' % k) - else: - raise Exception('appenddecl: Unknown variable definition key:' + - str(k)) - return decl - -selectpattern = re.compile( - r'\s*(?P(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) -nameargspattern = re.compile( - r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P.*)\s*@\)@))*\s*\Z', re.I) -callnameargspattern = re.compile( - r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z', re.I) -real16pattern = re.compile( - r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') -real8pattern = re.compile( - r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') - -_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) - - -def _is_intent_callback(vdecl): - for a in vdecl.get('attrspec', []): - if _intentcallbackpattern.match(a): - return 1 - return 0 - - -def _resolvenameargspattern(line): - line = markouterparen(line) - m1 = nameargspattern.match(line) - if m1: - return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') - m1 
= callnameargspattern.match(line) - if m1: - return m1.group('name'), m1.group('args'), None, None - return None, [], None, None - - -def analyzeline(m, case, line): - global groupcounter, groupname, groupcache, grouplist, filepositiontext - global currentfilename, f77modulename, neededinterface, neededmodule - global expectbegin, gotnextfile, previous_context - - block = m.group('this') - if case != 'multiline': - previous_context = None - if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ - and not skipemptyends and groupcounter < 1: - newname = os.path.basename(currentfilename).split('.')[0] - outmess( - 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) - gotnextfile = 0 - groupcounter = groupcounter + 1 - groupname[groupcounter] = 'program' - groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['vars'] = {} - groupcache[groupcounter]['block'] = 'program' - groupcache[groupcounter]['name'] = newname - groupcache[groupcounter]['from'] = 'fromsky' - expectbegin = 0 - if case in ['begin', 'call', 'callfun']: - # Crack line => block,name,args,result - block = block.lower() - if re.match(r'block\s*data', block, re.I): - block = 'block data' - if re.match(r'python\s*module', block, re.I): - block = 'python module' - name, args, result, bind = _resolvenameargspattern(m.group('after')) - if name is None: - if block == 'block data': - name = '_BLOCK_DATA_' - else: - name = '' - if block not in ['interface', 'block data']: - outmess('analyzeline: No name/args pattern found for line.\n') - - previous_context = (block, name, groupcounter) - if args: - args = rmbadname([x.strip() - for x in markoutercomma(args).split('@,@')]) - else: - args = [] - if '' in args: - while '' in args: - args.remove('') - outmess( - 'analyzeline: argument list is malformed (missing argument).\n') - - # end of crack line => block,name,args,result - needmodule = 0 - needinterface = 0 - - if case in ['call', 'callfun']: - needinterface = 1 - if 'args' not in groupcache[groupcounter]: - return - if name not in groupcache[groupcounter]['args']: - return - for it in grouplist[groupcounter]: - if it['name'] == name: - return - if name in groupcache[groupcounter]['interfaced']: - return - block = {'call': 'subroutine', 'callfun': 'function'}[case] - if f77modulename and neededmodule == -1 and groupcounter <= 1: - neededmodule = groupcounter + 2 - needmodule = 1 - if block != 'interface': - needinterface = 1 - # Create new block(s) - groupcounter = groupcounter + 1 - groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - if needmodule: - if verbose > 1: - outmess('analyzeline: Creating module block %s\n' % - repr(f77modulename), 0) - groupname[groupcounter] = 'module' - groupcache[groupcounter]['block'] = 'python module' - groupcache[groupcounter]['name'] = f77modulename - groupcache[groupcounter]['from'] = '' - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['externals'] = [] - groupcache[groupcounter]['interfaced'] = [] - groupcache[groupcounter]['vars'] = {} - groupcounter = groupcounter + 1 - groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - if needinterface: - if verbose > 1: - outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( - groupcounter), 0) - groupname[groupcounter] = 'interface' - groupcache[groupcounter]['block'] = 'interface' - groupcache[groupcounter]['name'] = 'unknown_interface' - groupcache[groupcounter]['from'] = 
'%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['externals'] = [] - groupcache[groupcounter]['interfaced'] = [] - groupcache[groupcounter]['vars'] = {} - groupcounter = groupcounter + 1 - groupcache[groupcounter] = {} - grouplist[groupcounter] = [] - groupname[groupcounter] = block - groupcache[groupcounter]['block'] = block - if not name: - name = 'unknown_' + block - groupcache[groupcounter]['prefix'] = m.group('before') - groupcache[groupcounter]['name'] = rmbadname1(name) - groupcache[groupcounter]['result'] = result - if groupcounter == 1: - groupcache[groupcounter]['from'] = currentfilename - else: - if f77modulename and groupcounter == 3: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], currentfilename) - else: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) - for k in list(groupcache[groupcounter].keys()): - if not groupcache[groupcounter][k]: - del groupcache[groupcounter][k] - - groupcache[groupcounter]['args'] = args - groupcache[groupcounter]['body'] = [] - groupcache[groupcounter]['externals'] = [] - groupcache[groupcounter]['interfaced'] = [] - groupcache[groupcounter]['vars'] = {} - groupcache[groupcounter]['entry'] = {} - # end of creation - if block == 'type': - groupcache[groupcounter]['varnames'] = [] - - if case in ['call', 'callfun']: # set parents variables - if name not in groupcache[groupcounter - 2]['externals']: - groupcache[groupcounter - 2]['externals'].append(name) - groupcache[groupcounter]['vars'] = copy.deepcopy( - groupcache[groupcounter - 2]['vars']) - try: - del groupcache[groupcounter]['vars'][name][ - groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] - except Exception: - pass - if block in ['function', 'subroutine']: # set global attributes - try: - groupcache[groupcounter]['vars'][name] = appenddecl( - groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) - except Exception: - pass - if case == 'callfun': # return type - if result and result in groupcache[groupcounter]['vars']: - if not name == result: - groupcache[groupcounter]['vars'][name] = appenddecl( - groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) - # if groupcounter>1: # name is interfaced - try: - groupcache[groupcounter - 2]['interfaced'].append(name) - except Exception: - pass - if block == 'function': - t = typespattern[0].match(m.group('before') + ' ' + name) - if t: - typespec, selector, attr, edecl = cracktypespec0( - t.group('this'), t.group('after')) - updatevars(typespec, selector, attr, edecl) - - if case in ['call', 'callfun']: - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end routine - grouplist[groupcounter - 1].append(groupcache[groupcounter]) - grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter = groupcounter - 1 # end interface - - elif case == 'entry': - name, args, result, bind = _resolvenameargspattern(m.group('after')) - if name is not None: - if args: - args = rmbadname([x.strip() - for x in markoutercomma(args).split('@,@')]) - else: - args = [] - assert result is None, repr(result) - groupcache[groupcounter]['entry'][name] = args - previous_context = 
('entry', name, groupcounter) - elif case == 'type': - typespec, selector, attr, edecl = cracktypespec0( - block, m.group('after')) - last_name = updatevars(typespec, selector, attr, edecl) - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']: - edecl = groupcache[groupcounter]['vars'] - ll = m.group('after').strip() - i = ll.find('::') - if i < 0 and case == 'intent': - i = markouterparen(ll).find('@)@') - 2 - ll = ll[:i + 1] + '::' + ll[i + 1:] - i = ll.find('::') - if ll[i:] == '::' and 'args' in groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n' % - (m.group('this'), ll[:i])) - ll = ll + ','.join(groupcache[groupcounter]['args']) - if i < 0: - i = 0 - pl = '' - else: - pl = ll[:i].strip() - ll = ll[i + 2:] - ch = markoutercomma(pl).split('@,@') - if len(ch) > 1: - pl = ch[0] - outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( - ','.join(ch[1:]))) - last_name = None - - for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: - m1 = namepattern.match(e) - if not m1: - if case in ['public', 'private']: - k = '' - else: - print(m.groupdict()) - outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( - case, repr(e))) - continue - else: - k = rmbadname1(m1.group('name')) - if k not in edecl: - edecl[k] = {} - if case == 'dimension': - ap = case + m1.group('after') - if case == 'intent': - ap = m.group('this') + pl - if _intentcallbackpattern.match(ap): - if k not in groupcache[groupcounter]['args']: - if groupcounter > 1: - if '__user__' not in groupcache[groupcounter - 2]['name']: - outmess( - 'analyzeline: missing __user__ module (could be nothing)\n') - # fixes ticket 1693 - if k != groupcache[groupcounter]['name']: - outmess('analyzeline: appending intent(callback) %s' - ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) - groupcache[groupcounter]['args'].append(k) - else: - errmess( - 'analyzeline: intent(callback) %s is ignored' % (k)) - else: - errmess('analyzeline: intent(callback) %s is already' - ' in argument list' % (k)) - if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']: - ap = case - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append(ap) - else: - edecl[k]['attrspec'] = [ap] - if case == 'external': - if groupcache[groupcounter]['block'] == 'program': - outmess('analyzeline: ignoring program arguments\n') - continue - if k not in groupcache[groupcounter]['args']: - continue - if 'externals' not in groupcache[groupcounter]: - groupcache[groupcounter]['externals'] = [] - groupcache[groupcounter]['externals'].append(k) - last_name = k - groupcache[groupcounter]['vars'] = edecl - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case == 'parameter': - edecl = groupcache[groupcounter]['vars'] - ll = m.group('after').strip()[1:-1] - last_name = None - for e in markoutercomma(ll).split('@,@'): - try: - k, initexpr = [x.strip() for x in e.split('=')] - except Exception: - outmess( - 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) - continue - params = get_parameters(edecl) - k = rmbadname1(k) - if k not in edecl: - edecl[k] = {} - if '=' in edecl[k] and (not edecl[k]['='] == initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( - k, edecl[k]['='], 
initexpr)) - t = determineexprtype(initexpr, params) - if t: - if t.get('typespec') == 'real': - tt = list(initexpr) - for m in real16pattern.finditer(initexpr): - tt[m.start():m.end()] = list( - initexpr[m.start():m.end()].lower().replace('d', 'e')) - initexpr = ''.join(tt) - elif t.get('typespec') == 'complex': - initexpr = initexpr[1:].lower().replace('d', 'e').\ - replace(',', '+1j*(') - try: - v = eval(initexpr, {}, params) - except (SyntaxError, NameError, TypeError) as msg: - errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' - % (initexpr, msg)) - continue - edecl[k]['='] = repr(v) - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append('parameter') - else: - edecl[k]['attrspec'] = ['parameter'] - last_name = k - groupcache[groupcounter]['vars'] = edecl - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case == 'implicit': - if m.group('after').strip().lower() == 'none': - groupcache[groupcounter]['implicit'] = None - elif m.group('after'): - if 'implicit' in groupcache[groupcounter]: - impl = groupcache[groupcounter]['implicit'] - else: - impl = {} - if impl is None: - outmess( - 'analyzeline: Overwriting earlier "implicit none" statement.\n') - impl = {} - for e in markoutercomma(m.group('after')).split('@,@'): - decl = {} - m1 = re.match( - r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) - if not m1: - outmess( - 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) - continue - m2 = typespattern4implicit.match(m1.group('this')) - if not m2: - outmess( - 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) - continue - typespec, selector, attr, edecl = cracktypespec0( - m2.group('this'), m2.group('after')) - kindselect, charselect, typename = cracktypespec( - typespec, selector) - decl['typespec'] = typespec - decl['kindselector'] = kindselect - decl['charselector'] = charselect - decl['typename'] = typename - for k in list(decl.keys()): - if not decl[k]: - del decl[k] - for r in markoutercomma(m1.group('after')).split('@,@'): - if '-' in r: - try: - begc, endc = [x.strip() for x in r.split('-')] - except Exception: - outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) - continue - else: - begc = endc = r.strip() - if not len(begc) == len(endc) == 1: - outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n' % r) - continue - for o in range(ord(begc), ord(endc) + 1): - impl[chr(o)] = decl - groupcache[groupcounter]['implicit'] = impl - elif case == 'data': - ll = [] - dl = '' - il = '' - f = 0 - fc = 1 - inp = 0 - for c in m.group('after'): - if not inp: - if c == "'": - fc = not fc - if c == '/' and fc: - f = f + 1 - continue - if c == '(': - inp = inp + 1 - elif c == ')': - inp = inp - 1 - if f == 0: - dl = dl + c - elif f == 1: - il = il + c - elif f == 2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl, il]) - dl = c - il = '' - f = 0 - if f == 2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl, il]) - vars = {} - if 'vars' in groupcache[groupcounter]: - vars = groupcache[groupcounter]['vars'] - last_name = None - for l in ll: - l = [x.strip() for x in l] - if l[0][0] == ',': - l[0] = l[0][1:] - if l[0][0] == '(': - outmess( - 'analyzeline: implied-DO list "%s" is not supported. 
Skipping.\n' % l[0]) - continue - i = 0 - j = 0 - llen = len(l[1]) - for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): - if v[0] == '(': - outmess( - 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) - # XXX: subsequent init expressions may get wrong values. - # Ignoring since data statements are irrelevant for - # wrapping. - continue - fc = 0 - while (i < llen) and (fc or not l[1][i] == ','): - if l[1][i] == "'": - fc = not fc - i = i + 1 - i = i + 1 - if v not in vars: - vars[v] = {} - if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]: - outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( - v, vars[v]['='], l[1][j:i - 1])) - vars[v]['='] = l[1][j:i - 1] - j = i - last_name = v - groupcache[groupcounter]['vars'] = vars - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case == 'common': - line = m.group('after').strip() - if not line[0] == '/': - line = '//' + line - cl = [] - f = 0 - bn = '' - ol = '' - for c in line: - if c == '/': - f = f + 1 - continue - if f >= 3: - bn = bn.strip() - if not bn: - bn = '_BLNK_' - cl.append([bn, ol]) - f = f - 2 - bn = '' - ol = '' - if f % 2: - bn = bn + c - else: - ol = ol + c - bn = bn.strip() - if not bn: - bn = '_BLNK_' - cl.append([bn, ol]) - commonkey = {} - if 'common' in groupcache[groupcounter]: - commonkey = groupcache[groupcounter]['common'] - for c in cl: - if c[0] not in commonkey: - commonkey[c[0]] = [] - for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: - if i: - commonkey[c[0]].append(i) - groupcache[groupcounter]['common'] = commonkey - previous_context = ('common', bn, groupcounter) - elif case == 'use': - m1 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) - if m1: - mm = m1.groupdict() - if 'use' not in groupcache[groupcounter]: - groupcache[groupcounter]['use'] = {} - name = m1.group('name') - groupcache[groupcounter]['use'][name] = {} - isonly = 0 - if 'list' in mm and mm['list'] is not None: - if 'notonly' in mm and mm['notonly'] is None: - isonly = 1 - groupcache[groupcounter]['use'][name]['only'] = isonly - ll = [x.strip() for x in mm['list'].split(',')] - rl = {} - for l in ll: - if '=' in l: - m2 = re.match( - r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z', l, re.I) - if m2: - rl[m2.group('local').strip()] = m2.group( - 'use').strip() - else: - outmess( - 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) - else: - rl[l] = l - groupcache[groupcounter]['use'][name]['map'] = rl - else: - pass - else: - print(m.groupdict()) - outmess('analyzeline: Could not crack the use statement.\n') - elif case in ['f2pyenhancements']: - if 'f2pyenhancements' not in groupcache[groupcounter]: - groupcache[groupcounter]['f2pyenhancements'] = {} - d = groupcache[groupcounter]['f2pyenhancements'] - if m.group('this') == 'usercode' and 'usercode' in d: - if isinstance(d['usercode'], str): - d['usercode'] = [d['usercode']] - d['usercode'].append(m.group('after')) - else: - d[m.group('this')] = m.group('after') - elif case == 'multiline': - if previous_context is None: - if verbose: - outmess('analyzeline: No context for multiline block.\n') - return - gc = groupcounter - appendmultiline(groupcache[gc], - previous_context[:2], - m.group('this')) - else: - if verbose > 1: - print(m.groupdict()) - outmess('analyzeline: No code implemented for line.\n') - - -def appendmultiline(group, context_name, ml): - if 'f2pymultilines' not in group: - 
group['f2pymultilines'] = {} - d = group['f2pymultilines'] - if context_name not in d: - d[context_name] = [] - d[context_name].append(ml) - return - - -def cracktypespec0(typespec, ll): - selector = None - attr = None - if re.match(r'double\s*complex', typespec, re.I): - typespec = 'double complex' - elif re.match(r'double\s*precision', typespec, re.I): - typespec = 'double precision' - else: - typespec = typespec.strip().lower() - m1 = selectpattern.match(markouterparen(ll)) - if not m1: - outmess( - 'cracktypespec0: no kind/char_selector pattern found for line.\n') - return - d = m1.groupdict() - for k in list(d.keys()): - d[k] = unmarkouterparen(d[k]) - if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: - selector = d['this'] - ll = d['after'] - i = ll.find('::') - if i >= 0: - attr = ll[:i].strip() - ll = ll[i + 2:] - return typespec, selector, attr, ll -##### -namepattern = re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z', re.I) -kindselector = re.compile( - r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z', re.I) -charselector = re.compile( - r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z', re.I) -lenkindpattern = re.compile( - r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z', re.I) -lenarraypattern = re.compile( - r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) - - -def removespaces(expr): - expr = expr.strip() - if len(expr) <= 1: - return expr - expr2 = expr[0] - for i in range(1, len(expr) - 1): - if (expr[i] == ' ' and - ((expr[i + 1] in "()[]{}=+-/* ") or - (expr[i - 1] in "()[]{}=+-/* "))): - continue - expr2 = expr2 + expr[i] - expr2 = expr2 + expr[-1] - return expr2 - - -def markinnerspaces(line): - l = '' - f = 0 - cc = '\'' - cb = '' - for c in line: - if cb == '\\' and c in ['\\', '\'', '"']: - l = l + c - cb = c - continue - if f == 0 and c in ['\'', '"']: - cc = c - if c == cc: - f = f + 1 - elif c == cc: - f = f - 1 - elif c == ' ' and f == 1: - l = l + '@_@' - continue - l = l + c - cb = c - return l - - -def updatevars(typespec, selector, attrspec, entitydecl): - global groupcache, groupcounter - - last_name = None - kindselect, charselect, typename = cracktypespec(typespec, selector) - if attrspec: - attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] - l = [] - c = re.compile(r'(?P[a-zA-Z]+)') - for a in attrspec: - if not a: - continue - m = c.match(a) - if m: - s = m.group('start').lower() - a = s + a[len(s):] - l.append(a) - attrspec = l - el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] - el1 = [] - for e in el: - for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: - if e1: - el1.append(e1.replace('@_@', ' ')) - for e in el1: - m = namepattern.match(e) - if not m: - outmess( - 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) - continue - ename = rmbadname1(m.group('name')) - edecl = {} - if ename in groupcache[groupcounter]['vars']: - edecl = groupcache[groupcounter]['vars'][ename].copy() - not_has_typespec = 'typespec' not in edecl - if not_has_typespec: - edecl['typespec'] = typespec - elif typespec and (not typespec == edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". 
Ignoring.\n' % ( - ename, edecl['typespec'], typespec)) - if 'kindselector' not in edecl: - edecl['kindselector'] = copy.copy(kindselect) - elif kindselect: - for k in list(kindselect.keys()): - if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( - k, ename, edecl['kindselector'][k], kindselect[k])) - else: - edecl['kindselector'][k] = copy.copy(kindselect[k]) - if 'charselector' not in edecl and charselect: - if not_has_typespec: - edecl['charselector'] = charselect - else: - errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' - % (ename, charselect)) - elif charselect: - for k in list(charselect.keys()): - if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( - k, ename, edecl['charselector'][k], charselect[k])) - else: - edecl['charselector'][k] = copy.copy(charselect[k]) - if 'typename' not in edecl: - edecl['typename'] = typename - elif typename and (not edecl['typename'] == typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['typename'], typename)) - if 'attrspec' not in edecl: - edecl['attrspec'] = copy.copy(attrspec) - elif attrspec: - for a in attrspec: - if a not in edecl['attrspec']: - edecl['attrspec'].append(a) - else: - edecl['typespec'] = copy.copy(typespec) - edecl['kindselector'] = copy.copy(kindselect) - edecl['charselector'] = copy.copy(charselect) - edecl['typename'] = typename - edecl['attrspec'] = copy.copy(attrspec) - if m.group('after'): - m1 = lenarraypattern.match(markouterparen(m.group('after'))) - if m1: - d1 = m1.groupdict() - for lk in ['len', 'array', 'init']: - if d1[lk + '2'] is not None: - d1[lk] = d1[lk + '2'] - del d1[lk + '2'] - for k in list(d1.keys()): - if d1[k] is not None: - d1[k] = unmarkouterparen(d1[k]) - else: - del d1[k] - if 'len' in d1 and 'array' in d1: - if d1['len'] == '': - d1['len'] = d1['array'] - del d1['array'] - else: - d1['array'] = d1['array'] + ',' + d1['len'] - del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( - typespec, e, typespec, ename, d1['array'])) - if 'array' in d1: - dm = 'dimension(%s)' % d1['array'] - if 'attrspec' not in edecl or (not edecl['attrspec']): - edecl['attrspec'] = [dm] - else: - edecl['attrspec'].append(dm) - for dm1 in edecl['attrspec']: - if dm1[:9] == 'dimension' and dm1 != dm: - del edecl['attrspec'][-1] - errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' - % (ename, dm1, dm)) - break - - if 'len' in d1: - if typespec in ['complex', 'integer', 'logical', 'real']: - if ('kindselector' not in edecl) or (not edecl['kindselector']): - edecl['kindselector'] = {} - edecl['kindselector']['*'] = d1['len'] - elif typespec == 'character': - if ('charselector' not in edecl) or (not edecl['charselector']): - edecl['charselector'] = {} - if 'len' in edecl['charselector']: - del edecl['charselector']['len'] - edecl['charselector']['*'] = d1['len'] - if 'init' in d1: - if '=' in edecl and (not edecl['='] == d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['='], d1['init'])) - else: - edecl['='] = d1['init'] - else: - outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % ( - ename + m.group('after'))) - for k in list(edecl.keys()): - if not edecl[k]: - del edecl[k] - groupcache[groupcounter]['vars'][ename] = edecl - if 'varnames' in groupcache[groupcounter]: - groupcache[groupcounter]['varnames'].append(ename) - last_name = ename - return last_name - - -def cracktypespec(typespec, selector): - kindselect = None - charselect = None - typename = None - if selector: - if typespec in ['complex', 'integer', 'logical', 'real']: - kindselect = kindselector.match(selector) - if not kindselect: - outmess( - 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) - return - kindselect = kindselect.groupdict() - kindselect['*'] = kindselect['kind2'] - del kindselect['kind2'] - for k in list(kindselect.keys()): - if not kindselect[k]: - del kindselect[k] - for k, i in list(kindselect.items()): - kindselect[k] = rmbadname1(i) - elif typespec == 'character': - charselect = charselector.match(selector) - if not charselect: - outmess( - 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) - return - charselect = charselect.groupdict() - charselect['*'] = charselect['charlen'] - del charselect['charlen'] - if charselect['lenkind']: - lenkind = lenkindpattern.match( - markoutercomma(charselect['lenkind'])) - lenkind = lenkind.groupdict() - for lk in ['len', 'kind']: - if lenkind[lk + '2']: - lenkind[lk] = lenkind[lk + '2'] - charselect[lk] = lenkind[lk] - del lenkind[lk + '2'] - del charselect['lenkind'] - for k in list(charselect.keys()): - if not charselect[k]: - del charselect[k] - for k, i in list(charselect.items()): - charselect[k] = rmbadname1(i) - elif typespec == 'type': - typename = re.match(r'\s*\(\s*(?P\w+)\s*\)', selector, re.I) - if typename: - typename = typename.group('name') - else: - outmess('cracktypespec: no typename found in %s\n' % - (repr(typespec + selector))) - else: - outmess('cracktypespec: no selector used for %s\n' % - (repr(selector))) - return kindselect, charselect, typename -###### - - -def setattrspec(decl, attr, force=0): - if not decl: - decl = {} - if not attr: - return decl - if 'attrspec' not in decl: - decl['attrspec'] = [attr] - return decl - if force: - decl['attrspec'].append(attr) - if attr in decl['attrspec']: - return decl - if attr == 'static' and 'automatic' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr == 'automatic' and 'static' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr == 'public' and 'private' not in decl['attrspec']: - decl['attrspec'].append(attr) - elif attr == 'private' and 'public' not in decl['attrspec']: - decl['attrspec'].append(attr) - else: - decl['attrspec'].append(attr) - return decl - - -def setkindselector(decl, sel, force=0): - if not decl: - decl = {} - if not sel: - return decl - if 'kindselector' not in decl: - decl['kindselector'] = sel - return decl - for k in list(sel.keys()): - if force or k not in decl['kindselector']: - decl['kindselector'][k] = sel[k] - return decl - - -def setcharselector(decl, sel, force=0): - if not decl: - decl = {} - if not sel: - return decl - if 'charselector' not in decl: - decl['charselector'] = sel - return decl - for k in list(sel.keys()): - if force or k not in decl['charselector']: - decl['charselector'][k] = sel[k] - return decl - - -def getblockname(block, unknown='unknown'): - if 'name' in block: - return block['name'] - return unknown - -# post processing - - -def setmesstext(block): - global filepositiontext - - try: - filepositiontext = 'In: %s:%s\n' % 
(block['from'], block['name']) - except Exception: - pass - - -def get_usedict(block): - usedict = {} - if 'parent_block' in block: - usedict = get_usedict(block['parent_block']) - if 'use' in block: - usedict.update(block['use']) - return usedict - - -def get_useparameters(block, param_map=None): - global f90modulevars - - if param_map is None: - param_map = {} - usedict = get_usedict(block) - if not usedict: - return param_map - for usename, mapping in list(usedict.items()): - usename = usename.lower() - if usename not in f90modulevars: - outmess('get_useparameters: no module %s info used by %s\n' % - (usename, block.get('name'))) - continue - mvars = f90modulevars[usename] - params = get_parameters(mvars) - if not params: - continue - # XXX: apply mapping - if mapping: - errmess('get_useparameters: mapping for %s not impl.' % (mapping)) - for k, v in list(params.items()): - if k in param_map: - outmess('get_useparameters: overriding parameter %s with' - ' value from module %s' % (repr(k), repr(usename))) - param_map[k] = v - - return param_map - - -def postcrack2(block, tab='', param_map=None): - global f90modulevars - - if not f90modulevars: - return block - if isinstance(block, list): - ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) - for g in block] - return ret - setmesstext(block) - outmess('%sBlock: %s\n' % (tab, block['name']), 0) - - if param_map is None: - param_map = get_useparameters(block) - - if param_map is not None and 'vars' in block: - vars = block['vars'] - for n in list(vars.keys()): - var = vars[n] - if 'kindselector' in var: - kind = var['kindselector'] - if 'kind' in kind: - val = kind['kind'] - if val in param_map: - kind['kind'] = param_map[val] - new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) - for b in block['body']] - block['body'] = new_body - - return block - - -def postcrack(block, args=None, tab=''): - """ - TODO: - function return values - determine expression types if in argument list - """ - global usermodules, onlyfunctions - - if isinstance(block, list): - gret = [] - uret = [] - for g in block: - setmesstext(g) - g = postcrack(g, tab=tab + '\t') - # sort user routines to appear first - if 'name' in g and '__user__' in g['name']: - uret.append(g) - else: - gret.append(g) - return uret + gret - setmesstext(block) - if not isinstance(block, dict) and 'block' not in block: - raise Exception('postcrack: Expected block dictionary instead of ' + - str(block)) - if 'name' in block and not block['name'] == 'unknown_interface': - outmess('%sBlock: %s\n' % (tab, block['name']), 0) - block = analyzeargs(block) - block = analyzecommon(block) - block['vars'] = analyzevars(block) - block['sortvars'] = sortvarnames(block['vars']) - if 'args' in block and block['args']: - args = block['args'] - block['body'] = analyzebody(block, args, tab=tab) - - userisdefined = [] - if 'use' in block: - useblock = block['use'] - for k in list(useblock.keys()): - if '__user__' in k: - userisdefined.append(k) - else: - useblock = {} - name = '' - if 'name' in block: - name = block['name'] - # and not userisdefined: # Build a __user__ module - if 'externals' in block and block['externals']: - interfaced = [] - if 'interfaced' in block: - interfaced = block['interfaced'] - mvars = copy.copy(block['vars']) - if name: - mname = name + '__user__routines' - else: - mname = 'unknown__user__routines' - if mname in userisdefined: - i = 1 - while '%s_%i' % (mname, i) in userisdefined: - i = i + 1 - mname = '%s_%i' % (mname, i) - interface = {'block': 'interface', 
'body': [], - 'vars': {}, 'name': name + '_user_interface'} - for e in block['externals']: - if e in interfaced: - edef = [] - j = -1 - for b in block['body']: - j = j + 1 - if b['block'] == 'interface': - i = -1 - for bb in b['body']: - i = i + 1 - if 'name' in bb and bb['name'] == e: - edef = copy.copy(bb) - del b['body'][i] - break - if edef: - if not b['body']: - del block['body'][j] - del interfaced[interfaced.index(e)] - break - interface['body'].append(edef) - else: - if e in mvars and not isexternal(mvars[e]): - interface['vars'][e] = mvars[e] - if interface['vars'] or interface['body']: - block['interfaced'] = interfaced - mblock = {'block': 'python module', 'body': [ - interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} - useblock[mname] = {} - usermodules.append(mblock) - if useblock: - block['use'] = useblock - return block - - -def sortvarnames(vars): - indep = [] - dep = [] - for v in list(vars.keys()): - if 'depend' in vars[v] and vars[v]['depend']: - dep.append(v) - else: - indep.append(v) - n = len(dep) - i = 0 - while dep: # XXX: How to catch dependence cycles correctly? - v = dep[0] - fl = 0 - for w in dep[1:]: - if w in vars[v]['depend']: - fl = 1 - break - if fl: - dep = dep[1:] + [v] - i = i + 1 - if i > n: - errmess('sortvarnames: failed to compute dependencies because' - ' of cyclic dependencies between ' - + ', '.join(dep) + '\n') - indep = indep + dep - break - else: - indep.append(v) - dep = dep[1:] - n = len(dep) - i = 0 - return indep - - -def analyzecommon(block): - if not hascommon(block): - return block - commonvars = [] - for k in list(block['common'].keys()): - comvars = [] - for e in block['common'][k]: - m = re.match( - r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) - if m: - dims = [] - if m.group('dims'): - dims = [x.strip() - for x in markoutercomma(m.group('dims')).split('@,@')] - n = rmbadname1(m.group('name').strip()) - if n in block['vars']: - if 'attrspec' in block['vars'][n]: - block['vars'][n]['attrspec'].append( - 'dimension(%s)' % (','.join(dims))) - else: - block['vars'][n]['attrspec'] = [ - 'dimension(%s)' % (','.join(dims))] - else: - if dims: - block['vars'][n] = { - 'attrspec': ['dimension(%s)' % (','.join(dims))]} - else: - block['vars'][n] = {} - if n not in commonvars: - commonvars.append(n) - else: - n = e - errmess( - 'analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n' % (e, k)) - comvars.append(n) - block['common'][k] = comvars - if 'commonvars' not in block: - block['commonvars'] = commonvars - else: - block['commonvars'] = block['commonvars'] + commonvars - return block - - -def analyzebody(block, args, tab=''): - global usermodules, skipfuncs, onlyfuncs, f90modulevars - - setmesstext(block) - body = [] - for b in block['body']: - b['parent_block'] = block - if b['block'] in ['function', 'subroutine']: - if args is not None and b['name'] not in args: - continue - else: - as_ = b['args'] - if b['name'] in skipfuncs: - continue - if onlyfuncs and b['name'] not in onlyfuncs: - continue - b['saved_interface'] = crack2fortrangen( - b, '\n' + ' ' * 6, as_interface=True) - - else: - as_ = args - b = postcrack(b, as_, tab=tab + '\t') - if b['block'] == 'interface' and not b['body']: - if 'f2pyenhancements' not in b: - continue - if b['block'].replace(' ', '') == 'pythonmodule': - usermodules.append(b) - else: - if b['block'] == 'module': - f90modulevars[b['name']] = b['vars'] - body.append(b) - return body - - -def buildimplicitrules(block): - setmesstext(block) - implicitrules = 
defaultimplicitrules - attrrules = {} - if 'implicit' in block: - if block['implicit'] is None: - implicitrules = None - if verbose > 1: - outmess( - 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) - else: - for k in list(block['implicit'].keys()): - if block['implicit'][k].get('typespec') not in ['static', 'automatic']: - implicitrules[k] = block['implicit'][k] - else: - attrrules[k] = block['implicit'][k]['typespec'] - return implicitrules, attrrules - - -def myeval(e, g=None, l=None): - r = eval(e, g, l) - if type(r) in [type(0), type(0.0)]: - return r - raise ValueError('r=%r' % (r)) - -getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) - - -def getlincoef(e, xset): # e = a*x+b ; x in xset - try: - c = int(myeval(e, {}, {})) - return 0, c, None - except Exception: - pass - if getlincoef_re_1.match(e): - return 1, 0, e - len_e = len(e) - for x in xset: - if len(x) > len_e: - continue - if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): - # skip function calls having x as an argument, e.g max(1, x) - continue - re_1 = re.compile(r'(?P.*?)\b' + x + r'\b(?P.*)', re.I) - m = re_1.match(e) - if m: - try: - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 0, m1.group('after')) - m1 = re_1.match(ee) - b = myeval(ee, {}, {}) - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1, m1.group('after')) - m1 = re_1.match(ee) - a = myeval(ee, {}, {}) - b - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 0.5, m1.group('after')) - m1 = re_1.match(ee) - c = myeval(ee, {}, {}) - # computing another point to be sure that expression is linear - m1 = re_1.match(e) - while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1.5, m1.group('after')) - m1 = re_1.match(ee) - c2 = myeval(ee, {}, {}) - if (a * 0.5 + b == c and a * 1.5 + b == c2): - return a, b, x - except Exception: - pass - break - return None, None, None - -_varname_match = re.compile(r'\A[a-z]\w*\Z').match - - -def getarrlen(dl, args, star='*'): - edl = [] - try: - edl.append(myeval(dl[0], {}, {})) - except Exception: - edl.append(dl[0]) - try: - edl.append(myeval(dl[1], {}, {})) - except Exception: - edl.append(dl[1]) - if isinstance(edl[0], int): - p1 = 1 - edl[0] - if p1 == 0: - d = str(dl[1]) - elif p1 < 0: - d = '%s-%s' % (dl[1], -p1) - else: - d = '%s+%s' % (dl[1], p1) - elif isinstance(edl[1], int): - p1 = 1 + edl[1] - if p1 == 0: - d = '-(%s)' % (dl[0]) - else: - d = '%s-(%s)' % (p1, dl[0]) - else: - d = '%s-(%s)+1' % (dl[1], dl[0]) - try: - return repr(myeval(d, {}, {})), None, None - except Exception: - pass - d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) - if None not in [d1[0], d2[0]]: - if (d1[0], d2[0]) == (0, 0): - return repr(d2[1] - d1[1] + 1), None, None - b = d2[1] - d1[1] + 1 - d1 = (d1[0], 0, d1[2]) - d2 = (d2[0], b, d2[2]) - if d1[0] == 0 and d2[2] in args: - if b < 0: - return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0]) - elif b: - return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0]) - else: - return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0]) - if d2[0] == 0 and d1[2] in args: - - if b < 0: - return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0]) - elif b: - return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0]) - else: - return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0]) - if d1[2] == d2[2] and d1[2] in args: - a = d2[0] - d1[0] - if not a: - return repr(b), None, None - if b < 0: - return '%s 
* %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a) - elif b: - return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a) - else: - return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a) - if d1[0] == d2[0] == 1: - c = str(d1[2]) - if c not in args: - if _varname_match(c): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) - c = '(%s)' % c - if b == 0: - d = '%s-%s' % (d2[2], c) - elif b < 0: - d = '%s-%s-%s' % (d2[2], c, -b) - else: - d = '%s-%s+%s' % (d2[2], c, b) - elif d1[0] == 0: - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)' % c2 - if d2[0] == 1: - pass - elif d2[0] == -1: - c2 = '-%s' % c2 - else: - c2 = '%s*%s' % (d2[0], c2) - - if b == 0: - d = c2 - elif b < 0: - d = '%s-%s' % (c2, -b) - else: - d = '%s+%s' % (c2, b) - elif d2[0] == 0: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)' % c1 - if d1[0] == 1: - c1 = '-%s' % c1 - elif d1[0] == -1: - c1 = '+%s' % c1 - elif d1[0] < 0: - c1 = '+%s*%s' % (-d1[0], c1) - else: - c1 = '-%s*%s' % (d1[0], c1) - - if b == 0: - d = c1 - elif b < 0: - d = '%s-%s' % (c1, -b) - else: - d = '%s+%s' % (c1, b) - else: - c1 = str(d1[2]) - if c1 not in args: - if _varname_match(c1): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) - c1 = '(%s)' % c1 - if d1[0] == 1: - c1 = '-%s' % c1 - elif d1[0] == -1: - c1 = '+%s' % c1 - elif d1[0] < 0: - c1 = '+%s*%s' % (-d1[0], c1) - else: - c1 = '-%s*%s' % (d1[0], c1) - - c2 = str(d2[2]) - if c2 not in args: - if _varname_match(c2): - outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) - c2 = '(%s)' % c2 - if d2[0] == 1: - pass - elif d2[0] == -1: - c2 = '-%s' % c2 - else: - c2 = '%s*%s' % (d2[0], c2) - - if b == 0: - d = '%s%s' % (c2, c1) - elif b < 0: - d = '%s%s-%s' % (c2, c1, -b) - else: - d = '%s%s+%s' % (c2, c1, b) - return d, None, None - -word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) - - -def _get_depend_dict(name, vars, deps): - if name in vars: - words = vars[name].get('depend', []) - - if '=' in vars[name] and not isstring(vars[name]): - for word in word_pattern.findall(vars[name]['=']): - if word not in words and word in vars: - words.append(word) - for word in words[:]: - for w in deps.get(word, []) \ - or _get_depend_dict(word, vars, deps): - if w not in words: - words.append(w) - else: - outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) - words = [] - deps[name] = words - return words - - -def _calc_depend_dict(vars): - names = list(vars.keys()) - depend_dict = {} - for n in names: - _get_depend_dict(n, vars, depend_dict) - return depend_dict - - -def get_sorted_names(vars): - """ - """ - depend_dict = _calc_depend_dict(vars) - names = [] - for name in list(depend_dict.keys()): - if not depend_dict[name]: - names.append(name) - del depend_dict[name] - while depend_dict: - for name, lst in list(depend_dict.items()): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - return [name for name in names if name in vars] - - -def _kind_func(string): - # XXX: return something sensible. 
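# ----------------------------------------------------------------------
# Example (illustrative sketch, not from the file): what the
# _calc_depend_dict/get_sorted_names pair above computes -- names are
# emitted so that everything a variable 'depend's on comes first. The
# vars mapping here is hypothetical.
def sorted_names(var_deps):
    # Kahn-style peeling: repeatedly emit names whose dependencies are done.
    done, pending = [], dict(var_deps)
    while pending:
        ready = [n for n, deps in pending.items()
                 if all(d in done or d not in pending for d in deps)]
        if not ready:  # dependence cycle: keep the remainder in given order
            done.extend(pending)
            break
        for n in ready:
            done.append(n)
            del pending[n]
    return done
print(sorted_names({'a': ['n'], 'n': [], 'b': ['a', 'n']}))  # ['n', 'a', 'b']
# ----------------------------------------------------------------------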
- if string[0] in "'\"": - string = string[1:-1] - if real16pattern.match(string): - return 8 - elif real8pattern.match(string): - return 4 - return 'kind(' + string + ')' - - -def _selected_int_kind_func(r): - # XXX: This should be processor dependent - m = 10 ** r - if m <= 2 ** 8: - return 1 - if m <= 2 ** 16: - return 2 - if m <= 2 ** 32: - return 4 - if m <= 2 ** 63: - return 8 - if m <= 2 ** 128: - return 16 - return -1 - - -def _selected_real_kind_func(p, r=0, radix=0): - # XXX: This should be processor dependent - # This is only good for 0 <= p <= 20 - if p < 7: - return 4 - if p < 16: - return 8 - machine = platform.machine().lower() - if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')): - if p <= 20: - return 16 - else: - if p < 19: - return 10 - elif p <= 20: - return 16 - return -1 - - -def get_parameters(vars, global_params={}): - params = copy.copy(global_params) - g_params = copy.copy(global_params) - for name, func in [('kind', _kind_func), - ('selected_int_kind', _selected_int_kind_func), - ('selected_real_kind', _selected_real_kind_func), ]: - if name not in g_params: - g_params[name] = func - param_names = [] - for n in get_sorted_names(vars): - if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: - param_names.append(n) - kind_re = re.compile(r'\bkind\s*\(\s*(?P.*)\s*\)', re.I) - selected_int_kind_re = re.compile( - r'\bselected_int_kind\s*\(\s*(?P.*)\s*\)', re.I) - selected_kind_re = re.compile( - r'\bselected_(int|real)_kind\s*\(\s*(?P.*)\s*\)', re.I) - for n in param_names: - if '=' in vars[n]: - v = vars[n]['='] - if islogical(vars[n]): - v = v.lower() - for repl in [ - ('.false.', 'False'), - ('.true.', 'True'), - # TODO: test .eq., .neq., etc replacements. - ]: - v = v.replace(*repl) - v = kind_re.sub(r'kind("\1")', v) - v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) - - # We need to act according to the data. - # The easy case is if the data has a kind-specifier, - # then we may easily remove those specifiers. - # However, it may be that the user uses other specifiers...(!) - is_replaced = False - if 'kindselector' in vars[n]: - if 'kind' in vars[n]['kindselector']: - orig_v_len = len(v) - v = v.replace('_' + vars[n]['kindselector']['kind'], '') - # Again, this will be true if even a single specifier - # has been replaced, see comment above. - is_replaced = len(v) < orig_v_len - - if not is_replaced: - if not selected_kind_re.match(v): - v_ = v.split('_') - # In case there are additive parameters - if len(v_) > 1: - v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') - - # Currently this will not work for complex numbers. 
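# ----------------------------------------------------------------------
# Example of the gap described in this comment block (sketch only; the
# file leaves this unimplemented): a complex parameter such as
#     complex, parameter :: c = (1.0, 2.0)
# would need its value split on the top-level comma before evaluation:
v = '(1.0, 2.0)'
re_, im_ = (float(part) for part in v[1:-1].split(',', 1))
print(complex(re_, im_))  # -> (1+2j)
# ----------------------------------------------------------------------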
- # There is missing code for extracting a complex number, - # which may be defined in either of these: - # a) (Re, Im) - # b) cmplx(Re, Im) - # c) dcmplx(Re, Im) - # d) cmplx(Re, Im, ) - - if isdouble(vars[n]): - tt = list(v) - for m in real16pattern.finditer(v): - tt[m.start():m.end()] = list( - v[m.start():m.end()].lower().replace('d', 'e')) - v = ''.join(tt) - - elif iscomplex(vars[n]): - # FIXME complex numbers may also have exponents - if v[0] == '(' and v[-1] == ')': - # FIXME, unused l looks like potential bug - l = markoutercomma(v[1:-1]).split('@,@') - - try: - params[n] = eval(v, g_params, params) - except Exception as msg: - params[n] = v - outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) - if isstring(vars[n]) and isinstance(params[n], int): - params[n] = chr(params[n]) - nl = n.lower() - if nl != n: - params[nl] = params[n] - else: - print(vars[n]) - outmess( - 'get_parameters:parameter %s does not have value?!\n' % (repr(n))) - return params - - -def _eval_length(length, params): - if length in ['(:)', '(*)', '*']: - return '(*)' - return _eval_scalar(length, params) - -_is_kind_number = re.compile(r'\d+_').match - - -def _eval_scalar(value, params): - if _is_kind_number(value): - value = value.split('_')[0] - try: - value = str(eval(value, {}, params)) - except (NameError, SyntaxError, TypeError): - return value - except Exception as msg: - errmess('"%s" in evaluating %r ' - '(available names: %s)\n' - % (msg, value, list(params.keys()))) - return value - - -def analyzevars(block): - global f90modulevars - - setmesstext(block) - implicitrules, attrrules = buildimplicitrules(block) - vars = copy.copy(block['vars']) - if block['block'] == 'function' and block['name'] not in vars: - vars[block['name']] = {} - if '' in block['vars']: - del vars[''] - if 'attrspec' in block['vars']['']: - gen = block['vars']['']['attrspec'] - for n in list(vars.keys()): - for k in ['public', 'private']: - if k in gen: - vars[n] = setattrspec(vars[n], k) - svars = [] - args = block['args'] - for a in args: - try: - vars[a] - svars.append(a) - except KeyError: - pass - for n in list(vars.keys()): - if n not in args: - svars.append(n) - - params = get_parameters(vars, get_useparameters(block)) - - dep_matches = {} - name_match = re.compile(r'\w[\w\d_$]*').match - for v in list(vars.keys()): - m = name_match(v) - if m: - n = v[m.start():m.end()] - try: - dep_matches[n] - except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match - for n in svars: - if n[0] in list(attrrules.keys()): - vars[n] = setattrspec(vars[n], attrrules[n[0]]) - if 'typespec' not in vars[n]: - if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): - if implicitrules: - ln0 = n[0].lower() - for k in list(implicitrules[ln0].keys()): - if k == 'typespec' and implicitrules[ln0][k] == 'undefined': - continue - if k not in vars[n]: - vars[n][k] = implicitrules[ln0][k] - elif k == 'attrspec': - for l in implicitrules[ln0][k]: - vars[n] = setattrspec(vars[n], l) - elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( - repr(n), block['name'])) - - if 'charselector' in vars[n]: - if 'len' in vars[n]['charselector']: - l = vars[n]['charselector']['len'] - try: - l = str(eval(l, {}, params)) - except Exception: - pass - vars[n]['charselector']['len'] = l - - if 'kindselector' in vars[n]: - if 'kind' in vars[n]['kindselector']: - l = vars[n]['kindselector']['kind'] - try: - l = str(eval(l, {}, params)) - except Exception: - pass - 
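# ----------------------------------------------------------------------
# Example (sketch, hypothetical names): the assignment below stores the
# kind expression after it has been evaluated against the collected
# parameters, so for
#     integer, parameter :: dp = selected_real_kind(15)
#     real(dp) :: x
# the symbolic kind 'dp' resolves to '8':
params = {'dp': 8}
kind = 'dp'
try:
    kind = str(eval(kind, {}, params))
except Exception:
    pass
print(kind)  # -> '8'
# ----------------------------------------------------------------------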
vars[n]['kindselector']['kind'] = l - - savelindims = {} - if 'attrspec' in vars[n]: - attr = vars[n]['attrspec'] - attr.reverse() - vars[n]['attrspec'] = [] - dim, intent, depend, check, note = None, None, None, None, None - for a in attr: - if a[:9] == 'dimension': - dim = (a[9:].strip())[1:-1] - elif a[:6] == 'intent': - intent = (a[6:].strip())[1:-1] - elif a[:6] == 'depend': - depend = (a[6:].strip())[1:-1] - elif a[:5] == 'check': - check = (a[5:].strip())[1:-1] - elif a[:4] == 'note': - note = (a[4:].strip())[1:-1] - else: - vars[n] = setattrspec(vars[n], a) - if intent: - if 'intent' not in vars[n]: - vars[n]['intent'] = [] - for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: - # Remove spaces so that 'in out' becomes 'inout' - tmp = c.replace(' ', '') - if tmp not in vars[n]['intent']: - vars[n]['intent'].append(tmp) - intent = None - if note: - note = note.replace('\\n\\n', '\n\n') - note = note.replace('\\n ', '\n') - if 'note' not in vars[n]: - vars[n]['note'] = [note] - else: - vars[n]['note'].append(note) - note = None - if depend is not None: - if 'depend' not in vars[n]: - vars[n]['depend'] = [] - for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): - if c not in vars[n]['depend']: - vars[n]['depend'].append(c) - depend = None - if check is not None: - if 'check' not in vars[n]: - vars[n]['check'] = [] - for c in [x.strip() for x in markoutercomma(check).split('@,@')]: - if c not in vars[n]['check']: - vars[n]['check'].append(c) - check = None - if dim and 'dimension' not in vars[n]: - vars[n]['dimension'] = [] - for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): - star = '*' - if d == ':': - star = ':' - if d in params: - d = str(params[d]) - for p in list(params.keys()): - re_1 = re.compile(r'(?P.*?)\b' + p + r'\b(?P.*)', re.I) - m = re_1.match(d) - while m: - d = m.group('before') + \ - str(params[p]) + m.group('after') - m = re_1.match(d) - if d == star: - dl = [star] - else: - dl = markoutercomma(d, ':').split('@:@') - if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) - dl = ['*'] - d = '*' - if len(dl) == 1 and not dl[0] == star: - dl = ['1', dl[0]] - if len(dl) == 2: - d, v, di = getarrlen(dl, list(block['vars'].keys())) - if d[:4] == '1 * ': - d = d[4:] - if di and di[-4:] == '/(1)': - di = di[:-4] - if v: - savelindims[d] = v, di - vars[n]['dimension'].append(d) - if 'dimension' in vars[n]: - if isintent_c(vars[n]): - shape_macro = 'shape' - else: - shape_macro = 'shape' # 'fshape' - if isstringarray(vars[n]): - if 'charselector' in vars[n]: - d = vars[n]['charselector'] - if '*' in d: - d = d['*'] - errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n' - % (d, n, - ','.join(vars[n]['dimension']), - n, ','.join(vars[n]['dimension'] + [d]))) - vars[n]['dimension'].append(d) - del vars[n]['charselector'] - if 'intent' not in vars[n]: - vars[n]['intent'] = [] - if 'c' not in vars[n]['intent']: - vars[n]['intent'].append('c') - else: - errmess( - "analyzevars: charselector=%r unhandled." 
% (d)) - if 'check' not in vars[n] and 'args' in block and n in block['args']: - flag = 'depend' not in vars[n] - if flag: - vars[n]['depend'] = [] - vars[n]['check'] = [] - if 'dimension' in vars[n]: - #/----< no check - i = -1 - ni = len(vars[n]['dimension']) - for d in vars[n]['dimension']: - ddeps = [] # dependencies of 'd' - ad = '' - pd = '' - if d not in vars: - if d in savelindims: - pd, ad = '(', savelindims[d][1] - d = savelindims[d][0] - else: - for r in block['args']: - if r not in vars: - continue - if re.match(r'.*?\b' + r + r'\b', d, re.I): - ddeps.append(r) - if d in vars: - if 'attrspec' in vars[d]: - for aa in vars[d]['attrspec']: - if aa[:6] == 'depend': - ddeps += aa[6:].strip()[1:-1].split(',') - if 'depend' in vars[d]: - ddeps = ddeps + vars[d]['depend'] - i = i + 1 - if d in vars and ('depend' not in vars[d]) \ - and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ - and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): - vars[d]['depend'] = [n] - if ni > 1: - vars[d]['='] = '%s%s(%s,%s)%s' % ( - pd, shape_macro, n, i, ad) - else: - vars[d]['='] = '%slen(%s)%s' % (pd, n, ad) - # /---< no check - if 1 and 'check' not in vars[d]: - if ni > 1: - vars[d]['check'] = ['%s%s(%s,%i)%s==%s' - % (pd, shape_macro, n, i, ad, d)] - else: - vars[d]['check'] = [ - '%slen(%s)%s>=%s' % (pd, n, ad, d)] - if 'attrspec' not in vars[d]: - vars[d]['attrspec'] = ['optional'] - if ('optional' not in vars[d]['attrspec']) and\ - ('required' not in vars[d]['attrspec']): - vars[d]['attrspec'].append('optional') - elif d not in ['*', ':']: - #/----< no check - if flag: - if d in vars: - if n not in ddeps: - vars[n]['depend'].append(d) - else: - vars[n]['depend'] = vars[n]['depend'] + ddeps - elif isstring(vars[n]): - length = '1' - if 'charselector' in vars[n]: - if '*' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['*'], - params) - vars[n]['charselector']['*'] = length - elif 'len' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['len'], - params) - del vars[n]['charselector']['len'] - vars[n]['charselector']['*'] = length - - if not vars[n]['check']: - del vars[n]['check'] - if flag and not vars[n]['depend']: - del vars[n]['depend'] - if '=' in vars[n]: - if 'attrspec' not in vars[n]: - vars[n]['attrspec'] = [] - if ('optional' not in vars[n]['attrspec']) and \ - ('required' not in vars[n]['attrspec']): - vars[n]['attrspec'].append('optional') - if 'depend' not in vars[n]: - vars[n]['depend'] = [] - for v, m in list(dep_matches.items()): - if m(vars[n]['=']): - vars[n]['depend'].append(v) - if not vars[n]['depend']: - del vars[n]['depend'] - if isscalar(vars[n]): - vars[n]['='] = _eval_scalar(vars[n]['='], params) - - for n in list(vars.keys()): - if n == block['name']: # n is block name - if 'note' in vars[n]: - block['note'] = vars[n]['note'] - if block['block'] == 'function': - if 'result' in block and block['result'] in vars: - vars[n] = appenddecl(vars[n], vars[block['result']]) - if 'prefix' in block: - pr = block['prefix'] - ispure = 0 - isrec = 1 - pr1 = pr.replace('pure', '') - ispure = (not pr == pr1) - pr = pr1.replace('recursive', '') - isrec = (not pr == pr1) - m = typespattern[0].match(pr) - if m: - typespec, selector, attr, edecl = cracktypespec0( - m.group('this'), m.group('after')) - kindselect, charselect, typename = cracktypespec( - typespec, selector) - vars[n]['typespec'] = typespec - if kindselect: - if 'kind' in kindselect: - try: - kindselect['kind'] = eval( - kindselect['kind'], {}, params) - 
except Exception: - pass - vars[n]['kindselector'] = kindselect - if charselect: - vars[n]['charselector'] = charselect - if typename: - vars[n]['typename'] = typename - if ispure: - vars[n] = setattrspec(vars[n], 'pure') - if isrec: - vars[n] = setattrspec(vars[n], 'recursive') - else: - outmess( - 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) - if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: - if 'commonvars' in block: - neededvars = copy.copy(block['args'] + block['commonvars']) - else: - neededvars = copy.copy(block['args']) - for n in list(vars.keys()): - if l_or(isintent_callback, isintent_aux)(vars[n]): - neededvars.append(n) - if 'entry' in block: - neededvars.extend(list(block['entry'].keys())) - for k in list(block['entry'].keys()): - for n in block['entry'][k]: - if n not in neededvars: - neededvars.append(n) - if block['block'] == 'function': - if 'result' in block: - neededvars.append(block['result']) - else: - neededvars.append(block['name']) - if block['block'] in ['subroutine', 'function']: - name = block['name'] - if name in vars and 'intent' in vars[name]: - block['intent'] = vars[name]['intent'] - if block['block'] == 'type': - neededvars.extend(list(vars.keys())) - for n in list(vars.keys()): - if n not in neededvars: - del vars[n] - return vars - -analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) - - -def expr2name(a, block, args=[]): - orig_a = a - a_is_expr = not analyzeargs_re_1.match(a) - if a_is_expr: # `a` is an expression - implicitrules, attrrules = buildimplicitrules(block) - at = determineexprtype(a, block['vars'], implicitrules) - na = 'e_' - for c in a: - c = c.lower() - if c not in string.ascii_lowercase + string.digits: - c = '_' - na = na + c - if na[-1] == '_': - na = na + 'e' - else: - na = na + '_e' - a = na - while a in block['vars'] or a in block['args']: - a = a + 'r' - if a in args: - k = 1 - while a + str(k) in args: - k = k + 1 - a = a + str(k) - if a_is_expr: - block['vars'][a] = at - else: - if a not in block['vars']: - if orig_a in block['vars']: - block['vars'][a] = block['vars'][orig_a] - else: - block['vars'][a] = {} - if 'externals' in block and orig_a in block['externals'] + block['interfaced']: - block['vars'][a] = setattrspec(block['vars'][a], 'external') - return a - - -def analyzeargs(block): - setmesstext(block) - implicitrules, attrrules = buildimplicitrules(block) - if 'args' not in block: - block['args'] = [] - args = [] - for a in block['args']: - a = expr2name(a, block, args) - args.append(a) - block['args'] = args - if 'entry' in block: - for k, args1 in list(block['entry'].items()): - for a in args1: - if a not in block['vars']: - block['vars'][a] = {} - - for b in block['body']: - if b['name'] in args: - if 'externals' not in block: - block['externals'] = [] - if b['name'] not in block['externals']: - block['externals'].append(b['name']) - if 'result' in block and block['result'] not in block['vars']: - block['vars'][block['result']] = {} - return block - -determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) -determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P[\w]+)|)\Z', re.I) -determineexprtype_re_3 = re.compile( - r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P[\w]+)|)\Z', re.I) -determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) -determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) - - -def _ensure_exprdict(r): - if isinstance(r, int): - return {'typespec': 'integer'} - if isinstance(r, float): - return {'typespec': 'real'} - if 
isinstance(r, complex): - return {'typespec': 'complex'} - if isinstance(r, dict): - return r - raise AssertionError(repr(r)) - - -def determineexprtype(expr, vars, rules={}): - if expr in vars: - return _ensure_exprdict(vars[expr]) - expr = expr.strip() - if determineexprtype_re_1.match(expr): - return {'typespec': 'complex'} - m = determineexprtype_re_2.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) - return {'typespec': 'integer'} - m = determineexprtype_re_3.match(expr) - if m: - if 'name' in m.groupdict() and m.group('name'): - outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) - return {'typespec': 'real'} - for op in ['+', '-', '*', '/']: - for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: - if e in vars: - return _ensure_exprdict(vars[e]) - t = {} - if determineexprtype_re_4.match(expr): # in parenthesis - t = determineexprtype(expr[1:-1], vars, rules) - else: - m = determineexprtype_re_5.match(expr) - if m: - rn = m.group('name') - t = determineexprtype(m.group('name'), vars, rules) - if t and 'attrspec' in t: - del t['attrspec'] - if not t: - if rn[0] in rules: - return _ensure_exprdict(rules[rn[0]]) - if expr[0] in '\'"': - return {'typespec': 'character', 'charselector': {'*': '*'}} - if not t: - outmess( - 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) - return t - -###### - - -def crack2fortrangen(block, tab='\n', as_interface=False): - global skipfuncs, onlyfuncs - - setmesstext(block) - ret = '' - if isinstance(block, list): - for g in block: - if g and g['block'] in ['function', 'subroutine']: - if g['name'] in skipfuncs: - continue - if onlyfuncs and g['name'] not in onlyfuncs: - continue - ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) - return ret - prefix = '' - name = '' - args = '' - blocktype = block['block'] - if blocktype == 'program': - return '' - argsl = [] - if 'name' in block: - name = block['name'] - if 'args' in block: - vars = block['vars'] - for a in block['args']: - a = expr2name(a, block, argsl) - if not isintent_callback(vars[a]): - argsl.append(a) - if block['block'] == 'function' or argsl: - args = '(%s)' % ','.join(argsl) - f2pyenhancements = '' - if 'f2pyenhancements' in block: - for k in list(block['f2pyenhancements'].keys()): - f2pyenhancements = '%s%s%s %s' % ( - f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) - intent_lst = block.get('intent', [])[:] - if blocktype == 'function' and 'callback' in intent_lst: - intent_lst.remove('callback') - if intent_lst: - f2pyenhancements = '%s%sintent(%s) %s' %\ - (f2pyenhancements, tab + tabchar, - ','.join(intent_lst), name) - use = '' - if 'use' in block: - use = use2fortran(block['use'], tab + tabchar) - common = '' - if 'common' in block: - common = common2fortran(block['common'], tab + tabchar) - if name == 'unknown_interface': - name = '' - result = '' - if 'result' in block: - result = ' result (%s)' % block['result'] - if block['result'] not in argsl: - argsl.append(block['result']) - body = crack2fortrangen(block['body'], tab + tabchar) - vars = vars2fortran( - block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) - mess = '' - if 'from' in block and not as_interface: - mess = '! 
in %s' % block['from'] - if 'entry' in block: - entry_stmts = '' - for k, i in list(block['entry'].items()): - entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts, tab + tabchar, k, ','.join(i)) - body = body + entry_stmts - if blocktype == 'block data' and name == '_BLOCK_DATA_': - name = '' - ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( - tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) - return ret - - -def common2fortran(common, tab=''): - ret = '' - for k in list(common.keys()): - if k == '_BLNK_': - ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) - else: - ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) - return ret - - -def use2fortran(use, tab=''): - ret = '' - for m in list(use.keys()): - ret = '%s%suse %s,' % (ret, tab, m) - if use[m] == {}: - if ret and ret[-1] == ',': - ret = ret[:-1] - continue - if 'only' in use[m] and use[m]['only']: - ret = '%s only:' % (ret) - if 'map' in use[m] and use[m]['map']: - c = ' ' - for k in list(use[m]['map'].keys()): - if k == use[m]['map'][k]: - ret = '%s%s%s' % (ret, c, k) - c = ',' - else: - ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) - c = ',' - if ret and ret[-1] == ',': - ret = ret[:-1] - return ret - - -def true_intent_list(var): - lst = var['intent'] - ret = [] - for intent in lst: - try: - c = eval('isintent_%s(var)' % intent) - except NameError: - c = 0 - if c: - ret.append(intent) - return ret - - -def vars2fortran(block, vars, args, tab='', as_interface=False): - """ - TODO: - public sub - ... - """ - setmesstext(block) - ret = '' - nout = [] - for a in args: - if a in block['vars']: - nout.append(a) - if 'commonvars' in block: - for a in block['commonvars']: - if a in vars: - if a not in nout: - nout.append(a) - else: - errmess( - 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) - if 'varnames' in block: - nout.extend(block['varnames']) - if not as_interface: - for a in list(vars.keys()): - if a not in nout: - nout.append(a) - for a in nout: - if 'depend' in vars[a]: - for d in vars[a]['depend']: - if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: - errmess( - 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) - if 'externals' in block and a in block['externals']: - if isintent_callback(vars[a]): - ret = '%s%sintent(callback) %s' % (ret, tab, a) - ret = '%s%sexternal %s' % (ret, tab, a) - if isoptional(vars[a]): - ret = '%s%soptional %s' % (ret, tab, a) - if a in vars and 'typespec' not in vars[a]: - continue - cont = 1 - for b in block['body']: - if a == b['name'] and b['block'] == 'function': - cont = 0 - break - if cont: - continue - if a not in vars: - show(vars) - outmess('vars2fortran: No definition for argument "%s".\n' % a) - continue - if a == block['name'] and not block['block'] == 'function': - continue - if 'typespec' not in vars[a]: - if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: - if a in args: - ret = '%s%sexternal %s' % (ret, tab, a) - continue - show(vars[a]) - outmess('vars2fortran: No typespec for argument "%s".\n' % a) - continue - vardef = vars[a]['typespec'] - if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) - selector = {} - if 'kindselector' in vars[a]: - selector = vars[a]['kindselector'] - elif 'charselector' in vars[a]: - selector = vars[a]['charselector'] - if '*' in selector: - if selector['*'] in ['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) - else: - vardef 
= '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) - c = ' ' - if 'attrspec' in vars[a]: - attr = [l for l in vars[a]['attrspec'] - if l not in ['external']] - if attr: - vardef = '%s, %s' % (vardef, ','.join(attr)) - c = ',' - if 'dimension' in vars[a]: - vardef = '%s%sdimension(%s)' % ( - vardef, c, ','.join(vars[a]['dimension'])) - c = ',' - if 'intent' in vars[a]: - lst = true_intent_list(vars[a]) - if lst: - vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) - c = ',' - if 'check' in vars[a]: - vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) - c = ',' - if 'depend' in vars[a]: - vardef = '%s%sdepend(%s)' % ( - vardef, c, ','.join(vars[a]['depend'])) - c = ',' - if '=' in vars[a]: - v = vars[a]['='] - if vars[a]['typespec'] in ['complex', 'double complex']: - try: - v = eval(v) - v = '(%s,%s)' % (v.real, v.imag) - except Exception: - pass - vardef = '%s :: %s=%s' % (vardef, a, v) - else: - vardef = '%s :: %s' % (vardef, a) - ret = '%s%s%s' % (ret, tab, vardef) - return ret -###### - - -def crackfortran(files): - global usermodules - - outmess('Reading fortran codes...\n', 0) - readfortrancode(files, crackline) - outmess('Post-processing...\n', 0) - usermodules = [] - postlist = postcrack(grouplist[0]) - outmess('Post-processing (stage 2)...\n', 0) - postlist = postcrack2(postlist) - return usermodules + postlist - - -def crack2fortran(block): - global f2py_version - - pyf = crack2fortrangen(block) + '\n' - header = """! -*- f90 -*- -! Note: the context of this file is case sensitive. -""" - footer = """ -! This file was auto-generated with f2py (version:%s). -! See http://cens.ioc.ee/projects/f2py2e/ -""" % (f2py_version) - return header + pyf + footer - -if __name__ == "__main__": - files = [] - funcs = [] - f = 1 - f2 = 0 - f3 = 0 - showblocklist = 0 - for l in sys.argv[1:]: - if l == '': - pass - elif l[0] == ':': - f = 0 - elif l == '-quiet': - quiet = 1 - verbose = 0 - elif l == '-verbose': - verbose = 2 - quiet = 0 - elif l == '-fix': - if strictf77: - outmess( - 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) - skipemptyends = 1 - sourcecodeform = 'fix' - elif l == '-skipemptyends': - skipemptyends = 1 - elif l == '--ignore-contains': - ignorecontains = 1 - elif l == '-f77': - strictf77 = 1 - sourcecodeform = 'fix' - elif l == '-f90': - strictf77 = 0 - sourcecodeform = 'free' - skipemptyends = 1 - elif l == '-h': - f2 = 1 - elif l == '-show': - showblocklist = 1 - elif l == '-m': - f3 = 1 - elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) - elif f2: - f2 = 0 - pyffilename = l - elif f3: - f3 = 0 - f77modulename = l - elif f: - try: - open(l).close() - files.append(l) - except IOError as detail: - errmess('IOError: %s\n' % str(detail)) - else: - funcs.append(l) - if not strictf77 and f77modulename and not skipemptyends: - outmess("""\ - Warning: You have specified module name for non Fortran 77 code - that should not need one (expect if you are scanning F90 code - for non module blocks but then you should use flag -skipemptyends - and also be sure that the files do not contain programs without program statement). 
-""", 0) - - postlist = crackfortran(files) - if pyffilename: - outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) - pyf = crack2fortran(postlist) - with open(pyffilename, 'w') as f: - f.write(pyf) - if showblocklist: - show(postlist) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/diagnose.py b/venv/lib/python3.7/site-packages/numpy/f2py/diagnose.py deleted file mode 100644 index 0241fed..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/diagnose.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, absolute_import, print_function - -import os -import sys -import tempfile - - -def run_command(cmd): - print('Running %r:' % (cmd)) - os.system(cmd) - print('------') - - -def run(): - _path = os.getcwd() - os.chdir(tempfile.gettempdir()) - print('------') - print('os.name=%r' % (os.name)) - print('------') - print('sys.platform=%r' % (sys.platform)) - print('------') - print('sys.version:') - print(sys.version) - print('------') - print('sys.prefix:') - print(sys.prefix) - print('------') - print('sys.path=%r' % (':'.join(sys.path))) - print('------') - - try: - import numpy - has_newnumpy = 1 - except ImportError: - print('Failed to import new numpy:', sys.exc_info()[1]) - has_newnumpy = 0 - - try: - from numpy.f2py import f2py2e - has_f2py2e = 1 - except ImportError: - print('Failed to import f2py2e:', sys.exc_info()[1]) - has_f2py2e = 0 - - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError: - print('Failed to import numpy_distutils:', sys.exc_info()[1]) - has_numpy_distutils = 0 - - if has_newnumpy: - try: - print('Found new numpy version %r in %s' % - (numpy.__version__, numpy.__file__)) - except Exception as msg: - print('error:', msg) - print('------') - - if has_f2py2e: - try: - print('Found f2py2e version %r in %s' % - (f2py2e.__version__.version, f2py2e.__file__)) - except Exception as msg: - print('error:', msg) - print('------') - - if has_numpy_distutils: - try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % ( - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print( - 'Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print( - 'Checking availability of supported Fortran compilers:') - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print( - 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.fcompiler ...', end=' ') - import numpy.distutils.fcompiler as fcompiler - else: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - fcompiler.show_fcompilers() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - 
except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.cpuinfo ...', end=' ') - from numpy.distutils.cpuinfo import cpuinfo - print('ok') - print('------') - else: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') - cpu = cpuinfo() - print('CPU information:', end=' ') - for name in dir(cpuinfo): - if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): - print(name[1:], end=' ') - print('------') - except Exception as msg: - print('error:', msg) - print('------') - os.chdir(_path) -if __name__ == "__main__": - run() diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/f2py2e.py b/venv/lib/python3.7/site-packages/numpy/f2py/f2py2e.py deleted file mode 100644 index d03eff9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/f2py2e.py +++ /dev/null @@ -1,696 +0,0 @@ -#!/usr/bin/env python -""" - -f2py2e - Fortran to Python C/API generator. 2nd Edition. - See __usage__ below. - -Copyright 1999--2011 Pearu Peterson all rights reserved, -Pearu Peterson <pearu@cens.ioc.ee> -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 08:31:19 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import pprint -import re - -from . import crackfortran -from . import rules -from . import cb_rules -from . import auxfuncs -from . import cfuncs -from . import f90mod_rules -from . import __version__ -from . import capi_maps - -f2py_version = __version__.version -errmess = sys.stderr.write -# outmess=sys.stdout.write -show = pprint.pprint -outmess = auxfuncs.outmess - -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__usage__ = """\ -Usage: - -1) To construct extension module sources: - - f2py [<options>] <fortran files> [[[only:]||[skip:]] \\ - <fortran functions> ] \\ - [: <fortran files> ...] - -2) To compile fortran files and build extension modules: - - f2py -c [<options>, <build_flib options>, <extra options>] <fortran files> - -3) To generate signature files: - - f2py -h <filename.pyf> ...< same options as in (1) > - -Description: This program generates a Python C/API file (<modulename>module.c) - that contains wrappers for given fortran functions so that they - can be called from Python. With the -c option the corresponding - extension modules are built. - -Options: - - --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] - --2d-numeric Use f2py2e tool with Numeric support. - --2d-numarray Use f2py2e tool with Numarray support. - --g3-numpy Use 3rd generation f2py from the separate f2py package. - [NOT AVAILABLE YET] - - -h <filename> Write signatures of the fortran routines to file <filename> - and exit. You can then edit <filename> and use it instead - of <fortran files>. If <filename>==stdout then the - signatures are printed to stdout. - <fortran functions> Names of fortran routines for which Python C/API - functions will be generated. Default is all that are found - in <fortran files>. - <fortran files> Paths to fortran/signature files that will be scanned for - <fortran functions> in order to determine their signatures. - skip: Ignore fortran functions that follow until `:'. - only: Use only fortran functions that follow until `:'. - : Get back to <fortran files> mode. 
- - -m <modulename> Name of the module; f2py generates a Python/C API - file <modulename>module.c or extension module <modulename>. - Default is 'untitled'. - - --[no-]lower Do [not] lower the cases in <fortran files>. By default, - --lower is assumed with -h key, and --no-lower without -h key. - - --build-dir <dirname> All f2py generated files are created in <dirname>. - Default is tempfile.mkdtemp(). - - --overwrite-signature Overwrite existing signature file. - - --[no-]latex-doc Create (or not) <modulename>module.tex. - Default is --no-latex-doc. - --short-latex Create 'incomplete' LaTeX document (without commands - \\documentclass, \\tableofcontents, and \\begin{document}, - \\end{document}). - - --[no-]rest-doc Create (or not) <modulename>module.rst. - Default is --no-rest-doc. - - --debug-capi Create C/API code that reports the state of the wrappers - during runtime. Useful for debugging. - - --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 - functions. --wrap-functions is default because it ensures - maximum portability/compiler independence. - - --include-paths <path1>:<path2>:... Search include files from the given - directories. - - --help-link [..] List system resources found by system_info.py. See also - --link-<resource> switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - - --f2cmap <filename> Load Fortran-to-Python KIND specification from the given - file. Default: .f2py_f2cmap in current directory. - - --quiet Run quietly. - --verbose Run with extra verbosity. - -v Print f2py version ID and exit. - - -numpy.distutils options (only effective with -c): - - --fcompiler= Specify Fortran compiler type by vendor - --compiler= Specify C compiler type (as defined by distutils) - - --help-fcompiler List available Fortran compilers and exit - --f77exec= Specify the path to F77 compiler - --f90exec= Specify the path to F90 compiler - --f77flags= Specify F77 compiler flags - --f90flags= Specify F90 compiler flags - --opt= Specify optimization flags - --arch= Specify architecture specific optimization flags - --noopt Compile without optimization - --noarch Compile without arch-dependent optimization - --debug Compile with debugging information - -Extra options (only effective with -c): - - --link-<resource> Link extension module with <resource> as defined - by numpy.distutils/system_info.py. E.g. to link - with optimized LAPACK libraries (vecLib on MacOSX, - ATLAS elsewhere), use --link-lapack_opt. - See also --help-link switch. - - -L/path/to/lib/ -l<libname> - -D<define> -U<name> - -I/path/to/include/ - <filename>.o <filename>.so <filename>.a - - Using the following macros may be required with non-gcc Fortran - compilers: - -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN - -DUNDERSCORE_G77 - - When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY - interface is printed out at exit (platforms: Linux). - - When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is - sent to stderr whenever F2PY interface makes a copy of an - array. Integer <int> sets the threshold for array sizes when - a message should be shown. - -Version: %s -numpy Version: %s -Requires: Python 2.3 or higher. -License: NumPy license (see LICENSE.txt in the NumPy source code) -Copyright 1999 - 2011 Pearu Peterson all rights reserved. -http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version) - - -def scaninputline(inputline): - files, skipfuncs, onlyfuncs, debug = [], [], [], [] - f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0 - verbose = 1 - dolc = -1 - dolatexdoc = 0 - dorestdoc = 0 - wrapfuncs = 1 - buildpath = '.' 
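# ----------------------------------------------------------------------
# Example (sketch, not the module's API): scaninputline above consumes
# option values with one-shot flags -- e.g. seeing '-m' sets f3 so that
# the *next* token is taken as the module name. The same idiom, minimally:
def scan(argv):
    modulename, want_module, files = None, False, []
    for tok in argv:
        if want_module:
            modulename, want_module = tok, False
        elif tok == '-m':
            want_module = True
        else:
            files.append(tok)
    return modulename, files
print(scan(['-m', 'mymod', 'foo.f', 'bar.f']))  # ('mymod', ['foo.f', 'bar.f'])
# ----------------------------------------------------------------------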
- include_paths = [] - signsfile, modulename = None, None - options = {'buildpath': buildpath, - 'coutput': None, - 'f2py_wrapper_output': None} - for l in inputline: - if l == '': - pass - elif l == 'only:': - f = 0 - elif l == 'skip:': - f = -1 - elif l == ':': - f = 1 - elif l[:8] == '--debug-': - debug.append(l[8:]) - elif l == '--lower': - dolc = 1 - elif l == '--build-dir': - f6 = 1 - elif l == '--no-lower': - dolc = 0 - elif l == '--quiet': - verbose = 0 - elif l == '--verbose': - verbose += 1 - elif l == '--latex-doc': - dolatexdoc = 1 - elif l == '--no-latex-doc': - dolatexdoc = 0 - elif l == '--rest-doc': - dorestdoc = 1 - elif l == '--no-rest-doc': - dorestdoc = 0 - elif l == '--wrap-functions': - wrapfuncs = 1 - elif l == '--no-wrap-functions': - wrapfuncs = 0 - elif l == '--short-latex': - options['shortlatex'] = 1 - elif l == '--coutput': - f8 = 1 - elif l == '--f2py-wrapper-output': - f9 = 1 - elif l == '--f2cmap': - f10 = 1 - elif l == '--overwrite-signature': - options['h-overwrite'] = 1 - elif l == '-h': - f2 = 1 - elif l == '-m': - f3 = 1 - elif l[:2] == '-v': - print(f2py_version) - sys.exit() - elif l == '--show-compilers': - f5 = 1 - elif l[:8] == '-include': - cfuncs.outneeds['userincludes'].append(l[9:-1]) - cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] - elif l[:15] in '--include_paths': - outmess( - 'f2py option --include_paths is deprecated, use --include-paths instead.\n') - f7 = 1 - elif l[:15] in '--include-paths': - f7 = 1 - elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) - sys.exit() - elif f2: - f2 = 0 - signsfile = l - elif f3: - f3 = 0 - modulename = l - elif f6: - f6 = 0 - buildpath = l - elif f7: - f7 = 0 - include_paths.extend(l.split(os.pathsep)) - elif f8: - f8 = 0 - options["coutput"] = l - elif f9: - f9 = 0 - options["f2py_wrapper_output"] = l - elif f10: - f10 = 0 - options["f2cmap_file"] = l - elif f == 1: - try: - with open(l): - pass - files.append(l) - except IOError as detail: - errmess('IOError: %s. Skipping file "%s".\n' % - (str(detail), l)) - elif f == -1: - skipfuncs.append(l) - elif f == 0: - onlyfuncs.append(l) - if not f5 and not files and not modulename: - print(__usage__) - sys.exit() - if not os.path.isdir(buildpath): - if not verbose: - outmess('Creating build directory %s' % (buildpath)) - os.mkdir(buildpath) - if signsfile: - signsfile = os.path.join(buildpath, signsfile) - if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: - errmess( - 'Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n' % (signsfile)) - sys.exit() - - options['debug'] = debug - options['verbose'] = verbose - if dolc == -1 and not signsfile: - options['do-lower'] = 0 - else: - options['do-lower'] = dolc - if modulename: - options['module'] = modulename - if signsfile: - options['signsfile'] = signsfile - if onlyfuncs: - options['onlyfuncs'] = onlyfuncs - if skipfuncs: - options['skipfuncs'] = skipfuncs - options['dolatexdoc'] = dolatexdoc - options['dorestdoc'] = dorestdoc - options['wrapfuncs'] = wrapfuncs - options['buildpath'] = buildpath - options['include_paths'] = include_paths - options.setdefault('f2cmap_file', None) - return files, options - - -def callcrackfortran(files, options): - rules.options = options - crackfortran.debug = options['debug'] - crackfortran.verbose = options['verbose'] - if 'module' in options: - crackfortran.f77modulename = options['module'] - if 'skipfuncs' in options: - crackfortran.skipfuncs = options['skipfuncs'] - if 'onlyfuncs' in options: - crackfortran.onlyfuncs = options['onlyfuncs'] - crackfortran.include_paths[:] = options['include_paths'] - crackfortran.dolowercase = options['do-lower'] - postlist = crackfortran.crackfortran(files) - if 'signsfile' in options: - outmess('Saving signatures to file "%s"\n' % (options['signsfile'])) - pyf = crackfortran.crack2fortran(postlist) - if options['signsfile'][-6:] == 'stdout': - sys.stdout.write(pyf) - else: - with open(options['signsfile'], 'w') as f: - f.write(pyf) - if options["coutput"] is None: - for mod in postlist: - mod["coutput"] = "%smodule.c" % mod["name"] - else: - for mod in postlist: - mod["coutput"] = options["coutput"] - if options["f2py_wrapper_output"] is None: - for mod in postlist: - mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] - else: - for mod in postlist: - mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] - return postlist - - -def buildmodules(lst): - cfuncs.buildcfuncs() - outmess('Building modules...\n') - modules, mnames, isusedby = [], [], {} - for i in range(len(lst)): - if '__user__' in lst[i]['name']: - cb_rules.buildcallbacks(lst[i]) - else: - if 'use' in lst[i]: - for u in lst[i]['use'].keys(): - if u not in isusedby: - isusedby[u] = [] - isusedby[u].append(lst[i]['name']) - modules.append(lst[i]) - mnames.append(lst[i]['name']) - ret = {} - for i in range(len(mnames)): - if mnames[i] in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n' % ( - mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]]))) - else: - um = [] - if 'use' in modules[i]: - for u in modules[i]['use'].keys(): - if u in isusedby and u in mnames: - um.append(modules[mnames.index(u)]) - else: - outmess( - '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u)) - ret[mnames[i]] = {} - dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um)) - return ret - - -def dict_append(d_out, d_in): - for (k, v) in d_in.items(): - if k not in d_out: - d_out[k] = [] - if isinstance(v, list): - d_out[k] = d_out[k] + v - else: - d_out[k].append(v) - - -def run_main(comline_list): - """ - Equivalent to running:: - - f2py - - where ``=string.join(,' ')``, but in Python. Unless - ``-h`` is used, this function returns a dictionary containing - information on generated modules and their dependencies on source - files. For example, the command ``f2py -m scalar scalar.f`` can be - executed from Python as follows - - You cannot build extension modules with this function, that is, - using ``-c`` is not allowed. 
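For instance, a minimal sketch of the call promised above (the file name scalar.f is the docstring's own example; run_main is the public numpy.f2py entry point):

    from numpy.f2py import run_main

    # Equivalent to the shell command `f2py -m scalar scalar.f`.
    # Returns a dict keyed by module name describing the generated C
    # sources and their source-file dependencies; with -h it writes the
    # signature file and returns nothing.
    deps = run_main(['-m', 'scalar', 'scalar.f'])
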
Use ``compile`` command instead - - Examples - -------- - .. include:: run_main_session.dat - :literal: - - """ - crackfortran.reset_global_f2py_vars() - f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) - fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') - fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') - files, options = scaninputline(comline_list) - auxfuncs.options = options - capi_maps.load_f2cmap_file(options['f2cmap_file']) - postlist = callcrackfortran(files, options) - isusedby = {} - for i in range(len(postlist)): - if 'use' in postlist[i]: - for u in postlist[i]['use'].keys(): - if u not in isusedby: - isusedby[u] = [] - isusedby[u].append(postlist[i]['name']) - for i in range(len(postlist)): - if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']: - if postlist[i]['name'] in isusedby: - # if not quiet: - outmess('Skipping Makefile build for module "%s" which is used by %s\n' % ( - postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]]))) - if 'signsfile' in options: - if options['verbose'] > 1: - outmess( - 'Stopping. Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n' % - (os.path.basename(sys.argv[0]), options['signsfile'])) - return - for i in range(len(postlist)): - if postlist[i]['block'] != 'python module': - if 'python module' not in options: - errmess( - 'Tip: If your original code is Fortran source then you must use -m option.\n') - raise TypeError('All blocks must be python module blocks but got %s' % ( - repr(postlist[i]['block']))) - auxfuncs.debugoptions = options['debug'] - f90mod_rules.options = options - auxfuncs.wrapfuncs = options['wrapfuncs'] - - ret = buildmodules(postlist) - - for mn in ret.keys(): - dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) - return ret - - -def filter_files(prefix, suffix, files, remove_prefix=None): - """ - Filter files by prefix and suffix. - """ - filtered, rest = [], [] - match = re.compile(prefix + r'.*' + suffix + r'\Z').match - if remove_prefix: - ind = len(prefix) - else: - ind = 0 - for file in [x.strip() for x in files]: - if match(file): - filtered.append(file[ind:]) - else: - rest.append(file) - return filtered, rest - - -def get_prefix(module): - p = os.path.dirname(os.path.dirname(module.__file__)) - return p - - -def run_compile(): - """ - Do it all in one call! 
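Because run_compile() parses sys.argv itself, a programmatic call has to stage the argument vector first; a hedged sketch, with the module name demo and the file demo.f90 both hypothetical:

    import sys
    from numpy.f2py import f2py2e

    # run_compile() requires '-c' to be present in sys.argv and strips it
    # itself, then splits the remainder into f2py, fcompiler and setup
    # flag groups before handing off to numpy.distutils.
    sys.argv = ['f2py', '-c', '-m', 'demo', 'demo.f90']
    f2py2e.run_compile()
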
- """ - import tempfile - - i = sys.argv.index('-c') - del sys.argv[i] - - remove_build_dir = 0 - try: - i = sys.argv.index('--build-dir') - except ValueError: - i = None - if i is not None: - build_dir = sys.argv[i + 1] - del sys.argv[i + 1] - del sys.argv[i] - else: - remove_build_dir = 1 - build_dir = tempfile.mkdtemp() - - _reg1 = re.compile(r'[-][-]link[-]') - sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] - if sysinfo_flags: - sysinfo_flags = [f[7:] for f in sysinfo_flags] - - _reg2 = re.compile( - r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include') - f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] - f2py_flags2 = [] - fl = 0 - for a in sys.argv[1:]: - if a in ['only:', 'skip:']: - fl = 1 - elif a == ':': - fl = 0 - if fl or a == ':': - f2py_flags2.append(a) - if f2py_flags2 and f2py_flags2[-1] != ':': - f2py_flags2.append(':') - f2py_flags.extend(f2py_flags2) - - sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] - _reg3 = re.compile( - r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)') - flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in flib_flags] - _reg4 = re.compile( - r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))') - fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in fc_flags] - - if 1: - del_list = [] - for s in flib_flags: - v = '--fcompiler=' - if s[:len(v)] == v: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print('Unknown vendor: "%s"' % (s[len(v):])) - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv - continue - for s in del_list: - i = flib_flags.index(s) - del flib_flags[i] - assert len(flib_flags) <= 2, repr(flib_flags) - - _reg5 = re.compile(r'[-][-](verbose)') - setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in setup_flags] - - if '--quiet' in f2py_flags: - setup_flags.append('--quiet') - - modulename = 'untitled' - sources = sys.argv[1:] - - for optname in ['--include_paths', '--include-paths', '--f2cmap']: - if optname in sys.argv: - i = sys.argv.index(optname) - f2py_flags.extend(sys.argv[i:i + 2]) - del sys.argv[i + 1], sys.argv[i] - sources = sys.argv[1:] - - if '-m' in sys.argv: - i = sys.argv.index('-m') - modulename = sys.argv[i + 1] - del sys.argv[i + 1], sys.argv[i] - sources = sys.argv[1:] - else: - from numpy.distutils.command.build_src import get_f2py_modulename - pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) - sources = pyf_files + sources - for f in pyf_files: - modulename = get_f2py_modulename(f) - if modulename: - break - - extra_objects, sources = filter_files('', '[.](o|a|so)', sources) - include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1) - library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) - libraries, sources = filter_files('-l', '', sources, remove_prefix=1) - undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) - define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) - for i in 
range(len(define_macros)): - name_value = define_macros[i].split('=', 1) - if len(name_value) == 1: - name_value.append(None) - if len(name_value) == 2: - define_macros[i] = tuple(name_value) - else: - print('Invalid use of -D:', name_value) - - from numpy.distutils.system_info import get_info - - num_info = {} - if num_info: - include_dirs.extend(num_info.get('include_dirs', [])) - - from numpy.distutils.core import setup, Extension - ext_args = {'name': modulename, 'sources': sources, - 'include_dirs': include_dirs, - 'library_dirs': library_dirs, - 'libraries': libraries, - 'define_macros': define_macros, - 'undef_macros': undef_macros, - 'extra_objects': extra_objects, - 'f2py_options': f2py_flags, - } - - if sysinfo_flags: - from numpy.distutils.misc_util import dict_append - for n in sysinfo_flags: - i = get_info(n) - if not i: - outmess('No %s resources found in system' - ' (try `f2py --help-link`)\n' % (repr(n))) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - sys.argv = [sys.argv[0]] + setup_flags - sys.argv.extend(['build', - '--build-temp', build_dir, - '--build-base', build_dir, - '--build-platlib', '.']) - if fc_flags: - sys.argv.extend(['config_fc'] + fc_flags) - if flib_flags: - sys.argv.extend(['build_ext'] + flib_flags) - - setup(ext_modules=[ext]) - - if remove_build_dir and os.path.exists(build_dir): - import shutil - outmess('Removing build directory %s\n' % (build_dir)) - shutil.rmtree(build_dir) - - -def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - from numpy.distutils.system_info import show_all - show_all() - return - - # Probably outdated options that were not working before 1.16 - if '--g3-numpy' in sys.argv[1:]: - sys.stderr.write("G3 f2py support is not implemented, yet.\\n") - sys.exit(1) - elif '--2e-numeric' in sys.argv[1:]: - sys.argv.remove('--2e-numeric') - elif '--2e-numarray' in sys.argv[1:]: - # Note that this errors becaust the -DNUMARRAY argument is - # not recognized. Just here for back compatibility and the - # error message. 
- sys.argv.append("-DNUMARRAY") - sys.argv.remove('--2e-numarray') - elif '--2e-numpy' in sys.argv[1:]: - sys.argv.remove('--2e-numpy') - else: - pass - - if '-c' in sys.argv[1:]: - run_compile() - else: - run_main(sys.argv[1:]) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/f2py_testing.py b/venv/lib/python3.7/site-packages/numpy/f2py/f2py_testing.py deleted file mode 100644 index f5d5fa6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/f2py_testing.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re - -from numpy.testing import jiffies, memusage - - -def cmdline(): - m = re.compile(r'\A\d+\Z') - args = [] - repeat = 1 - for a in sys.argv[1:]: - if m.match(a): - repeat = eval(a) - else: - args.append(a) - f2py_opts = ' '.join(args) - return repeat, f2py_opts - - -def run(runtest, test_functions, repeat=1): - l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] - start_memusage = memusage() - diff_memusage = None - start_jiffies = jiffies() - i = 0 - while i < repeat: - i += 1 - for t, fname in l: - runtest(t) - if start_memusage is None: - continue - if diff_memusage is None: - diff_memusage = memusage() - start_memusage - else: - diff_memusage2 = memusage() - start_memusage - if diff_memusage2 != diff_memusage: - print('memory usage change at step %i:' % i, - diff_memusage2 - diff_memusage, - fname) - diff_memusage = diff_memusage2 - current_memusage = memusage() - print('run', repeat * len(test_functions), 'tests', - 'in %.2f seconds' % ((jiffies() - start_jiffies) / 100.0)) - if start_memusage: - print('initial virtual memory size:', start_memusage, 'bytes') - print('current virtual memory size:', current_memusage, 'bytes') diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/f90mod_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/f90mod_rules.py deleted file mode 100644 index 85eae80..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/f90mod_rules.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env python -""" - -Build F90 module support for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/02/03 19:30:23 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.27 $"[10:-1] - -f2py_version = 'See `f2py -v`' - -import numpy as np - -from . import capi_maps -from . import func2subr -from .crackfortran import undo_rmbadname, undo_rmbadname1 - -# The eviroment provided by auxfuncs.py is needed for some calls to eval. -# As the needed functions cannot be determined by static inspection of the -# code, it is safest to use import * pending a major refactoring of f2py. -from .auxfuncs import * - -options = {} - - -def findf90modules(m): - if ismodule(m): - return [m] - if not hasbody(m): - return [] - ret = [] - for b in m['body']: - if ismodule(b): - ret.append(b) - else: - ret = ret + findf90modules(b) - return ret - -fgetdims1 = """\ - external f2pysetdata - logical ns - integer r,i - integer(%d) s(*) - ns = .FALSE. - if (allocated(d)) then - do i=1,r - if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. 
- end if - end do - if (ns) then - deallocate(d) - end if - end if - if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize - -fgetdims2 = """\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - end if - flag = 1 - call f2pysetdata(d,allocated(d))""" - -fgetdims2_sa = """\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - !s(r) must be equal to len(d(1)) - end if - flag = 2 - call f2pysetdata(d,allocated(d))""" - - -def buildhooks(pymod): - global fgetdims1, fgetdims2 - from . import rules - ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], - 'need': ['F_FUNC', 'arrayobject.h'], - 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, - 'docs': ['"Fortran 90/95 modules:\\n"'], - 'latexdoc': []} - fhooks = [''] - - def fadd(line, s=fhooks): - s[0] = '%s\n %s' % (s[0], line) - doc = [''] - - def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) - for m in findf90modules(pymod): - sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ - m['name']], [] - sargsp = [] - ifargs = [] - mfargs = [] - if hasbody(m): - for b in m['body']: - notvars.append(b['name']) - for n in m['vars'].keys(): - var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): - onlyvars.append(n) - mfargs.append(n) - outmess('\t\tConstructing F90 module support for "%s"...\n' % - (m['name'])) - if onlyvars: - outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) - chooks = [''] - - def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) - ihooks = [''] - - def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) - - vrd = capi_maps.modsign2map(m) - cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) - dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) - if hasnote(m): - note = m['note'] - if isinstance(note, list): - note = '\n'.join(note) - dadd(note) - if onlyvars: - dadd('\\begin{description}') - for n in onlyvars: - var = m['vars'][n] - modobjs.append(n) - ct = capi_maps.getctype(var) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n, var) - dms = dm['dims'].replace('*', '-1').strip() - dms = dms.replace(':', '-1').strip() - if not dms: - dms = '-1' - use_fgetdims2 = fgetdims2 - if isstringarray(var): - if 'charselector' in var and 'len' in var['charselector']: - cadd('\t{"%s",%s,{{%s,%s}},%s},' - % (undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at)) - use_fgetdims2 = fgetdims2_sa - else: - cadd('\t{"%s",%s,{{%s}},%s},' % - (undo_rmbadname1(n), dm['rank'], dms, at)) - else: - cadd('\t{"%s",%s,{{%s}},%s},' % - (undo_rmbadname1(n), dm['rank'], dms, at)) - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, var))) - if hasnote(var): - note = var['note'] - if isinstance(note, list): - note = '\n'.join(note) - dadd('--- %s' % (note)) - if isallocatable(var): - fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) - efargs.append(fargs[-1]) - sargs.append( - 'void (*%s)(int*,int*,void(*)(char*,int*),int*)' % (n)) - sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) - fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) - fadd('use %s, only: d => %s\n' % - (m['name'], undo_rmbadname1(n))) - fadd('integer flag\n') - fhooks[0] = fhooks[0] + fgetdims1 - dms = eval('range(1,%s+1)' % (dm['rank'])) - fadd(' allocate(d(%s))\n' % - (','.join(['s(%s)' % i for i in dms]))) - fhooks[0] = fhooks[0] + use_fgetdims2 - fadd('end subroutine %s' % (fargs[-1])) - else: 
- fargs.append(n) - sargs.append('char *%s' % (n)) - sargsp.append('char*') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) - if onlyvars: - dadd('\\end{description}') - if hasbody(m): - for b in m['body']: - if not isroutine(b): - print('Skipping', b['block'], b['name']) - continue - modobjs.append('%s()' % (b['name'])) - b['modulename'] = m['name'] - api, wrap = rules.buildapi(b) - if isfunction(b): - fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) - ifargs.append(func2subr.createfuncwrapper(b, signature=1)) - else: - if wrap: - fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) - ifargs.append( - func2subr.createsubrwrapper(b, signature=1)) - else: - fargs.append(b['name']) - mfargs.append(fargs[-1]) - api['externroutines'] = [] - ar = applyrules(api, vrd) - ar['docs'] = [] - ar['docshort'] = [] - ret = dictappend(ret, ar) - cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},' % - (b['name'], m['name'], b['name'], m['name'], b['name'])) - sargs.append('char *%s' % (b['name'])) - sargsp.append('char *') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % - (m['name'], b['name'])) - cadd('\t{NULL}\n};\n') - iadd('}') - ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( - m['name'], ','.join(sargs), ihooks[0]) - if '_' in m['name']: - F_FUNC = 'F_FUNC_US' - else: - F_FUNC = 'F_FUNC' - iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' - % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) - iadd('static void f2py_init_%s(void) {' % (m['name'])) - iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, m['name'], m['name'].upper(), m['name'])) - iadd('}\n') - ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks - ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( - m['name'], m['name'], m['name'])] + ret['initf90modhooks'] - fadd('') - fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) - if mfargs: - for a in undo_rmbadname(mfargs): - fadd('use %s, only : %s' % (m['name'], a)) - if ifargs: - fadd(' '.join(['interface'] + ifargs)) - fadd('end interface') - fadd('external f2pysetupfunc') - if efargs: - for a in undo_rmbadname(efargs): - fadd('external %s' % (a)) - fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) - fadd('end subroutine f2pyinit%s\n' % (m['name'])) - - dadd('\n'.join(ret['latexdoc']).replace( - r'\subsection{', r'\subsubsection{')) - - ret['latexdoc'] = [] - ret['docs'].append('"\t%s --- %s"' % (m['name'], - ','.join(undo_rmbadname(modobjs)))) - - ret['routine_defs'] = '' - ret['doc'] = [] - ret['docshort'] = [] - ret['latexdoc'] = doc[0] - if len(ret['docs']) <= 1: - ret['docs'] = '' - return ret, fhooks[0] diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/func2subr.py b/venv/lib/python3.7/site-packages/numpy/f2py/func2subr.py deleted file mode 100644 index 6010d5a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/func2subr.py +++ /dev/null @@ -1,299 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
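The net effect of the module hooks assembled above is that Fortran 90/95 module variables become attributes on the Python side; a hedged sketch of the resulting usage, where the extension pars_ext and the Fortran module pars are hypothetical and shown for illustration only:

    import numpy as np
    import pars_ext  # hypothetical extension built by f2py from a module `pars`

    # Assigning to an allocatable module array goes through the generated
    # getdims/setdata helpers, which allocate it inside the Fortran module.
    pars_ext.pars.x = np.arange(5.0)
    print(pars_ext.pars.x)
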
-$Date: 2004/11/26 11:13:06 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.16 $"[10:-1] - -f2py_version = 'See `f2py -v`' - -import copy - -from .auxfuncs import ( - getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, - isintent_out, islogicalfunction, ismoduleroutine, isscalar, - issubroutine, issubroutine_wrap, outmess, show -) - - -def var2fixfortran(vars, a, fa=None, f90mode=None): - if fa is None: - fa = a - if a not in vars: - show(vars) - outmess('var2fixfortran: No definition for argument "%s".\n' % a) - return '' - if 'typespec' not in vars[a]: - show(vars[a]) - outmess('var2fixfortran: No typespec for argument "%s".\n' % a) - return '' - vardef = vars[a]['typespec'] - if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) - selector = {} - lk = '' - if 'kindselector' in vars[a]: - selector = vars[a]['kindselector'] - lk = 'kind' - elif 'charselector' in vars[a]: - selector = vars[a]['charselector'] - lk = 'len' - if '*' in selector: - if f90mode: - if selector['*'] in ['*', ':', '(*)']: - vardef = '%s(len=*)' % (vardef) - else: - vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) - else: - if selector['*'] in ['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) - else: - vardef = '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) - - vardef = '%s %s' % (vardef, fa) - if 'dimension' in vars[a]: - vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) - return vardef - - -def createfuncwrapper(rout, signature=0): - assert isfunction(rout) - - extra_args = [] - vars = rout['vars'] - for a in rout['args']: - v = rout['vars'][a] - for i, d in enumerate(v.get('dimension', [])): - if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) - extra_args.append(dn) - vars[dn] = dv - v['dimension'][i] = dn - rout['args'].extend(extra_args) - need_interface = bool(extra_args) - - ret = [''] - - def add(line, ret=ret): - ret[0] = '%s\n %s' % (ret[0], line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - newname = '%sf2pywrap' % (name) - - if newname not in vars: - vars[newname] = vars[name] - args = [newname] + rout['args'][1:] - else: - args = [newname] + rout['args'] - - l = var2fixfortran(vars, name, newname, f90mode) - if l[:13] == 'character*(*)': - if f90mode: - l = 'character(len=10)' + l[13:] - else: - l = 'character*10' + l[13:] - charselect = vars[name]['charselector'] - if charselect.get('*', '') == '(*)': - charselect['*'] = '10' - sargs = ', '.join(args) - if f90mode: - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) - if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) - else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) - if not need_interface: - add('external %s' % (fortranname)) - l = l + ', ' + fortranname - if need_interface: - for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): - add(line) - - args = args[1:] - dumped_args = [] - for a in args: - if isexternal(vars[a]): - add('external %s' % (a)) - dumped_args.append(a) - for a in args: - if a in 
dumped_args: - continue - if isscalar(vars[a]): - add(var2fixfortran(vars, a, f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - if isintent_in(vars[a]): - add(var2fixfortran(vars, a, f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - add(var2fixfortran(vars, a, f90mode=f90mode)) - - add(l) - - if need_interface: - if f90mode: - # f90 module already defines needed interface - pass - else: - add('interface') - add(rout['saved_interface'].lstrip()) - add('end interface') - - sargs = ', '.join([a for a in args if a not in extra_args]) - - if not signature: - if islogicalfunction(rout): - add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) - else: - add('%s = %s(%s)' % (newname, fortranname, sargs)) - if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) - else: - add('end') - return ret[0] - - -def createsubrwrapper(rout, signature=0): - assert issubroutine(rout) - - extra_args = [] - vars = rout['vars'] - for a in rout['args']: - v = rout['vars'][a] - for i, d in enumerate(v.get('dimension', [])): - if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) - extra_args.append(dn) - vars[dn] = dv - v['dimension'][i] = dn - rout['args'].extend(extra_args) - need_interface = bool(extra_args) - - ret = [''] - - def add(line, ret=ret): - ret[0] = '%s\n %s' % (ret[0], line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - - args = rout['args'] - - sargs = ', '.join(args) - if f90mode: - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) - if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) - else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) - if not need_interface: - add('external %s' % (fortranname)) - - if need_interface: - for line in rout['saved_interface'].split('\n'): - if line.lstrip().startswith('use '): - add(line) - - dumped_args = [] - for a in args: - if isexternal(vars[a]): - add('external %s' % (a)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - if isscalar(vars[a]): - add(var2fixfortran(vars, a, f90mode=f90mode)) - dumped_args.append(a) - for a in args: - if a in dumped_args: - continue - add(var2fixfortran(vars, a, f90mode=f90mode)) - - if need_interface: - if f90mode: - # f90 module already defines needed interface - pass - else: - add('interface') - add(rout['saved_interface'].lstrip()) - add('end interface') - - sargs = ', '.join([a for a in args if a not in extra_args]) - - if not signature: - add('call %s(%s)' % (fortranname, sargs)) - if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) - else: - add('end') - return ret[0] - - -def assubr(rout): - if isfunction_wrap(rout): - fortranname = getfortranname(rout) - name = rout['name'] - outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( - name, fortranname)) - rout = copy.copy(rout) - fname = name - rname = fname - if 'result' in rout: - rname = rout['result'] - rout['vars'][fname] = rout['vars'][rname] - fvar = rout['vars'][fname] - if not isintent_out(fvar): - if 'intent' not in fvar: - fvar['intent'] = [] - fvar['intent'].append('out') - flag = 1 - for i in fvar['intent']: - if i.startswith('out='): - flag = 0 - break - if flag: - fvar['intent'].append('out=%s' % (rname)) - rout['args'][:] = [fname] + rout['args'] - return rout, createfuncwrapper(rout) 
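Traced through the code above (the naming patterns 'f2pywrap%s' for the wrapper and '%sf2pywrap' for the result variable), the output of createfuncwrapper() for a hypothetical one-argument function

          real function square(a)
          real a
          square = a*a
          end

is roughly this generated helper subroutine in the -f2pywrappers.f file:

          subroutine f2pywrapsquare (squaref2pywrap, a)
          external square
          real a
          real squaref2pywrap, square
          squaref2pywrap = square(a)
          end
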
- if issubroutine_wrap(rout): - fortranname = getfortranname(rout) - name = rout['name'] - outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' % ( - name, fortranname)) - rout = copy.copy(rout) - return rout, createsubrwrapper(rout) - return rout, '' diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/rules.py deleted file mode 100644 index f2f713b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/rules.py +++ /dev/null @@ -1,1488 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Here is a skeleton of a new wrapper function (13Dec2001): - -wrapper_function(args) - declarations - get_python_arguments, say, `a' and `b' - - get_a_from_python - if (successful) { - - get_b_from_python - if (successful) { - - callfortran - if (successful) { - - put_a_to_python - if (successful) { - - put_b_to_python - if (successful) { - - buildvalue = ... - - } - - } - - } - - } - cleanup_b - - } - cleanup_a - - return buildvalue - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/08/30 08:58:42 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.129 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -import os -import time -import copy - -from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, - hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote, - isarray, isarrayofstrings, iscomplex, iscomplexarray, - iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal, - isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c, - isintent_callback, isintent_copy, isintent_hide, isintent_inout, - isintent_nothide, isintent_out, isintent_overwrite, islogical, - islong_complex, islong_double, islong_doublefunction, islong_long, - islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar, - issigned_long_longarray, isstring, isstringarray, isstringfunction, - issubroutine, issubroutine_wrap, isthreadsafe, isunsigned, - isunsigned_char, isunsigned_chararray, isunsigned_long_long, - isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, - l_and, l_not, l_or, outmess, replace, stripcomma, -) - -from . import capi_maps -from . import cfuncs -from . import common_rules -from . import use_rules -from . import f90mod_rules -from . import func2subr - -options = {} -sepdict = {} -#for k in ['need_cfuncs']: sepdict[k]=',' -for k in ['decl', - 'frompyobj', - 'cleanupfrompyobj', - 'topyarr', 'method', - 'pyobjfrom', 'closepyobjfrom', - 'freemem', - 'userincludes', - 'includes0', 'includes', 'typedefs', 'typedefs_generated', - 'cppmacros', 'cfuncs', 'callbacks', - 'latexdoc', - 'restdoc', - 'routine_defs', 'externroutines', - 'initf2pywraphooks', - 'commonhooks', 'initcommonhooks', - 'f90modhooks', 'initf90modhooks']: - sepdict[k] = '\n' - -#################### Rules for C/API module ################# - -generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) -module_rules = { - 'modulebody': """\ -/* File: #modulename#module.c - * This file is auto-generated with f2py (version:#f2py_version#). - * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, - * written by Pearu Peterson . 
- * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """ - * Do not edit this file directly unless you know what you are doing!!! - */ - -#ifdef __cplusplus -extern \"C\" { -#endif - -""" + gentitle("See f2py2e/cfuncs.py: includes") + """ -#includes# -#includes0# - -""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """ -static PyObject *#modulename#_error; -static PyObject *#modulename#_module; - -""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """ -#typedefs# - -""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """ -#typedefs_generated# - -""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """ -#cppmacros# - -""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """ -#cfuncs# - -""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """ -#userincludes# - -""" + gentitle("See f2py2e/capi_rules.py: usercode") + """ -#usercode# - -/* See f2py2e/rules.py */ -#externroutines# - -""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """ -#usercode1# - -""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """ -#callbacks# - -""" + gentitle("See f2py2e/rules.py: buildapi") + """ -#body# - -""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """ -#f90modhooks# - -""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """ - -""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """ -#commonhooks# - -""" + gentitle("See f2py2e/rules.py") + """ - -static FortranDataDef f2py_routine_defs[] = { -#routine_defs# -\t{NULL} -}; - -static PyMethodDef f2py_module_methods[] = { -#pymethoddef# -\t{NULL,NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { -\tPyModuleDef_HEAD_INIT, -\t"#modulename#", -\tNULL, -\t-1, -\tf2py_module_methods, -\tNULL, -\tNULL, -\tNULL, -\tNULL -}; -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define RETVAL m -PyMODINIT_FUNC PyInit_#modulename#(void) { -#else -#define RETVAL -PyMODINIT_FUNC init#modulename#(void) { -#endif -\tint i; -\tPyObject *m,*d, *s, *tmp; -#if PY_VERSION_HEX >= 0x03000000 -\tm = #modulename#_module = PyModule_Create(&moduledef); -#else -\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods); -#endif -\tPy_TYPE(&PyFortran_Type) = &PyType_Type; -\timport_array(); -\tif (PyErr_Occurred()) -\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;} -\td = PyModule_GetDict(m); -\ts = PyString_FromString(\"$R""" + """evision: $\"); -\tPyDict_SetItemString(d, \"__version__\", s); -\tPy_DECREF(s); -#if PY_VERSION_HEX >= 0x03000000 -\ts = PyUnicode_FromString( -#else -\ts = PyString_FromString( -#endif -\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); -\tPyDict_SetItemString(d, \"__doc__\", s); -\tPy_DECREF(s); -\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); -\t/* -\t * Store the error object inside the dict, so that it could get deallocated. -\t * (in practice, this is a module, so it likely will not and cannot.) -\t */ -\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); -\tPy_DECREF(#modulename#_error); -\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) { -\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); -\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); -\t\tPy_DECREF(tmp); -\t} -#initf2pywraphooks# -#initf90modhooks# -#initcommonhooks# -#interface_usercode# - -#ifdef F2PY_REPORT_ATEXIT -\tif (! 
PyErr_Occurred()) -\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); -#endif -\treturn RETVAL; -} -#ifdef __cplusplus -} -#endif -""", - 'separatorsfor': {'latexdoc': '\n\n', - 'restdoc': '\n\n'}, - 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', - '#modnote#\n', - '#latexdoc#'], - 'restdoc': ['Module #modulename#\n' + '=' * 80, - '\n#restdoc#'] -} - -defmod_rules = [ - {'body': '/*eof body*/', - 'method': '/*eof method*/', - 'externroutines': '/*eof externroutines*/', - 'routine_defs': '/*eof routine_defs*/', - 'initf90modhooks': '/*eof initf90modhooks*/', - 'initf2pywraphooks': '/*eof initf2pywraphooks*/', - 'initcommonhooks': '/*eof initcommonhooks*/', - 'latexdoc': '', - 'restdoc': '', - 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, - } -] - -routine_rules = { - 'separatorsfor': sepdict, - 'body': """ -#begintitle# -static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; -/* #declfortranroutine# */ -static PyObject *#apiname#(const PyObject *capi_self, - PyObject *capi_args, - PyObject *capi_keywds, - #functype# (*f2py_func)(#callprotoargument#)) { -\tPyObject * volatile capi_buildvalue = NULL; -\tvolatile int f2py_success = 1; -#decl# -\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; -#usercode# -#routdebugenter# -#ifdef F2PY_REPORT_ATEXIT -f2py_start_clock(); -#endif -\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ -\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ -\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; -#frompyobj# -/*end of frompyobj*/ -#ifdef F2PY_REPORT_ATEXIT -f2py_start_call_clock(); -#endif -#callfortranroutine# -if (PyErr_Occurred()) - f2py_success = 0; -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_call_clock(); -#endif -/*end of callfortranroutine*/ -\t\tif (f2py_success) { -#pyobjfrom# -/*end of pyobjfrom*/ -\t\tCFUNCSMESS(\"Building return value.\\n\"); -\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); -/*closepyobjfrom*/ -#closepyobjfrom# -\t\t} /*if (f2py_success) after callfortranroutine*/ -/*cleanupfrompyobj*/ -#cleanupfrompyobj# -\tif (capi_buildvalue == NULL) { -#routdebugfailure# -\t} else { -#routdebugleave# -\t} -\tCFUNCSMESS(\"Freeing memory.\\n\"); -#freemem# -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_clock(); -#endif -\treturn capi_buildvalue; -} -#endtitle# -""", - 'routine_defs': '#routine_def#', - 'initf2pywraphooks': '#initf2pywraphook#', - 'externroutines': '#declfortranroutine#', - 'doc': '#docreturn##name#(#docsignature#)', - 'docshort': '#docreturn##name#(#docsignatureshort#)', - 'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n', - 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], - 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, - 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', - """ -\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} -#routnote# - -#latexdocstrsigns# -"""], - 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, - - ] -} - -################## Rules for C/API function ############## - -rout_rules = [ - { # Init - 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', - 'routdebugleave': '\n', 'routdebugfailure': '\n', - 'setjmpbuf': ' || ', - 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', - 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', - 'latexdocstrsigns': '\n', - 'latexdocstrreq': '\n', 'latexdocstropt': '\n', - 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', - }, - 'kwlist': '', 
'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', - 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', - 'freemem': '/*freemem*/', - 'docsignshort': '', 'docsignoptshort': '', - 'docstrsigns': '', 'latexdocstrsigns': '', - 'docstrreq': '\\nParameters\\n----------', - 'docstropt': '\\nOther Parameters\\n----------------', - 'docstrout': '\\nReturns\\n-------', - 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', - 'latexdocstrreq': '\\noindent Required arguments:', - 'latexdocstropt': '\\noindent Optional arguments:', - 'latexdocstrout': '\\noindent Return objects:', - 'latexdocstrcbs': '\\noindent Call-back functions:', - 'args_capi': '', 'keys_capi': '', 'functype': '', - 'frompyobj': '/*frompyobj*/', - # this list will be reversed - 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], - 'pyobjfrom': '/*pyobjfrom*/', - # this list will be reversed - 'closepyobjfrom': ['/*end of closepyobjfrom*/'], - 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', - 'routdebugenter': '/*routdebugenter*/', - 'routdebugfailure': '/*routdebugfailure*/', - 'callfortranroutine': '/*callfortranroutine*/', - 'argformat': '', 'keyformat': '', 'need_cfuncs': '', - 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', - 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', - 'initf2pywraphook': '', - 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, - }, { - 'apiname': 'f2py_rout_#modulename#_#name#', - 'pyname': '#modulename#.#name#', - 'decl': '', - '_check': l_not(ismoduleroutine) - }, { - 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', - 'pyname': '#modulename#.#f90modulename#.#name#', - 'decl': '', - '_check': ismoduleroutine - }, { # Subroutine - 'functype': 'void', - 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', - ismoduleroutine: '', - isdummyroutine: '' - }, - 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, - 'callfortranroutine': [ - {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, - {hasexternals: """\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement: '''\t\t\t\t#callstatement#; -\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t\t\t\t(*f2py_func)(#callfortran#);'}, - {isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'}, - {hasexternals: """\t\t}"""} - ], - '_check': l_and(issubroutine, l_not(issubroutine_wrap)), - }, { # Wrapped function - 'functype': 'void', - 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine: '', - }, - - 'routine_def': 
{l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' - { - extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); - PyObject_SetAttrString(o,"_cpointer", tmp); - Py_DECREF(tmp); -#if PY_VERSION_HEX >= 0x03000000 - s = PyUnicode_FromString("#name#"); -#else - s = PyString_FromString("#name#"); -#endif - PyObject_SetAttrString(o,"__name__", s); - Py_DECREF(s); - } - '''}, - 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, - 'callfortranroutine': [ - {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t(*f2py_func)(#callfortran#);'}, - {hascallstatement: - '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'} - ], - '_check': isfunction_wrap, - }, { # Wrapped subroutine - 'functype': 'void', - 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine: '', - }, - - 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' - { - extern void #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); - PyObject_SetAttrString(o,"_cpointer", tmp); - Py_DECREF(tmp); -#if PY_VERSION_HEX >= 0x03000000 - s = PyUnicode_FromString("#name#"); -#else - s = PyString_FromString("#name#"); -#endif - PyObject_SetAttrString(o,"__name__", s); - Py_DECREF(s); - } - '''}, - 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, - 'callfortranroutine': [ - {debugcapi: [ - """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t(*f2py_func)(#callfortran#);'}, - {hascallstatement: - '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'} - ], - '_check': issubroutine_wrap, - }, { # Function - 'functype': '#ctype#', - 'docreturn': {l_not(isintent_hide): '#rname#,'}, - 'docstrout': '#pydocsignout#', - 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasresultnote: '--- #resultnote#'}], - 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ -#ifdef USESCOMPAQFORTRAN -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); -#else -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# 
#fortranname#(#callfortran#)\\n\"); -#endif -"""}, - {l_and(debugcapi, l_not(isstringfunction)): """\ -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); -"""} - ], - '_check': l_and(isfunction, l_not(isfunction_wrap)) - }, { # Scalar function - 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', - isdummyroutine: '' - }, - 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};', - l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'}, - {iscomplexfunction: - '\tPyObject *#name#_return_value_capi = Py_None;'} - ], - 'callfortranroutine': [ - {hasexternals: """\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement: '''\t#callstatement#; -/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ -'''}, - {l_not(l_or(hascallstatement, isdummyroutine)) - : '\t#name#_return_value = (*f2py_func)(#callfortran#);'}, - {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t}'}, - {l_and(debugcapi, iscomplexfunction) - : '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, - {l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], - 'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, - 'need': [{l_not(isdummyroutine): 'F_FUNC'}, - {iscomplexfunction: 'pyobj_from_#ctype#1'}, - {islong_longfunction: 'long_long'}, - {islong_doublefunction: 'long_double'}], - 'returnformat': {l_not(isintent_hide): '#rformat#'}, - 'return': {iscomplexfunction: ',#name#_return_value_capi', - l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, - '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) - }, { # String function # in use for --no-wrap - 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): - '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c): - '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' - }, - 'decl': ['\t#ctype# #name#_return_value = NULL;', - '\tint #name#_return_value_len = 0;'], - 'callfortran':'#name#_return_value,#name#_return_value_len,', - 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', - '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', - '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', - '\t\tf2py_success = 0;', - '\t} else {', - "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';", - '\t}', - '\tif (f2py_success) {', - 
{hasexternals: """\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'}, - """\ -#ifdef USESCOMPAQFORTRAN -\t\t(*f2py_func)(#callcompaqfortran#); -#else -\t\t(*f2py_func)(#callfortran#); -#endif -""", - {isthreadsafe: '\t\tPy_END_ALLOW_THREADS'}, - {hasexternals: '\t\t}'}, - {debugcapi: - '\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, - '\t} /* if (f2py_success) after (string)malloc */', - ], - 'returnformat': '#rformat#', - 'return': ',#name#_return_value', - 'freemem': '\tSTRINGFREE(#name#_return_value);', - 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], - '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete - }, - { # Debugging - 'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', - 'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', - 'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', - '_check': debugcapi - } -] - -################ Rules for arguments ################## - -typedef_need_dict = {islong_long: 'long_long', - islong_double: 'long_double', - islong_complex: 'complex_long_double', - isunsigned_char: 'unsigned_char', - isunsigned_short: 'unsigned_short', - isunsigned: 'unsigned', - isunsigned_long_long: 'unsigned_long_long', - isunsigned_chararray: 'unsigned_char', - isunsigned_shortarray: 'unsigned_short', - isunsigned_long_longarray: 'unsigned_long_long', - issigned_long_longarray: 'long_long', - } - -aux_rules = [ - { - 'separatorsfor': sepdict - }, - { # Common - 'frompyobj': ['\t/* Processing auxiliary variable #varname# */', - {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], - 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', - 'need': typedef_need_dict, - }, - # Scalars (not complex) - { # Common - 'decl': '\t#ctype# #varname# = 0;', - 'need': {hasinitvalue: 'math.h'}, - 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, - '_check': l_and(isscalar, l_not(iscomplex)), - }, - { - 'return': ',#varname#', - 'docstrout': '#pydocsignout#', - 'docreturn': '#outvarname#,', - 'returnformat': '#varrformat#', - '_check': l_and(isscalar, l_not(iscomplex), isintent_out), - }, - # Complex scalars - { # Common - 'decl': '\t#ctype# #varname#;', - 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check': iscomplex - }, - # String - { # Common - 'decl': ['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - ], - 'need':['len..'], - '_check':isstring - }, - # Array - { # Common - 'decl': ['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - ], - 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], - '_check': isarray - }, - # Scalararray - { # Common - '_check': l_and(isarray, l_not(iscomplexarray)) - }, { # Not hidden - '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) - }, - # Integer*1 array - {'need': '#ctype#', - '_check': isint1array, - '_depend': '' - }, - # Integer*-1 array - {'need': '#ctype#', - '_check': isunsigned_chararray, - '_depend': '' - }, - # Integer*-2 array - {'need': '#ctype#', - '_check': isunsigned_shortarray, - '_depend': '' - }, - # Integer*-8 array - {'need': '#ctype#', - '_check': isunsigned_long_longarray, - '_depend': '' - }, - # Complexarray - {'need': '#ctype#', - 
'_check': iscomplexarray, - '_depend': '' - }, - # Stringarray - { - 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, - 'need': 'string', - '_check': isstringarray - } -] - -arg_rules = [ - { - 'separatorsfor': sepdict - }, - { # Common - 'frompyobj': ['\t/* Processing variable #varname# */', - {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], - 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', - '_depend': '', - 'need': typedef_need_dict, - }, - # Doc signatures - { - 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, - 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, - 'docstrout': {isintent_out: '#pydocsignout#'}, - 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote: '--- #note#'}]}, - 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote, isintent_hide): '--- #note#', - l_and(hasnote, isintent_nothide): '--- See above.'}]}, - 'depend': '' - }, - # Required/Optional arguments - { - 'kwlist': '"#varname#",', - 'docsign': '#varname#,', - '_check': l_and(isintent_nothide, l_not(isoptional)) - }, - { - 'kwlistopt': '"#varname#",', - 'docsignopt': '#varname#=#showinit#,', - 'docsignoptshort': '#varname#,', - '_check': l_and(isintent_nothide, isoptional) - }, - # Docstring/BuildValue - { - 'docreturn': '#outvarname#,', - 'returnformat': '#varrformat#', - '_check': isintent_out - }, - # Externals (call-back functions) - { # Common - 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, - 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, - 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, - 'docstrcbs': '#cbdocstr#', - 'latexdocstrcbs': '\\item[] #cblatexdocstr#', - 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, - 'decl': ['\tPyObject *#varname#_capi = Py_None;', - '\tPyTupleObject *#varname#_xa_capi = NULL;', - '\tPyTupleObject *#varname#_args_capi = NULL;', - '\tint #varname#_nofargs_capi = 0;', - {l_not(isintent_callback): - '\t#cbname#_typedef #varname#_cptr;'} - ], - 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'xaformat': {isintent_nothide: 'O!'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', - 'setjmpbuf': '(setjmp(#cbname#_jmpbuf))', - 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, - 'need': ['#cbname#', 'setjmp.h'], - '_check':isexternal - }, - { - 'frompyobj': [{l_not(isintent_callback): """\ -if(F2PyCapsule_Check(#varname#_capi)) { - #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi); -} else { - #varname#_cptr = #cbname#; -} -"""}, {isintent_callback: """\ -if (#varname#_capi==Py_None) { - #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); - if (#varname#_capi) { - if (#varname#_xa_capi==NULL) { - if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { - PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); - if (capi_tmp) { - #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); - Py_DECREF(capi_tmp); - } - else { - 
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); - } - if (#varname#_xa_capi==NULL) { - PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); - return NULL; - } - } - } - } - if (#varname#_capi==NULL) { - PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); - return NULL; - } -} -"""}, - """\ -\t#varname#_nofargs_capi = #cbname#_nofargs; -\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) { -\t\tjmp_buf #varname#_jmpbuf;""", - {debugcapi: ["""\ -\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs); -\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", - {l_not(isintent_callback): """\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, - """\ -\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\"); -\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject); -\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject); -\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""", - ], - 'cleanupfrompyobj': - """\ -\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\"); -\t\t#cbname#_capi = #varname#_capi; -\t\tPy_DECREF(#cbname#_args_capi); -\t\t#cbname#_args_capi = #varname#_args_capi; -\t\t#cbname#_nofargs = #varname#_nofargs_capi; -\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); -\t}""", - 'need': ['SWAP', 'create_cb_arglist'], - '_check':isexternal, - '_depend':'' - }, - # Scalars (not complex) - { # Common - 'decl': '\t#ctype# #varname# = 0;', - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, - 'return': {isintent_out: ',#varname#'}, - '_check': l_and(isscalar, l_not(iscomplex)) - }, { - 'need': {hasinitvalue: 'math.h'}, - '_check': l_and(isscalar, l_not(iscomplex)), - }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'pyobjfrom': {isintent_inout: """\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\tif (f2py_success) {"""}, - 'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, - '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide) - }, { - 'frompyobj': [ - # hasinitvalue... - # if pyobj is None: - # varname = init - # else - # from_pyobj(varname) - # - # isoptional and noinitvalue... - # if pyobj is not None: - # from_pyobj(varname) - # else: - # varname is uninitialized - # - # ... 
- # from_pyobj(varname) - # - {hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else', - '_depend': ''}, - {l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)', - '_depend': ''}, - {l_not(islogical): '''\ -\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); -\tif (f2py_success) {'''}, - {islogical: '''\ -\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); -\t\tf2py_success = 1; -\tif (f2py_success) {'''}, - ], - 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/', - 'need': {l_not(islogical): '#ctype#_from_pyobj'}, - '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), - '_depend': '' - }, { # Hidden - 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, - 'need': typedef_need_dict, - '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), - '_depend': '' - }, { # Common - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - '_check': l_and(isscalar, l_not(iscomplex)), - '_depend': '' - }, - # Complex scalars - { # Common - 'decl': '\t#ctype# #varname#;', - 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, - 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - 'return': {isintent_out: ',#varname#_capi'}, - '_check': iscomplex - }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, - 'pyobjfrom': {isintent_inout: """\ -\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\t\tif (f2py_success) {"""}, - 'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - '_check': l_and(iscomplex, isintent_nothide) - }, { - 'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, - {l_and(isoptional, l_not(hasinitvalue)) - : '\tif (#varname#_capi != Py_None)'}, - '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n\tif (f2py_success) {'], - 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/', - 'need': ['#ctype#_from_pyobj'], - '_check': l_and(iscomplex, isintent_nothide), - '_depend': '' - }, { # Hidden - 'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'}, - '_check': l_and(iscomplex, isintent_hide) - }, { - 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check': l_and(iscomplex, isintent_hide), - '_depend': '' - }, { # Common - 'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, - 'need': ['pyobj_from_#ctype#1'], - '_check': iscomplex - }, { - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - '_check': iscomplex, - '_depend': '' - }, - # String - { # Common - 'decl': ['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - '\tPyObject *#varname#_capi = Py_None;'], - 'callfortran':'#varname#,', - 'callfortranappend':'slen(#varname#),', - 'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, - 'return': {isintent_out: ',#varname#'}, - 'need': ['len..'], # 'STRINGFREE'], - '_check':isstring - }, { # Common - 'frompyobj': """\ 
-\tslen(#varname#) = #length#; -\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); -\tif (f2py_success) {""", - 'cleanupfrompyobj': """\ -\t\tSTRINGFREE(#varname#); -\t} /*if (f2py_success) of #varname#*/""", - 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'], - '_check':isstring, - '_depend':'' - }, { # Not hidden - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - 'pyobjfrom': {isintent_inout: '''\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#); -\tif (f2py_success) {'''}, - 'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, - 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, - '_check': l_and(isstring, isintent_nothide) - }, { # Hidden - '_check': l_and(isstring, isintent_hide) - }, { - 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, - '_check': isstring, - '_depend': '' - }, - # Array - { # Common - 'decl': ['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - '\tPyArrayObject *capi_#varname#_tmp = NULL;', - '\tint capi_#varname#_intent = 0;', - ], - 'callfortran':'#varname#,', - 'return':{isintent_out: ',capi_#varname#_tmp'}, - 'need': 'len..', - '_check': isarray - }, { # intent(overwrite) array - 'decl': '\tint capi_overwrite_#varname# = 1;', - 'kwlistxa': '"overwrite_#varname#",', - 'xaformat': 'i', - 'keys_xa': ',&capi_overwrite_#varname#', - 'docsignxa': 'overwrite_#varname#=1,', - 'docsignxashort': 'overwrite_#varname#,', - 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', - '_check': l_and(isarray, isintent_overwrite), - }, { - 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check': l_and(isarray, isintent_overwrite), - '_depend': '', - }, - { # intent(copy) array - 'decl': '\tint capi_overwrite_#varname# = 0;', - 'kwlistxa': '"overwrite_#varname#",', - 'xaformat': 'i', - 'keys_xa': ',&capi_overwrite_#varname#', - 'docsignxa': 'overwrite_#varname#=0,', - 'docsignxashort': 'overwrite_#varname#,', - 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', - '_check': l_and(isarray, isintent_copy), - }, { - 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check': l_and(isarray, isintent_copy), - '_depend': '', - }, { - 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], - '_check': isarray, - '_depend': '' - }, { # Not hidden - 'decl': '\tPyObject *#varname#_capi = Py_None;', - 'argformat': {isrequired: 'O'}, - 'keyformat': {isoptional: 'O'}, - 'args_capi': {isrequired: ',&#varname#_capi'}, - 'keys_capi': {isoptional: ',&#varname#_capi'}, - '_check': l_and(isarray, isintent_nothide) - }, { - 'frompyobj': ['\t#setdims#;', - '\tcapi_#varname#_intent |= #intent#;', - {isintent_hide: - '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, - {isintent_nothide: - '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, - """\ -\tif (capi_#varname#_tmp == NULL) { -\t\tif (!PyErr_Occurred()) -\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran 
array\" ); -\t} else { -\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp)); -""", - {hasinitvalue: [ - {isintent_nothide: - '\tif (#varname#_capi == Py_None) {'}, - {isintent_hide: '\t{'}, - {iscomplexarray: '\t\t#ctype# capi_c;'}, - """\ -\t\tint *_i,capi_i=0; -\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); -\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) { -\t\t\twhile ((_i = nextforcomb())) -\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ -\t\t} else { -\t\t\tif (!PyErr_Occurred()) -\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); -\t\t\tf2py_success = 0; -\t\t} -\t} -\tif (f2py_success) {"""]}, - ], - 'cleanupfrompyobj': [ # note that this list will be reversed - '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', - {l_not(l_or(isintent_out, isintent_hide)): """\ -\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { -\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, - {l_and(isintent_hide, l_not(isintent_out)) - : """\t\tPy_XDECREF(capi_#varname#_tmp);"""}, - {hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'}, - ], - '_check': isarray, - '_depend': '' - }, - # Scalararray - { # Common - '_check': l_and(isarray, l_not(iscomplexarray)) - }, { # Not hidden - '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) - }, - # Integer*1 array - {'need': '#ctype#', - '_check': isint1array, - '_depend': '' - }, - # Integer*-1 array - {'need': '#ctype#', - '_check': isunsigned_chararray, - '_depend': '' - }, - # Integer*-2 array - {'need': '#ctype#', - '_check': isunsigned_shortarray, - '_depend': '' - }, - # Integer*-8 array - {'need': '#ctype#', - '_check': isunsigned_long_longarray, - '_depend': '' - }, - # Complexarray - {'need': '#ctype#', - '_check': iscomplexarray, - '_depend': '' - }, - # Stringarray - { - 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, - 'need': 'string', - '_check': isstringarray - } -] - -################# Rules for checking ############### - -check_rules = [ - { - 'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, - 'need': 'len..' - }, { - 'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/', - 'need': 'CHECKSCALAR', - '_check': l_and(isscalar, l_not(iscomplex)), - '_break': '' - }, { - 'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/', - 'need': 'CHECKSTRING', - '_check': isstring, - '_break': '' - }, { - 'need': 'CHECKARRAY', - 'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/', - '_check': isarray, - '_break': '' - }, { - 'need': 'CHECKGENERIC', - 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', - } -] - -########## Applying the rules. 
No need to modify what follows #############
-
-#################### Build C/API module #######################
-
-
-def buildmodule(m, um):
-    """
-    Return a dict of generated output paths: 'csrc' gives the C/API
-    module source and, when Fortran wrappers are emitted, 'fsrc' gives
-    the wrapper source and 'ltx' the LaTeX documentation.
-    """
-    global f2py_version, options
-    outmess('\tBuilding module "%s"...\n' % (m['name']))
-    ret = {}
-    mod_rules = defmod_rules[:]
-    vrd = capi_maps.modsign2map(m)
-    rd = dictappend({'f2py_version': f2py_version}, vrd)
-    funcwrappers = []
-    funcwrappers2 = []  # F90 codes
-    for n in m['interfaced']:
-        nb = None
-        for bi in m['body']:
-            if not bi['block'] == 'interface':
-                errmess('buildmodule: Expected interface block. Skipping.\n')
-                continue
-            for b in bi['body']:
-                if b['name'] == n:
-                    nb = b
-                    break
-
-        if not nb:
-            errmess(
-                'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n))
-            continue
-        nb_list = [nb]
-        if 'entry' in nb:
-            for k, a in nb['entry'].items():
-                nb1 = copy.deepcopy(nb)
-                del nb1['entry']
-                nb1['name'] = k
-                nb1['args'] = a
-                nb_list.append(nb1)
-        for nb in nb_list:
-            api, wrap = buildapi(nb)
-            if wrap:
-                if ismoduleroutine(nb):
-                    funcwrappers2.append(wrap)
-                else:
-                    funcwrappers.append(wrap)
-            ar = applyrules(api, vrd)
-            rd = dictappend(rd, ar)
-
-    # Construct COMMON block support
-    cr, wrap = common_rules.buildhooks(m)
-    if wrap:
-        funcwrappers.append(wrap)
-    ar = applyrules(cr, vrd)
-    rd = dictappend(rd, ar)
-
-    # Construct F90 module support
-    mr, wrap = f90mod_rules.buildhooks(m)
-    if wrap:
-        funcwrappers2.append(wrap)
-    ar = applyrules(mr, vrd)
-    rd = dictappend(rd, ar)
-
-    for u in um:
-        ar = use_rules.buildusevars(u, m['use'][u['name']])
-        rd = dictappend(rd, ar)
-
-    needs = cfuncs.get_needs()
-    code = {}
-    for n in needs.keys():
-        code[n] = []
-        for k in needs[n]:
-            c = ''
-            if k in cfuncs.includes0:
-                c = cfuncs.includes0[k]
-            elif k in cfuncs.includes:
-                c = cfuncs.includes[k]
-            elif k in cfuncs.userincludes:
-                c = cfuncs.userincludes[k]
-            elif k in cfuncs.typedefs:
-                c = cfuncs.typedefs[k]
-            elif k in cfuncs.typedefs_generated:
-                c = cfuncs.typedefs_generated[k]
-            elif k in cfuncs.cppmacros:
-                c = cfuncs.cppmacros[k]
-            elif k in cfuncs.cfuncs:
-                c = cfuncs.cfuncs[k]
-            elif k in cfuncs.callbacks:
-                c = cfuncs.callbacks[k]
-            elif k in cfuncs.f90modhooks:
-                c = cfuncs.f90modhooks[k]
-            elif k in cfuncs.commonhooks:
-                c = cfuncs.commonhooks[k]
-            else:
-                errmess('buildmodule: unknown need %s.\n' % (repr(k)))
-                continue
-            code[n].append(c)
-    mod_rules.append(code)
-    for r in mod_rules:
-        if ('_check' in r and r['_check'](m)) or ('_check' not in r):
-            ar = applyrules(r, vrd, m)
-            rd = dictappend(rd, ar)
-    ar = applyrules(module_rules, rd)
-
-    fn = os.path.join(options['buildpath'], vrd['coutput'])
-    ret['csrc'] = fn
-    with open(fn, 'w') as f:
-        f.write(ar['modulebody'].replace('\t', 2 * ' '))
-    outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
-
-    if options['dorestdoc']:
-        fn = os.path.join(
-            options['buildpath'], vrd['modulename'] + 'module.rest')
-        with open(fn, 'w') as f:
-            f.write('.. 
-*- rest -*-\n') - f.write('\n'.join(ar['restdoc'])) - outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' % - (options['buildpath'], vrd['modulename'])) - if options['dolatexdoc']: - fn = os.path.join( - options['buildpath'], vrd['modulename'] + 'module.tex') - ret['ltx'] = fn - with open(fn, 'w') as f: - f.write( - '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) - if 'shortlatex' not in options: - f.write( - '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') - f.write('\n'.join(ar['latexdoc'])) - if 'shortlatex' not in options: - f.write('\\end{document}') - outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' % - (options['buildpath'], vrd['modulename'])) - if funcwrappers: - wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) - ret['fsrc'] = wn - with open(wn, 'w') as f: - f.write('C -*- fortran -*-\n') - f.write( - 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) - f.write( - 'C It contains Fortran 77 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): - if l and l[0] == ' ': - while len(l) >= 66: - lines.append(l[:66] + '\n &') - l = l[66:] - lines.append(l + '\n') - else: - lines.append(l + '\n') - lines = ''.join(lines).replace('\n &\n', '\n') - f.write(lines) - outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn)) - if funcwrappers2: - wn = os.path.join( - options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename'])) - ret['fsrc'] = wn - with open(wn, 'w') as f: - f.write('! -*- f90 -*-\n') - f.write( - '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) - f.write( - '! It contains Fortran 90 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): - if len(l) > 72 and l[0] == ' ': - lines.append(l[:72] + '&\n &') - l = l[72:] - while len(l) > 66: - lines.append(l[:66] + '&\n &') - l = l[66:] - lines.append(l + '\n') - else: - lines.append(l + '\n') - lines = ''.join(lines).replace('\n &\n', '\n') - f.write(lines) - outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn)) - return ret - -################## Build C/API function ############# - -stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', - 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} - - -def buildapi(rout): - rout, wrap = func2subr.assubr(rout) - args, depargs = getargs2(rout) - capi_maps.depargs = depargs - var = rout['vars'] - - if ismoduleroutine(rout): - outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' % - (rout['modulename'], rout['name'])) - else: - outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name'])) - # Routine - vrd = capi_maps.routsign2map(rout) - rd = dictappend({}, vrd) - for r in rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar = applyrules(r, vrd, rout) - rd = dictappend(rd, ar) - - # Args - nth, nthk = 0, 0 - savevrd = {} - for a in args: - vrd = capi_maps.sign2map(a, var[a]) - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - if not isintent_hide(var[a]): - if not isoptional(var[a]): - nth = nth + 1 - vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' - else: - nthk = nthk + 1 - vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' - else: - vrd['nth'] = 'hidden' - savevrd[a] = vrd - for r in _rules: - if '_depend' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd 
= dictappend(rd, ar) - if '_break' in r: - break - for a in depargs: - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - vrd = savevrd[a] - for r in _rules: - if '_depend' not in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar = applyrules(r, vrd, var[a]) - rd = dictappend(rd, ar) - if '_break' in r: - break - if 'check' in var[a]: - for c in var[a]['check']: - vrd['check'] = c - ar = applyrules(check_rules, vrd, var[a]) - rd = dictappend(rd, ar) - if isinstance(rd['cleanupfrompyobj'], list): - rd['cleanupfrompyobj'].reverse() - if isinstance(rd['closepyobjfrom'], list): - rd['closepyobjfrom'].reverse() - rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', - {'docsign': rd['docsign'], - 'docsignopt': rd['docsignopt'], - 'docsignxa': rd['docsignxa']})) - optargs = stripcomma(replace('#docsignopt##docsignxa#', - {'docsignxa': rd['docsignxashort'], - 'docsignopt': rd['docsignoptshort']} - )) - if optargs == '': - rd['docsignatureshort'] = stripcomma( - replace('#docsign#', {'docsign': rd['docsign']})) - else: - rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', - {'docsign': rd['docsign'], - 'docsignopt': optargs, - }) - rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') - rd['latexdocsignatureshort'] = rd[ - 'latexdocsignatureshort'].replace(',', ', ') - cfs = stripcomma(replace('#callfortran##callfortranappend#', { - 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) - if len(rd['callfortranappend']) > 1: - rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', { - 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) - else: - rd['callcompaqfortran'] = cfs - rd['callfortran'] = cfs - if isinstance(rd['docreturn'], list): - rd['docreturn'] = stripcomma( - replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' - rd['docstrsigns'] = [] - rd['latexdocstrsigns'] = [] - for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: - if k in rd and isinstance(rd[k], list): - rd['docstrsigns'] = rd['docstrsigns'] + rd[k] - k = 'latex' + k - if k in rd and isinstance(rd[k], list): - rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ - ['\\begin{description}'] + rd[k][1:] +\ - ['\\end{description}'] - - # Workaround for Python 2.6, 2.6.1 bug: https://bugs.python.org/issue4720 - if rd['keyformat'] or rd['xaformat']: - argformat = rd['argformat'] - if isinstance(argformat, list): - argformat.append('|') - else: - assert isinstance(argformat, str), repr( - (argformat, type(argformat))) - rd['argformat'] += '|' - - ar = applyrules(routine_rules, rd) - if ismoduleroutine(rout): - outmess('\t\t\t %s\n' % (ar['docshort'])) - else: - outmess('\t\t %s\n' % (ar['docshort'])) - return ar, wrap - - -#################### EOF rules.py ####################### diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/setup.py b/venv/lib/python3.7/site-packages/numpy/f2py/setup.py deleted file mode 100644 index a8c1401..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/setup.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -""" -setup.py for installing F2PY - -Usage: - pip install . - -Copyright 2001-2005 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-$Revision: 1.32 $ -$Date: 2005/01/30 17:22:14 $ -Pearu Peterson - -""" -from __future__ import division, print_function - -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration - - -from __version__ import version - - -def configuration(parent_package='', top_path=None): - config = Configuration('f2py', parent_package, top_path) - config.add_data_dir('tests') - config.add_data_files( - 'src/fortranobject.c', - 'src/fortranobject.h') - return config - - -if __name__ == "__main__": - - config = configuration(top_path='') - config = config.todict() - - config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ - "/F2PY-2-latest.tar.gz" - config['classifiers'] = [ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: NumPy License', - 'Natural Language :: English', - 'Operating System :: OS Independent', - 'Programming Language :: C', - 'Programming Language :: Fortran', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development :: Code Generators', - ] - setup(version=version, - description="F2PY - Fortran to Python Interface Generator", - author="Pearu Peterson", - author_email="pearu@cens.ioc.ee", - maintainer="Pearu Peterson", - maintainer_email="pearu@cens.ioc.ee", - license="BSD", - platforms="Unix, Windows (mingw|cygwin), Mac OSX", - long_description="""\ -The Fortran to Python Interface Generator, or F2PY for short, is a -command line tool (f2py) for generating Python C/API modules for -wrapping Fortran 77/90/95 subroutines, accessing common blocks from -Python, and calling Python functions from Fortran (call-backs). -Interfacing subroutines/data from Fortran 90/95 modules is supported.""", - url="http://cens.ioc.ee/projects/f2py2e/", - keywords=['Fortran', 'f2py'], - **config) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/src/fortranobject.c b/venv/lib/python3.7/site-packages/numpy/f2py/src/fortranobject.c deleted file mode 100644 index 8aa5555..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/src/fortranobject.c +++ /dev/null @@ -1,1109 +0,0 @@ -#define FORTRANOBJECT_C -#include "fortranobject.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -/* - This file implements: FortranObject, array_from_pyobj, copy_ND_array - - Author: Pearu Peterson - $Revision: 1.52 $ - $Date: 2005/07/11 07:44:20 $ -*/ - -int -F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj) -{ - if (obj==NULL) { - fprintf(stderr, "Error loading %s\n", name); - if (PyErr_Occurred()) { - PyErr_Print(); - PyErr_Clear(); - } - return -1; - } - return PyDict_SetItemString(dict, name, obj); -} - -/************************* FortranObject *******************************/ - -typedef PyObject *(*fortranfunc)(PyObject *,PyObject *,PyObject *,void *); - -PyObject * -PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) { - int i; - PyFortranObject *fp = NULL; - PyObject *v = NULL; - if (init!=NULL) { /* Initialize F90 module objects */ - (*(init))(); - } - fp = PyObject_New(PyFortranObject, &PyFortran_Type); - if (fp == NULL) { - return NULL; - } - if ((fp->dict = PyDict_New()) == NULL) { - Py_DECREF(fp); - return NULL; - } - fp->len = 0; - while (defs[fp->len].name != NULL) { - fp->len++; - } - if (fp->len == 0) { - goto fail; - } - fp->defs = defs; - for (i=0;ilen;i++) { - if (fp->defs[i].rank == -1) { /* Is Fortran routine */ - v = 
PyFortranObject_NewAsAttr(&(fp->defs[i])); - if (v==NULL) { - goto fail; - } - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); - Py_XDECREF(v); - } else - if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */ - if (fp->defs[i].type == NPY_STRING) { - int n = fp->defs[i].rank-1; - v = PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d, - NPY_STRING, NULL, fp->defs[i].data, fp->defs[i].dims.d[n], - NPY_ARRAY_FARRAY, NULL); - } - else { - v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, - NULL); - } - if (v==NULL) { - goto fail; - } - PyDict_SetItemString(fp->dict,fp->defs[i].name,v); - Py_XDECREF(v); - } - } - return (PyObject *)fp; - fail: - Py_XDECREF(fp); - return NULL; -} - -PyObject * -PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module routines */ - PyFortranObject *fp = NULL; - fp = PyObject_New(PyFortranObject, &PyFortran_Type); - if (fp == NULL) return NULL; - if ((fp->dict = PyDict_New())==NULL) { - PyObject_Del(fp); - return NULL; - } - fp->len = 1; - fp->defs = defs; - return (PyObject *)fp; -} - -/* Fortran methods */ - -static void -fortran_dealloc(PyFortranObject *fp) { - Py_XDECREF(fp->dict); - PyObject_Del(fp); -} - - -#if PY_VERSION_HEX >= 0x03000000 -#else -static PyMethodDef fortran_methods[] = { - {NULL, NULL} /* sentinel */ -}; -#endif - - -/* Returns number of bytes consumed from buf, or -1 on error. */ -static Py_ssize_t -format_def(char *buf, Py_ssize_t size, FortranDataDef def) -{ - char *p = buf; - int i, n; - - n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); - if (n < 0 || n >= size) { - return -1; - } - p += n; - size -= n; - - for (i = 1; i < def.rank; i++) { - n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); - if (n < 0 || n >= size) { - return -1; - } - p += n; - size -= n; - } - - if (size <= 0) { - return -1; - } - - *p++ = ')'; - size--; - - if (def.data == NULL) { - static const char notalloc[] = ", not allocated"; - if ((size_t) size < sizeof(notalloc)) { - return -1; - } - memcpy(p, notalloc, sizeof(notalloc)); - } - - return p - buf; -} - -static PyObject * -fortran_doc(FortranDataDef def) -{ - char *buf, *p; - PyObject *s = NULL; - Py_ssize_t n, origsize, size = 100; - - if (def.doc != NULL) { - size += strlen(def.doc); - } - origsize = size; - buf = p = (char *)PyMem_Malloc(size); - if (buf == NULL) { - return PyErr_NoMemory(); - } - - if (def.rank == -1) { - if (def.doc) { - n = strlen(def.doc); - if (n > size) { - goto fail; - } - memcpy(p, def.doc, n); - p += n; - size -= n; - } - else { - n = PyOS_snprintf(p, size, "%s - no docs available", def.name); - if (n < 0 || n >= size) { - goto fail; - } - p += n; - size -= n; - } - } - else { - PyArray_Descr *d = PyArray_DescrFromType(def.type); - n = PyOS_snprintf(p, size, "'%c'-", d->type); - Py_DECREF(d); - if (n < 0 || n >= size) { - goto fail; - } - p += n; - size -= n; - - if (def.data == NULL) { - n = format_def(p, size, def) == -1; - if (n < 0) { - goto fail; - } - p += n; - size -= n; - } - else if (def.rank > 0) { - n = format_def(p, size, def); - if (n < 0) { - goto fail; - } - p += n; - size -= n; - } - else { - n = strlen("scalar"); - if (size < n) { - goto fail; - } - memcpy(p, "scalar", n); - p += n; - size -= n; - } - } - if (size <= 1) { - goto fail; - } - *p++ = '\n'; - size--; - - /* p now points one beyond the last character of the string in buf */ -#if PY_VERSION_HEX >= 0x03000000 - s = 
PyUnicode_FromStringAndSize(buf, p - buf); -#else - s = PyString_FromStringAndSize(buf, p - buf); -#endif - - PyMem_Free(buf); - return s; - - fail: - fprintf(stderr, "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" - " too long docstring required, increase size\n", - p - buf, origsize); - PyMem_Free(buf); - return NULL; -} - -static FortranDataDef *save_def; /* save pointer of an allocatable array */ -static void set_data(char *d,npy_intp *f) { /* callback from Fortran */ - if (*f) /* In fortran f=allocated(d) */ - save_def->data = d; - else - save_def->data = NULL; - /* printf("set_data: d=%p,f=%d\n",d,*f); */ -} - -static PyObject * -fortran_getattr(PyFortranObject *fp, char *name) { - int i,j,k,flag; - if (fp->dict != NULL) { - PyObject *v = PyDict_GetItemString(fp->dict, name); - if (v != NULL) { - Py_INCREF(v); - return v; - } - } - for (i=0,j=1;ilen && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) - if (fp->defs[i].rank!=-1) { /* F90 allocatable array */ - if (fp->defs[i].func==NULL) return NULL; - for(k=0;kdefs[i].rank;++k) - fp->defs[i].dims.d[k]=-1; - save_def = &fp->defs[i]; - (*(fp->defs[i].func))(&fp->defs[i].rank,fp->defs[i].dims.d,set_data,&flag); - if (flag==2) - k = fp->defs[i].rank + 1; - else - k = fp->defs[i].rank; - if (fp->defs[i].data !=NULL) { /* array is allocated */ - PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d, - fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, - NULL); - if (v==NULL) return NULL; - /* Py_INCREF(v); */ - return v; - } else { /* array is not allocated */ - Py_RETURN_NONE; - } - } - if (strcmp(name,"__dict__")==0) { - Py_INCREF(fp->dict); - return fp->dict; - } - if (strcmp(name,"__doc__")==0) { -#if PY_VERSION_HEX >= 0x03000000 - PyObject *s = PyUnicode_FromString(""), *s2, *s3; - for (i=0;ilen;i++) { - s2 = fortran_doc(fp->defs[i]); - s3 = PyUnicode_Concat(s, s2); - Py_DECREF(s2); - Py_DECREF(s); - s = s3; - } -#else - PyObject *s = PyString_FromString(""); - for (i=0;ilen;i++) - PyString_ConcatAndDel(&s,fortran_doc(fp->defs[i])); -#endif - if (PyDict_SetItemString(fp->dict, name, s)) - return NULL; - return s; - } - if ((strcmp(name,"_cpointer")==0) && (fp->len==1)) { - PyObject *cobj = F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data),NULL); - if (PyDict_SetItemString(fp->dict, name, cobj)) - return NULL; - return cobj; - } -#if PY_VERSION_HEX >= 0x03000000 - if (1) { - PyObject *str, *ret; - str = PyUnicode_FromString(name); - ret = PyObject_GenericGetAttr((PyObject *)fp, str); - Py_DECREF(str); - return ret; - } -#else - return Py_FindMethod(fortran_methods, (PyObject *)fp, name); -#endif -} - -static int -fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) { - int i,j,flag; - PyArrayObject *arr = NULL; - for (i=0,j=1;ilen && (j=strcmp(name,fp->defs[i].name));i++); - if (j==0) { - if (fp->defs[i].rank==-1) { - PyErr_SetString(PyExc_AttributeError,"over-writing fortran routine"); - return -1; - } - if (fp->defs[i].func!=NULL) { /* is allocatable array */ - npy_intp dims[F2PY_MAX_DIMS]; - int k; - save_def = &fp->defs[i]; - if (v!=Py_None) { /* set new value (reallocate if needed -- - see f2py generated code for more - details ) */ - for(k=0;kdefs[i].rank;k++) dims[k]=-1; - if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) - return -1; - (*(fp->defs[i].func))(&fp->defs[i].rank,PyArray_DIMS(arr),set_data,&flag); - } else { /* deallocate */ - for(k=0;kdefs[i].rank;k++) dims[k]=0; - (*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag); - 
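/* Note (inferred from the surrounding code, not present in the
   original source): fp->defs[i].func is the f2py-generated Fortran
   helper for this allocatable array. It calls back into set_data(),
   which records the new base pointer in save_def->data -- or NULL once
   the array has been deallocated -- so the dims bookkeeping below can
   reset the shape to "unknown" (-1) until the next query. */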
for(k=0;kdefs[i].rank;k++) dims[k]=-1; - } - memcpy(fp->defs[i].dims.d,dims,fp->defs[i].rank*sizeof(npy_intp)); - } else { /* not allocatable array */ - if ((arr = array_from_pyobj(fp->defs[i].type,fp->defs[i].dims.d,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL) - return -1; - } - if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */ - npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,PyArray_NDIM(arr)); - if (s==-1) - s = PyArray_MultiplyList(PyArray_DIMS(arr),PyArray_NDIM(arr)); - if (s<0 || - (memcpy(fp->defs[i].data,PyArray_DATA(arr),s*PyArray_ITEMSIZE(arr)))==NULL) { - if ((PyObject*)arr!=v) { - Py_DECREF(arr); - } - return -1; - } - if ((PyObject*)arr!=v) { - Py_DECREF(arr); - } - } else return (fp->defs[i].func==NULL?-1:0); - return 0; /* successful */ - } - if (fp->dict == NULL) { - fp->dict = PyDict_New(); - if (fp->dict == NULL) - return -1; - } - if (v == NULL) { - int rv = PyDict_DelItemString(fp->dict, name); - if (rv < 0) - PyErr_SetString(PyExc_AttributeError,"delete non-existing fortran attribute"); - return rv; - } - else - return PyDict_SetItemString(fp->dict, name, v); -} - -static PyObject* -fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) { - int i = 0; - /* printf("fortran call - name=%s,func=%p,data=%p,%p\n",fp->defs[i].name, - fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */ - if (fp->defs[i].rank==-1) {/* is Fortran routine */ - if (fp->defs[i].func==NULL) { - PyErr_Format(PyExc_RuntimeError, "no function to call"); - return NULL; - } - else if (fp->defs[i].data==NULL) - /* dummy routine */ - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,NULL); - else - return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw, - (void *)fp->defs[i].data); - } - PyErr_Format(PyExc_TypeError, "this fortran object is not callable"); - return NULL; -} - -static PyObject * -fortran_repr(PyFortranObject *fp) -{ - PyObject *name = NULL, *repr = NULL; - name = PyObject_GetAttrString((PyObject *)fp, "__name__"); - PyErr_Clear(); -#if PY_VERSION_HEX >= 0x03000000 - if (name != NULL && PyUnicode_Check(name)) { - repr = PyUnicode_FromFormat("", name); - } - else { - repr = PyUnicode_FromString(""); - } -#else - if (name != NULL && PyString_Check(name)) { - repr = PyString_FromFormat("", PyString_AsString(name)); - } - else { - repr = PyString_FromString(""); - } -#endif - Py_XDECREF(name); - return repr; -} - - -PyTypeObject PyFortran_Type = { -#if PY_VERSION_HEX >= 0x03000000 - PyVarObject_HEAD_INIT(NULL, 0) -#else - PyObject_HEAD_INIT(0) - 0, /*ob_size*/ -#endif - "fortran", /*tp_name*/ - sizeof(PyFortranObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - /* methods */ - (destructor)fortran_dealloc, /*tp_dealloc*/ - 0, /*tp_print*/ - (getattrfunc)fortran_getattr, /*tp_getattr*/ - (setattrfunc)fortran_setattr, /*tp_setattr*/ - 0, /*tp_compare/tp_reserved*/ - (reprfunc)fortran_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - (ternaryfunc)fortran_call, /*tp_call*/ -}; - -/************************* f2py_report_atexit *******************************/ - -#ifdef F2PY_REPORT_ATEXIT -static int passed_time = 0; -static int passed_counter = 0; -static int passed_call_time = 0; -static struct timeb start_time; -static struct timeb stop_time; -static struct timeb start_call_time; -static struct timeb stop_call_time; -static int cb_passed_time = 0; -static int cb_passed_counter = 0; -static int cb_passed_call_time = 0; -static struct timeb cb_start_time; -static struct timeb 
cb_stop_time; -static struct timeb cb_start_call_time; -static struct timeb cb_stop_call_time; - -extern void f2py_start_clock(void) { ftime(&start_time); } -extern -void f2py_start_call_clock(void) { - f2py_stop_clock(); - ftime(&start_call_time); -} -extern -void f2py_stop_clock(void) { - ftime(&stop_time); - passed_time += 1000*(stop_time.time - start_time.time); - passed_time += stop_time.millitm - start_time.millitm; -} -extern -void f2py_stop_call_clock(void) { - ftime(&stop_call_time); - passed_call_time += 1000*(stop_call_time.time - start_call_time.time); - passed_call_time += stop_call_time.millitm - start_call_time.millitm; - passed_counter += 1; - f2py_start_clock(); -} - -extern void f2py_cb_start_clock(void) { ftime(&cb_start_time); } -extern -void f2py_cb_start_call_clock(void) { - f2py_cb_stop_clock(); - ftime(&cb_start_call_time); -} -extern -void f2py_cb_stop_clock(void) { - ftime(&cb_stop_time); - cb_passed_time += 1000*(cb_stop_time.time - cb_start_time.time); - cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm; -} -extern -void f2py_cb_stop_call_clock(void) { - ftime(&cb_stop_call_time); - cb_passed_call_time += 1000*(cb_stop_call_time.time - cb_start_call_time.time); - cb_passed_call_time += cb_stop_call_time.millitm - cb_start_call_time.millitm; - cb_passed_counter += 1; - f2py_cb_start_clock(); -} - -static int f2py_report_on_exit_been_here = 0; -extern -void f2py_report_on_exit(int exit_flag,void *name) { - if (f2py_report_on_exit_been_here) { - fprintf(stderr," %s\n",(char*)name); - return; - } - f2py_report_on_exit_been_here = 1; - fprintf(stderr," /-----------------------\\\n"); - fprintf(stderr," < F2PY performance report >\n"); - fprintf(stderr," \\-----------------------/\n"); - fprintf(stderr,"Overall time spent in ...\n"); - fprintf(stderr,"(a) wrapped (Fortran/C) functions : %8d msec\n", - passed_call_time); - fprintf(stderr,"(b) f2py interface, %6d calls : %8d msec\n", - passed_counter,passed_time); - fprintf(stderr,"(c) call-back (Python) functions : %8d msec\n", - cb_passed_call_time); - fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n", - cb_passed_counter,cb_passed_time); - - fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", - passed_call_time-cb_passed_call_time-cb_passed_time); - fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); - fprintf(stderr,"Exit status: %d\n",exit_flag); - fprintf(stderr,"Modules : %s\n",(char*)name); -} -#endif - -/********************** report on array copy ****************************/ - -#ifdef F2PY_REPORT_ON_ARRAY_COPY -static void f2py_report_on_array_copy(PyArrayObject* arr) { - const npy_intp arr_size = PyArray_Size((PyObject *)arr); - if (arr_size>F2PY_REPORT_ON_ARRAY_COPY) { - fprintf(stderr,"copied an array: size=%ld, elsize=%"NPY_INTP_FMT"\n", - arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); - } -} -static void f2py_report_on_array_copy_fromany(void) { - fprintf(stderr,"created an array from object\n"); -} - -#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR f2py_report_on_array_copy((PyArrayObject *)arr) -#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() -#else -#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR -#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY -#endif - - -/************************* array_from_obj *******************************/ - -/* - * File: array_from_pyobj.c - * - * Description: - * ------------ - * Provides array_from_pyobj function that returns a contiguous array - * object with the given dimensions 
and required storage order, either
- * in row-major (C) or column-major (Fortran) order. The function
- * array_from_pyobj is very flexible about its Python object argument
- * that can be any number, list, tuple, or array.
- *
- * array_from_pyobj is used in f2py generated Python extension
- * modules.
- *
- * Author: Pearu Peterson
- * Created: 13-16 January 2002
- * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
- */
-
-static int check_and_fix_dimensions(const PyArrayObject* arr,
-                                    const int rank,
-                                    npy_intp *dims);
-
-static int
-count_negative_dimensions(const int rank,
-                          const npy_intp *dims) {
-    int i=0,r=0;
-    while (i<rank) {
-        if (dims[i] < 0) ++r;
-        ++i;
-    }
-    return r;
-}
-
-#ifdef DEBUG_COPY_ND_ARRAY
-void dump_dims(int rank, npy_intp* dims) {
-    int i;
-    printf("[");
-    for(i=0;i<rank;++i) {
-        printf("%3" NPY_INTP_FMT, dims[i]);
-    }
-    printf("]\n");
-}
-void dump_attrs(const PyArrayObject* arr) {
-    int rank = PyArray_NDIM(arr);
-    npy_intp size = PyArray_Size((PyObject *)arr);
-    printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n",
-           rank,arr->flags,size);
-    printf("\tstrides = ");
-    dump_dims(rank,arr->strides);
-    printf("\tdimensions = ");
-    dump_dims(rank,arr->dimensions);
-}
-#endif
-
-#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; }
-
-static int swap_arrays(PyArrayObject* obj1, PyArrayObject* obj2) {
-    PyArrayObject_fields *arr1 = (PyArrayObject_fields*) obj1,
-                         *arr2 = (PyArrayObject_fields*) obj2;
-    SWAPTYPE(arr1->data,arr2->data,char*);
-    SWAPTYPE(arr1->nd,arr2->nd,int);
-    SWAPTYPE(arr1->dimensions,arr2->dimensions,npy_intp*);
-    SWAPTYPE(arr1->strides,arr2->strides,npy_intp*);
-    SWAPTYPE(arr1->base,arr2->base,PyObject*);
-    SWAPTYPE(arr1->descr,arr2->descr,PyArray_Descr*);
-    SWAPTYPE(arr1->flags,arr2->flags,int);
-    /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */
-    return 0;
-}
-
-#define ARRAY_ISCOMPATIBLE(arr,type_num) \
-    ( (PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) \
-      ||(PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) \
-      ||(PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) \
-      ||(PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) \
-    )
-
-extern
-PyArrayObject* array_from_pyobj(const int type_num,
-                                npy_intp *dims,
-                                const int rank,
-                                const int intent,
-                                PyObject *obj) {
-    /*
-     * Note about reference counting
-     * -----------------------------
-     * If the caller returns the array to Python, it must be done with
-     * Py_BuildValue("N",arr).
-     * Otherwise, if obj!=arr then the caller must call Py_DECREF(arr).
-     *
-     * Note on intent(cache,out,..)
-     * ---------------------
-     * Don't expect correct data when returning intent(cache) array.
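 *
 * (Illustrative usage sketch, not part of the original source;
 * `some_obj` stands in for any incoming PyObject *. A caller wanting
 * a rank-2, Fortran-ordered double array, with unknown dimensions
 * filled in from the input, would write:
 *
 *     npy_intp dims[2] = {-1, -1};
 *     PyArrayObject *arr = array_from_pyobj(NPY_DOUBLE, dims, 2,
 *                                           F2PY_INTENT_IN, some_obj);
 *     if (arr == NULL) return NULL;     -- exception is already set
 *     -- ... work with PyArray_DATA(arr) and the filled-in dims ...
 *     if ((PyObject *)arr != some_obj) Py_DECREF(arr);
 *
 * following the reference-counting note above.)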
- * - */ - char mess[200]; - PyArrayObject *arr = NULL; - PyArray_Descr *descr; - char typechar; - int elsize; - - if ((intent & F2PY_INTENT_HIDE) - || ((intent & F2PY_INTENT_CACHE) && (obj==Py_None)) - || ((intent & F2PY_OPTIONAL) && (obj==Py_None)) - ) { - /* intent(cache), optional, intent(hide) */ - if (count_negative_dimensions(rank,dims) > 0) { - int i; - strcpy(mess, "failed to create intent(cache|hide)|optional array" - "-- must have defined dimensions but got ("); - for(i=0;ielsize = 1; - descr->type = NPY_CHARLTR; - } - elsize = descr->elsize; - typechar = descr->type; - Py_DECREF(descr); - if (PyArray_Check(obj)) { - arr = (PyArrayObject *)obj; - - if (intent & F2PY_INTENT_CACHE) { - /* intent(cache) */ - if (PyArray_ISONESEGMENT(arr) - && PyArray_ITEMSIZE(arr)>=elsize) { - if (check_and_fix_dimensions(arr, rank, dims)) { - return NULL; - } - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - return arr; - } - strcpy(mess, "failed to initialize intent(cache) array"); - if (!PyArray_ISONESEGMENT(arr)) - strcat(mess, " -- input must be in one segment"); - if (PyArray_ITEMSIZE(arr)type,typechar); - if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) - sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent)); - PyErr_SetString(PyExc_ValueError,mess); - return NULL; - } - - /* here we have always intent(in) or intent(inplace) */ - - { - PyArrayObject * retarr; - retarr = (PyArrayObject *) \ - PyArray_New(&PyArray_Type, PyArray_NDIM(arr), PyArray_DIMS(arr), type_num, - NULL,NULL,1, - !(intent&F2PY_INTENT_C), - NULL); - if (retarr==NULL) - return NULL; - F2PY_REPORT_ON_ARRAY_COPY_FROMARR; - if (PyArray_CopyInto(retarr, arr)) { - Py_DECREF(retarr); - return NULL; - } - if (intent & F2PY_INTENT_INPLACE) { - if (swap_arrays(arr,retarr)) - return NULL; /* XXX: set exception */ - Py_XDECREF(retarr); - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - } else { - arr = retarr; - } - } - return arr; - } - - if ((intent & F2PY_INTENT_INOUT) || - (intent & F2PY_INTENT_INPLACE) || - (intent & F2PY_INTENT_CACHE)) { - PyErr_SetString(PyExc_TypeError, - "failed to initialize intent(inout|inplace|cache) " - "array, input not an array"); - return NULL; - } - - { - PyArray_Descr * descr = PyArray_DescrFromType(type_num); - /* compatibility with NPY_CHAR */ - if (type_num == NPY_STRING) { - PyArray_DESCR_REPLACE(descr); - if (descr == NULL) { - return NULL; - } - descr->elsize = 1; - descr->type = NPY_CHARLTR; - } - F2PY_REPORT_ON_ARRAY_COPY_FROMANY; - arr = (PyArrayObject *) \ - PyArray_FromAny(obj, descr, 0,0, - ((intent & F2PY_INTENT_C)?NPY_ARRAY_CARRAY:NPY_ARRAY_FARRAY) \ - | NPY_ARRAY_FORCECAST, NULL); - if (arr==NULL) - return NULL; - if (check_and_fix_dimensions(arr, rank, dims)) { - return NULL; - } - return arr; - } - -} - -/*****************************************/ -/* Helper functions for array_from_pyobj */ -/*****************************************/ - -static -int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims) -{ - /* - * This function fills in blanks (that are -1's) in dims list using - * the dimensions from arr. It also checks that non-blank dims will - * match with the corresponding values in arr dimensions. - * - * Returns 0 if the function is successful. - * - * If an error condition is detected, an exception is set and 1 is returned. 
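 *
 * (Worked example with illustrative values not taken from the original
 * text: with rank=2 and dims={-1, 3}, an incoming arr of shape (5, 3)
 * fills the blank entry, leaving dims={5, 3}; with dims={4, 3} the
 * fixed 4 disagrees with arr's 5, so a ValueError is set and 1 is
 * returned.)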
- */ - const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1; -#ifdef DEBUG_COPY_ND_ARRAY - dump_attrs(arr); - printf("check_and_fix_dimensions:init: dims="); - dump_dims(rank,dims); -#endif - if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ - npy_intp new_size = 1; - int free_axe = -1; - int i; - npy_intp d; - /* Fill dims where -1 or 0; check dimensions; calc new_size; */ - for(i=0;i= 0) { - if (d>1 && dims[i]!=d) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", - i, dims[i], d); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else { - dims[i] = d ? d : 1; - } - new_size *= dims[i]; - } - for(i=PyArray_NDIM(arr);i1) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be %" NPY_INTP_FMT - " but got 0 (not defined).\n", - i, dims[i]); - return 1; - } else if (free_axe<0) - free_axe = i; - else - dims[i] = 1; - if (free_axe>=0) { - dims[free_axe] = arr_size/new_size; - new_size *= dims[free_axe]; - } - if (new_size != arr_size) { - PyErr_Format(PyExc_ValueError, - "unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT - " (maybe too many free indices)\n", - new_size, arr_size); - return 1; - } - } else if (rank==PyArray_NDIM(arr)) { - npy_intp new_size = 1; - int i; - npy_intp d; - for (i=0; i=0) { - if (d > 1 && d!=dims[i]) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", - i, dims[i], d); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else dims[i] = d; - new_size *= dims[i]; - } - if (new_size != arr_size) { - PyErr_Format(PyExc_ValueError, - "unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT "\n", - new_size, arr_size); - return 1; - } - } else { /* [[1,2]] -> [[1],[2]] */ - int i,j; - npy_intp d; - int effrank; - npy_intp size; - for (i=0,effrank=0;i1) ++effrank; - if (dims[rank-1]>=0) - if (effrank>rank) { - PyErr_Format(PyExc_ValueError, - "too many axes: %d (effrank=%d), " - "expected rank=%d\n", - PyArray_NDIM(arr), effrank, rank); - return 1; - } - - for (i=0,j=0;i=PyArray_NDIM(arr)) d = 1; - else d = PyArray_DIM(arr,j++); - if (dims[i]>=0) { - if (d>1 && d!=dims[i]) { - PyErr_Format(PyExc_ValueError, - "%d-th dimension must be fixed to %" - NPY_INTP_FMT " but got %" NPY_INTP_FMT - " (real index=%d)\n", - i, dims[i], d, j-1); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else - dims[i] = d; - } - - for (i=rank;i [1,2,3,4] */ - while (j=PyArray_NDIM(arr)) d = 1; - else d = PyArray_DIM(arr,j++); - dims[rank-1] *= d; - } - for (i=0,size=1;i= 0x03000000 -#define PyString_Check PyBytes_Check -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_FromString PyBytes_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString - -#define PyInt_Check PyLong_Check -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsLong PyLong_AsLong - -#define PyNumber_Int PyNumber_Long - -#else - -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#endif - - -#ifdef F2PY_REPORT_ATEXIT -#include - extern void f2py_start_clock(void); - extern void f2py_stop_clock(void); - extern void f2py_start_call_clock(void); - extern void f2py_stop_call_clock(void); - extern void f2py_cb_start_clock(void); - extern void 
f2py_cb_stop_clock(void); - extern void f2py_cb_start_call_clock(void); - extern void f2py_cb_stop_call_clock(void); - extern void f2py_report_on_exit(int,void*); -#endif - -#ifdef DMALLOC -#include "dmalloc.h" -#endif - -/* Fortran object interface */ - -/* -123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 - -PyFortranObject represents various Fortran objects: -Fortran (module) routines, COMMON blocks, module data. - -Author: Pearu Peterson -*/ - -#define F2PY_MAX_DIMS 40 - -typedef void (*f2py_set_data_func)(char*,npy_intp*); -typedef void (*f2py_void_func)(void); -typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); - - /*typedef void* (*f2py_c_func)(void*,...);*/ - -typedef void *(*f2pycfunc)(void); - -typedef struct { - char *name; /* attribute (array||routine) name */ - int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ - struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ - int type; /* PyArray_ || not used */ - char *data; /* pointer to array || Fortran routine */ - f2py_init_func func; /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ - char *doc; /* documentation string; only recommended - for routines. */ -} FortranDataDef; - -typedef struct { - PyObject_HEAD - int len; /* Number of attributes */ - FortranDataDef *defs; /* An array of FortranDataDef's */ - PyObject *dict; /* Fortran object attribute dictionary */ -} PyFortranObject; - -#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) -#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) - - extern PyTypeObject PyFortran_Type; - extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); - extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); - extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); - -#if PY_VERSION_HEX >= 0x03000000 - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); -void * F2PyCapsule_AsVoidPtr(PyObject *obj); -int F2PyCapsule_Check(PyObject *ptr); - -#else - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)); -void * F2PyCapsule_AsVoidPtr(PyObject *ptr); -int F2PyCapsule_Check(PyObject *ptr); - -#endif - -#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) -#define F2PY_INTENT_IN 1 -#define F2PY_INTENT_INOUT 2 -#define F2PY_INTENT_OUT 4 -#define F2PY_INTENT_HIDE 8 -#define F2PY_INTENT_CACHE 16 -#define F2PY_INTENT_COPY 32 -#define F2PY_INTENT_C 64 -#define F2PY_OPTIONAL 128 -#define F2PY_INTENT_INPLACE 256 -#define F2PY_INTENT_ALIGNED4 512 -#define F2PY_INTENT_ALIGNED8 1024 -#define F2PY_INTENT_ALIGNED16 2048 - -#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) -#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) -#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) -#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) - -#define F2PY_GET_ALIGNMENT(intent) \ - (F2PY_ALIGN4(intent) ? 4 : \ - (F2PY_ALIGN8(intent) ? 8 : \ - (F2PY_ALIGN16(intent) ? 
16 : 1) )) -#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) - - extern PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj); - extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); - -#ifdef DEBUG_COPY_ND_ARRAY - extern void dump_attrs(const PyArrayObject* arr); -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_FORTRANOBJECT_H */ diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c deleted file mode 100644 index 978db4e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ /dev/null @@ -1,245 +0,0 @@ -/* File: wrapmodule.c - * This file is auto-generated with f2py (version:2_1330). - * Hand edited by Pearu. - * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, - * written by Pearu Peterson . - * See http://cens.ioc.ee/projects/f2py2e/ - * Generation date: Fri Oct 21 22:41:12 2005 - * $Revision:$ - * $Date:$ - * Do not edit this file directly unless you know what you are doing!!! - */ -#ifdef __cplusplus -extern "C" { -#endif - -/*********************** See f2py2e/cfuncs.py: includes ***********************/ -#include "Python.h" -#include "fortranobject.h" -#include - -static PyObject *wrap_error; -static PyObject *wrap_module; - -/************************************ call ************************************/ -static char doc_f2py_rout_wrap_call[] = "\ -Function signature:\n\ - arr = call(type_num,dims,intent,obj)\n\ -Required arguments:\n" -" type_num : input int\n" -" dims : input int-sequence\n" -" intent : input int\n" -" obj : input python object\n" -"Return objects:\n" -" arr : array"; -static PyObject *f2py_rout_wrap_call(PyObject *capi_self, - PyObject *capi_args) { - PyObject * volatile capi_buildvalue = NULL; - int type_num = 0; - npy_intp *dims = NULL; - PyObject *dims_capi = Py_None; - int rank = 0; - int intent = 0; - PyArrayObject *capi_arr_tmp = NULL; - PyObject *arr_capi = Py_None; - int i; - - if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\ - &type_num,&dims_capi,&intent,&arr_capi)) - return NULL; - rank = PySequence_Length(dims_capi); - dims = malloc(rank*sizeof(npy_intp)); - for (i=0;ikind, - PyArray_DESCR(arr)->type, - PyArray_TYPE(arr), - PyArray_ITEMSIZE(arr), - PyArray_DESCR(arr)->alignment, - PyArray_FLAGS(arr), - PyArray_ITEMSIZE(arr)); -} - -static PyMethodDef f2py_module_methods[] = { - - {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, - {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, - {NULL,NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "test_array_from_pyobj_ext", - NULL, - -1, - f2py_module_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define RETVAL m -PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { -#else -#define RETVAL -PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) { -#endif - PyObject *m,*d, *s; -#if PY_VERSION_HEX >= 0x03000000 - m = wrap_module = PyModule_Create(&moduledef); -#else - m = wrap_module = Py_InitModule("test_array_from_pyobj_ext", 
f2py_module_methods); -#endif - Py_TYPE(&PyFortran_Type) = &PyType_Type; - import_array(); - if (PyErr_Occurred()) - Py_FatalError("can't initialize module wrap (failed to import numpy)"); - d = PyModule_GetDict(m); - s = PyString_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" -" arr = call(type_num,dims,intent,obj)\n" -"."); - PyDict_SetItemString(d, "__doc__", s); - wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); - Py_DECREF(s); - -#define ADDCONST(NAME, CONST) \ - s = PyInt_FromLong(CONST); \ - PyDict_SetItemString(d, NAME, s); \ - Py_DECREF(s) - - ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); - ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); - ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); - ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); - ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); - ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); - ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); - ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); - ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); - ADDCONST("NPY_BOOL", NPY_BOOL); - ADDCONST("NPY_BYTE", NPY_BYTE); - ADDCONST("NPY_UBYTE", NPY_UBYTE); - ADDCONST("NPY_SHORT", NPY_SHORT); - ADDCONST("NPY_USHORT", NPY_USHORT); - ADDCONST("NPY_INT", NPY_INT); - ADDCONST("NPY_UINT", NPY_UINT); - ADDCONST("NPY_INTP", NPY_INTP); - ADDCONST("NPY_UINTP", NPY_UINTP); - ADDCONST("NPY_LONG", NPY_LONG); - ADDCONST("NPY_ULONG", NPY_ULONG); - ADDCONST("NPY_LONGLONG", NPY_LONGLONG); - ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); - ADDCONST("NPY_FLOAT", NPY_FLOAT); - ADDCONST("NPY_DOUBLE", NPY_DOUBLE); - ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); - ADDCONST("NPY_CFLOAT", NPY_CFLOAT); - ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); - ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); - ADDCONST("NPY_OBJECT", NPY_OBJECT); - ADDCONST("NPY_STRING", NPY_STRING); - ADDCONST("NPY_UNICODE", NPY_UNICODE); - ADDCONST("NPY_VOID", NPY_VOID); - ADDCONST("NPY_NTYPES", NPY_NTYPES); - ADDCONST("NPY_NOTYPE", NPY_NOTYPE); - ADDCONST("NPY_USERDEF", NPY_USERDEF); - - ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); - ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); - ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); - ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); - ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); - ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); - ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); - ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); - ADDCONST("UPDATEIFCOPY", NPY_ARRAY_UPDATEIFCOPY); - ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); - - ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); - ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); - ADDCONST("CARRAY", NPY_ARRAY_CARRAY); - ADDCONST("FARRAY", NPY_ARRAY_FARRAY); - ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); - ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); - ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); - ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); - -#undef ADDCONST( - - if (PyErr_Occurred()) - Py_FatalError("can't initialize module wrap"); - -#ifdef F2PY_REPORT_ATEXIT - on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); -#endif - - return RETVAL; -} -#ifdef __cplusplus -} -#endif diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap deleted file mode 100644 index 2665f89..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap +++ /dev/null @@ -1 +0,0 @@ -dict(real=dict(rk="double")) diff --git 
a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 deleted file mode 100644 index b301710..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 +++ /dev/null @@ -1,34 +0,0 @@ - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 deleted file mode 100644 index cbe6317..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 +++ /dev/null @@ -1,41 +0,0 @@ - -module mod - -contains - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum - - -end module mod diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 deleted file mode 100644 index 337465a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 +++ /dev/null @@ -1,19 +0,0 @@ -subroutine sum_with_use(x, res) - use precision - - implicit none - - real(kind=rk), intent(in) :: x(:) - real(kind=rk), intent(out) :: res - - integer :: i - - !print *, "size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - - end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 deleted file mode 100644 index ed6c70c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 +++ /dev/null @@ -1,4 +0,0 @@ -module precision - integer, parameter :: rk = selected_real_kind(8) - integer, parameter :: ik = selected_real_kind(4) -end module diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/common/block.f b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/common/block.f deleted file mode 100644 index 7ea7968..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/common/block.f +++ /dev/null @@ -1,11 +0,0 @@ - SUBROUTINE INITCB - DOUBLE PRECISION LONG - CHARACTER STRING - INTEGER OK - - COMMON /BLOCK/ LONG, STRING, OK - LONG = 1.0 - STRING = '2' - OK = 3 - RETURN - END diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 deleted file mode 100644 index d3d15cf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 +++ /dev/null @@ -1,20 +0,0 @@ - - -subroutine 
selectedrealkind(p, r, res) - implicit none - - integer, intent(in) :: p, r - !f2py integer :: r=0 - integer, intent(out) :: res - res = selected_real_kind(p, r) - -end subroutine - -subroutine selectedintkind(p, res) - implicit none - - integer, intent(in) :: p - integer, intent(out) :: res - res = selected_int_kind(p) - -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo.f b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo.f deleted file mode 100644 index c347425..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo.f +++ /dev/null @@ -1,5 +0,0 @@ - subroutine bar11(a) -cf2py intent(out) a - integer a - a = 11 - end diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 deleted file mode 100644 index 7543a6a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 +++ /dev/null @@ -1,8 +0,0 @@ - module foo_fixed - contains - subroutine bar12(a) -!f2py intent(out) a - integer a - a = 12 - end subroutine bar12 - end module foo_fixed diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 deleted file mode 100644 index c1b641f..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 +++ /dev/null @@ -1,8 +0,0 @@ -module foo_free -contains - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 -end module foo_free diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 deleted file mode 100644 index ac90ced..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 +++ /dev/null @@ -1,57 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! used, for instance Pi = 3._dp -subroutine foo(x) - implicit none - integer, parameter :: sp = selected_real_kind(6) - integer, parameter :: dp = selected_real_kind(15) - integer, parameter :: ii = selected_int_kind(9) - integer, parameter :: il = selected_int_kind(18) - real(dp), intent(inout) :: x - dimension x(3) - real(sp), parameter :: three_s = 3._sp - real(dp), parameter :: three_d = 3._dp - integer(ii), parameter :: three_i = 3_ii - integer(il), parameter :: three_l = 3_il - x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l - x(2) = x(2) * three_s - x(3) = x(3) * three_l - return -end subroutine - - -subroutine foo_no(x) - implicit none - integer, parameter :: sp = selected_real_kind(6) - integer, parameter :: dp = selected_real_kind(15) - integer, parameter :: ii = selected_int_kind(9) - integer, parameter :: il = selected_int_kind(18) - real(dp), intent(inout) :: x - dimension x(3) - real(sp), parameter :: three_s = 3. - real(dp), parameter :: three_d = 3. 
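Aside: the parameter/*.f90 sources in this stretch of the diff exercise f2py's interception of Fortran `parameter` constants (three_s, three_d, three_i, three_l). A minimal sketch of how such a file is typically built and driven from Python, assuming gfortran and f2py are on the PATH; the module name `params` is hypothetical:

    # shell: f2py -c constant_both.f90 -m params
    import numpy as np
    import params  # hypothetical extension module produced by the f2py command above

    x = np.arange(3, dtype=np.float64)   # contiguous float64, as intent(inout) requires
    params.foo(x)                        # updated in place using the parameter constants
    print(x)                             # [27., 3., 6.] = [0 + 1*3*3 + 2*3*3, 1*3, 2*3]

A non-contiguous view such as np.arange(6, dtype=np.float64)[::2] would raise ValueError instead, which is exactly what the test_parameter.py deletions further down assert.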
- integer(ii), parameter :: three_i = 3 - integer(il), parameter :: three_l = 3 - x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l - x(2) = x(2) * three_s - x(3) = x(3) * three_l - return -end subroutine - -subroutine foo_sum(x) - implicit none - integer, parameter :: sp = selected_real_kind(6) - integer, parameter :: dp = selected_real_kind(15) - integer, parameter :: ii = selected_int_kind(9) - integer, parameter :: il = selected_int_kind(18) - real(dp), intent(inout) :: x - dimension x(3) - real(sp), parameter :: three_s = 2._sp + 1._sp - real(dp), parameter :: three_d = 1._dp + 2._dp - integer(ii), parameter :: three_i = 2_ii + 1_ii - integer(il), parameter :: three_l = 1_il + 2_il - x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l - x(2) = x(2) * three_s - x(3) = x(3) * three_l - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 deleted file mode 100644 index e51f5e9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 +++ /dev/null @@ -1,15 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! used, for instance Pi = 3._dp -subroutine foo_compound_int(x) - implicit none - integer, parameter :: ii = selected_int_kind(9) - integer(ii), intent(inout) :: x - dimension x(3) - integer(ii), parameter :: three = 3_ii - integer(ii), parameter :: two = 2_ii - integer(ii), parameter :: six = three * 1_ii * two - - x(1) = x(1) + x(2) + x(3) * six - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 deleted file mode 100644 index aaa83d2..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 +++ /dev/null @@ -1,22 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! used, for instance Pi = 3._dp -subroutine foo_int(x) - implicit none - integer, parameter :: ii = selected_int_kind(9) - integer(ii), intent(inout) :: x - dimension x(3) - integer(ii), parameter :: three = 3_ii - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - -subroutine foo_long(x) - implicit none - integer, parameter :: ii = selected_int_kind(18) - integer(ii), intent(inout) :: x - dimension x(3) - integer(ii), parameter :: three = 3_ii - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 deleted file mode 100644 index 62c9a5b..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 +++ /dev/null @@ -1,23 +0,0 @@ -! Check that parameters are correct intercepted. -! Specifically that types of constants without -! compound kind specs are correctly inferred -! adapted Gibbs iteration code from pymc -! 
for this test case -subroutine foo_non_compound_int(x) - implicit none - integer, parameter :: ii = selected_int_kind(9) - - integer(ii) maxiterates - parameter (maxiterates=2) - - integer(ii) maxseries - parameter (maxseries=2) - - integer(ii) wasize - parameter (wasize=maxiterates*maxseries) - integer(ii), intent(inout) :: x - dimension x(wasize) - - x(1) = x(1) + x(2) + x(3) + x(4) * wasize - return -end subroutine diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 deleted file mode 100644 index 02ac9dd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 +++ /dev/null @@ -1,23 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! used, for instance Pi = 3._dp -subroutine foo_single(x) - implicit none - integer, parameter :: rp = selected_real_kind(6) - real(rp), intent(inout) :: x - dimension x(3) - real(rp), parameter :: three = 3._rp - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - -subroutine foo_double(x) - implicit none - integer, parameter :: rp = selected_real_kind(15) - real(rp), intent(inout) :: x - dimension x(3) - real(rp), parameter :: three = 3._rp - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 deleted file mode 100644 index 80cdad9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 +++ /dev/null @@ -1,9 +0,0 @@ -! Check that intent(in out) translates as intent(inout). -! The separation seems to be a common usage. - subroutine foo(x) - implicit none - real(4), intent(in out) :: x - dimension x(3) - x(1) = x(1) + x(2) + x(3) - return - end diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/size/foo.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/size/foo.f90 deleted file mode 100644 index 5b66f8c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/size/foo.f90 +++ /dev/null @@ -1,44 +0,0 @@ - -subroutine foo(a, n, m, b) - implicit none - - real, intent(in) :: a(n, m) - integer, intent(in) :: n, m - real, intent(out) :: b(size(a, 1)) - - integer :: i - - do i = 1, size(b) - b(i) = sum(a(i,:)) - enddo -end subroutine - -subroutine trans(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( size(x,2), size(x,1) ) :: y - integer :: N, M, i, j - N = size(x,1) - M = size(x,2) - DO i=1,N - do j=1,M - y(j,i) = x(i,j) - END DO - END DO -end subroutine trans - -subroutine flatten(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( size(x) ) :: y - integer :: N, M, i, j, k - N = size(x,1) - M = size(x,2) - k = 1 - DO i=1,N - do j=1,M - y(k) = x(i,j) - k = k + 1 - END DO - END DO -end subroutine flatten diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/string/char.f90 b/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/string/char.f90 deleted file mode 100644 index bb7985c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/src/string/char.f90 +++ /dev/null @@ -1,29 +0,0 @@ -MODULE char_test - -CONTAINS - -SUBROUTINE change_strings(strings, n_strs, out_strings) - IMPLICIT NONE - - ! 
Inputs - INTEGER, INTENT(IN) :: n_strs - CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings - CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings - -!f2py INTEGER, INTENT(IN) :: n_strs -!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings -!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings - - ! Misc. - INTEGER*4 :: j - - - DO j=1, n_strs - out_strings(1,j) = strings(1,j) - out_strings(2,j) = 'A' - END DO - -END SUBROUTINE change_strings - -END MODULE char_test - diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py deleted file mode 100644 index a800901..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py +++ /dev/null @@ -1,581 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import copy -import pytest - -from numpy import ( - array, alltrue, ndarray, zeros, dtype, intp, clongdouble - ) -from numpy.testing import assert_, assert_equal -from numpy.core.multiarray import typeinfo -from . import util - -wrap = None - - -def setup_module(): - """ - Build the required testing extension module - - """ - global wrap - - # Check compiler availability first - if not util.has_c_compiler(): - pytest.skip("No C compiler available") - - if wrap is None: - config_code = """ - config.add_extension('test_array_from_pyobj_ext', - sources=['wrapmodule.c', 'fortranobject.c'], - define_macros=[]) - """ - d = os.path.dirname(__file__) - src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), - os.path.join(d, '..', 'src', 'fortranobject.c'), - os.path.join(d, '..', 'src', 'fortranobject.h')] - wrap = util.build_module_distutils(src, config_code, - 'test_array_from_pyobj_ext') - - -def flags_info(arr): - flags = wrap.array_attrs(arr)[6] - return flags2names(flags) - - -def flags2names(flags): - info = [] - for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', - 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', - 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', - 'CARRAY', 'FARRAY' - ]: - if abs(flags) & getattr(wrap, flagname, 0): - info.append(flagname) - return info - - -class Intent(object): - - def __init__(self, intent_list=[]): - self.intent_list = intent_list[:] - flags = 0 - for i in intent_list: - if i == 'optional': - flags |= wrap.F2PY_OPTIONAL - else: - flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper()) - self.flags = flags - - def __getattr__(self, name): - name = name.lower() - if name == 'in_': - name = 'in' - return self.__class__(self.intent_list + [name]) - - def __str__(self): - return 'intent(%s)' % (','.join(self.intent_list)) - - def __repr__(self): - return 'Intent(%r)' % (self.intent_list) - - def is_intent(self, *names): - for name in names: - if name not in self.intent_list: - return False - return True - - def is_intent_exact(self, *names): - return len(self.intent_list) == len(names) and self.is_intent(*names) - -intent = Intent() - -_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', - 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', - 'FLOAT', 'DOUBLE', 'CFLOAT'] - -_cast_dict = {'BOOL': ['BOOL']} -_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] -_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] -_cast_dict['BYTE'] = ['BYTE'] -_cast_dict['UBYTE'] = ['UBYTE'] -_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] -_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] 
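Aside: the _cast_dict table here records, per target type, the source types the test harness treats as losslessly castable. NumPy exposes a broadly similar (though not identical) notion through np.can_cast; a minimal sketch:

    import numpy as np

    # Widening casts pass the default 'safe' rule; narrowing casts only pass
    # when explicitly forced, mirroring the shape of the table above.
    print(np.can_cast(np.int16, np.int32))                        # True
    print(np.can_cast(np.float64, np.float32))                    # False
    print(np.can_cast(np.float64, np.float32, casting='unsafe'))  # True

The dict is hand-maintained in terms of the wrapper's raw NPY_* type numbers rather than derived from np.can_cast, so the two need not agree exactly.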
-_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] -_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] - -_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] -_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] - -_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] -_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] - -_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] -_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] - -_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] - -# 32 bit system malloc typically does not provide the alignment required by -# 16 byte long double types this means the inout intent cannot be satisfied -# and several tests fail as the alignment flag can be randomly true or fals -# when numpy gains an aligned allocator the tests could be enabled again -if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and - sys.platform != 'win32'): - _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) - _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ - ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] - _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ - ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] - _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] - - -class Type(object): - _type_cache = {} - - def __new__(cls, name): - if isinstance(name, dtype): - dtype0 = name - name = None - for n, i in typeinfo.items(): - if not isinstance(i, type) and dtype0.type is i.type: - name = n - break - obj = cls._type_cache.get(name.upper(), None) - if obj is not None: - return obj - obj = object.__new__(cls) - obj._init(name) - cls._type_cache[name.upper()] = obj - return obj - - def _init(self, name): - self.NAME = name.upper() - info = typeinfo[self.NAME] - self.type_num = getattr(wrap, 'NPY_' + self.NAME) - assert_equal(self.type_num, info.num) - self.dtype = info.type - self.elsize = info.bits / 8 - self.dtypechar = info.char - - def cast_types(self): - return [self.__class__(_m) for _m in _cast_dict[self.NAME]] - - def all_types(self): - return [self.__class__(_m) for _m in _type_names] - - def smaller_types(self): - bits = typeinfo[self.NAME].alignment - types = [] - for name in _type_names: - if typeinfo[name].alignment < bits: - types.append(Type(name)) - return types - - def equal_types(self): - bits = typeinfo[self.NAME].alignment - types = [] - for name in _type_names: - if name == self.NAME: - continue - if typeinfo[name].alignment == bits: - types.append(Type(name)) - return types - - def larger_types(self): - bits = typeinfo[self.NAME].alignment - types = [] - for name in _type_names: - if typeinfo[name].alignment > bits: - types.append(Type(name)) - return types - - -class Array(object): - - def __init__(self, typ, dims, intent, obj): - self.type = typ - self.dims = dims - self.intent = intent - self.obj_copy = copy.deepcopy(obj) - self.obj = obj - - # arr.dtypechar may be different from typ.dtypechar - self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) - - assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) - - self.arr_attr = wrap.array_attrs(self.arr) - - if len(dims) > 1: - if self.intent.is_intent('c'): - assert_(intent.flags & wrap.F2PY_INTENT_C) - assert_(not self.arr.flags['FORTRAN'], - repr((self.arr.flags, getattr(obj, 'flags', None)))) - assert_(self.arr.flags['CONTIGUOUS']) - assert_(not self.arr_attr[6] & wrap.FORTRAN) - else: - assert_(not intent.flags & wrap.F2PY_INTENT_C) - assert_(self.arr.flags['FORTRAN']) - assert_(not 
self.arr.flags['CONTIGUOUS']) - assert_(self.arr_attr[6] & wrap.FORTRAN) - - if obj is None: - self.pyarr = None - self.pyarr_attr = None - return - - if intent.is_intent('cache'): - assert_(isinstance(obj, ndarray), repr(type(obj))) - self.pyarr = array(obj).reshape(*dims).copy() - else: - self.pyarr = array(array(obj, dtype=typ.dtypechar).reshape(*dims), - order=self.intent.is_intent('c') and 'C' or 'F') - assert_(self.pyarr.dtype == typ, - repr((self.pyarr.dtype, typ))) - assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) - self.pyarr_attr = wrap.array_attrs(self.pyarr) - - if len(dims) > 1: - if self.intent.is_intent('c'): - assert_(not self.pyarr.flags['FORTRAN']) - assert_(self.pyarr.flags['CONTIGUOUS']) - assert_(not self.pyarr_attr[6] & wrap.FORTRAN) - else: - assert_(self.pyarr.flags['FORTRAN']) - assert_(not self.pyarr.flags['CONTIGUOUS']) - assert_(self.pyarr_attr[6] & wrap.FORTRAN) - - assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd - assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions - if self.arr_attr[1] <= 1: - assert_(self.arr_attr[3] == self.pyarr_attr[3], - repr((self.arr_attr[3], self.pyarr_attr[3], - self.arr.tobytes(), self.pyarr.tobytes()))) # strides - assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], - repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr - assert_(self.arr_attr[6] == self.pyarr_attr[6], - repr((self.arr_attr[6], self.pyarr_attr[6], - flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), - flags2names(self.arr_attr[6]), intent))) # flags - - if intent.is_intent('cache'): - assert_(self.arr_attr[5][3] >= self.type.elsize, - repr((self.arr_attr[5][3], self.type.elsize))) - else: - assert_(self.arr_attr[5][3] == self.type.elsize, - repr((self.arr_attr[5][3], self.type.elsize))) - assert_(self.arr_equal(self.pyarr, self.arr)) - - if isinstance(self.obj, ndarray): - if typ.elsize == Type(obj.dtype).elsize: - if not intent.is_intent('copy') and self.arr_attr[1] <= 1: - assert_(self.has_shared_memory()) - - def arr_equal(self, arr1, arr2): - if arr1.shape != arr2.shape: - return False - s = arr1 == arr2 - return alltrue(s.flatten()) - - def __str__(self): - return str(self.arr) - - def has_shared_memory(self): - """Check that created array shares data with input array. 
- """ - if self.obj is self.arr: - return True - if not isinstance(self.obj, ndarray): - return False - obj_attr = wrap.array_attrs(self.obj) - return obj_attr[0] == self.arr_attr[0] - - -class TestIntent(object): - - def test_in_out(self): - assert_equal(str(intent.in_.out), 'intent(in,out)') - assert_(intent.in_.c.is_intent('c')) - assert_(not intent.in_.c.is_intent_exact('c')) - assert_(intent.in_.c.is_intent_exact('c', 'in')) - assert_(intent.in_.c.is_intent_exact('in', 'c')) - assert_(not intent.in_.is_intent('c')) - - -class TestSharedMemory(object): - num2seq = [1, 2] - num23seq = [[1, 2, 3], [4, 5, 6]] - - @pytest.fixture(autouse=True, scope='class', params=_type_names) - def setup_type(self, request): - request.cls.type = Type(request.param) - request.cls.array = lambda self, dims, intent, obj: \ - Array(Type(request.param), dims, intent, obj) - - def test_in_from_2seq(self): - a = self.array([2], intent.in_, self.num2seq) - assert_(not a.has_shared_memory()) - - def test_in_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) - a = self.array([len(self.num2seq)], intent.in_, obj) - if t.elsize == self.type.elsize: - assert_( - a.has_shared_memory(), repr((self.type.dtype, t.dtype))) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_inout_2seq(self): - obj = array(self.num2seq, dtype=self.type.dtype) - a = self.array([len(self.num2seq)], intent.inout, obj) - assert_(a.has_shared_memory()) - - try: - a = self.array([2], intent.in_.inout, self.num2seq) - except TypeError as msg: - if not str(msg).startswith('failed to initialize intent' - '(inout|inplace|cache) array'): - raise - else: - raise SystemError('intent(inout) should have failed on sequence') - - def test_f_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype, order='F') - shape = (len(self.num23seq), len(self.num23seq[0])) - a = self.array(shape, intent.in_.inout, obj) - assert_(a.has_shared_memory()) - - obj = array(self.num23seq, dtype=self.type.dtype, order='C') - shape = (len(self.num23seq), len(self.num23seq[0])) - try: - a = self.array(shape, intent.in_.inout, obj) - except ValueError as msg: - if not str(msg).startswith('failed to initialize intent' - '(inout) array'): - raise - else: - raise SystemError( - 'intent(inout) should have failed on improper array') - - def test_c_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype) - shape = (len(self.num23seq), len(self.num23seq[0])) - a = self.array(shape, intent.in_.c.inout, obj) - assert_(a.has_shared_memory()) - - def test_in_copy_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) - a = self.array([len(self.num2seq)], intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_in_from_23seq(self): - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, self.num23seq) - assert_(not a.has_shared_memory()) - - def test_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_f_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) - if t.elsize == self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) - else: - assert_(not 
a.has_shared_memory(), repr(t.dtype)) - - def test_c_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.c, obj) - if t.elsize == self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_f_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.c.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_in_cache_from_2casttype(self): - for t in self.type.all_types(): - if t.elsize != self.type.elsize: - continue - obj = array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) - a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - a = self.array(shape, intent.in_.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - obj = array(self.num2seq, dtype=t.dtype, order='F') - a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - a = self.array(shape, intent.in_.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - try: - a = self.array(shape, intent.in_.cache, obj[::-1]) - except ValueError as msg: - if not str(msg).startswith('failed to initialize' - ' intent(cache) array'): - raise - else: - raise SystemError( - 'intent(cache) should have failed on multisegmented array') - - def test_in_cache_from_2casttype_failure(self): - for t in self.type.all_types(): - if t.elsize >= self.type.elsize: - continue - obj = array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) - try: - self.array(shape, intent.in_.cache, obj) # Should succeed - except ValueError as msg: - if not str(msg).startswith('failed to initialize' - ' intent(cache) array'): - raise - else: - raise SystemError( - 'intent(cache) should have failed on smaller array') - - def test_cache_hidden(self): - shape = (2,) - a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape == shape) - - shape = (2, 3) - a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape == shape) - - shape = (-1, 3) - try: - a = self.array(shape, intent.cache.hide, None) - except ValueError as msg: - if not str(msg).startswith('failed to create intent' - '(cache|hide)|optional array'): - raise - else: - raise SystemError( - 'intent(cache) should have failed on undefined dimensions') - - def test_hidden(self): - shape = (2,) - a = self.array(shape, intent.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - - shape = (2, 3) - a = self.array(shape, intent.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - shape = (2, 3) - a = self.array(shape, intent.c.hide, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - shape = (-1, 3) - try: - a = self.array(shape, intent.hide, None) - except 
ValueError as msg: - if not str(msg).startswith('failed to create intent' - '(cache|hide)|optional array'): - raise - else: - raise SystemError('intent(hide) should have failed' - ' on undefined dimensions') - - def test_optional_none(self): - shape = (2,) - a = self.array(shape, intent.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - - shape = (2, 3) - a = self.array(shape, intent.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - shape = (2, 3) - a = self.array(shape, intent.c.optional, None) - assert_(a.arr.shape == shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - def test_optional_from_2seq(self): - obj = self.num2seq - shape = (len(obj),) - a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) - - def test_optional_from_23seq(self): - obj = self.num23seq - shape = (len(obj), len(obj[0])) - a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) - - a = self.array(shape, intent.optional.c, obj) - assert_(a.arr.shape == shape) - assert_(not a.has_shared_memory()) - - def test_inplace(self): - obj = array(self.num23seq, dtype=self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) - a.arr[1][2] = 54 - assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - - def test_inplace_from_casttype(self): - for t in self.type.cast_types(): - if t is self.type: - continue - obj = array(self.num23seq, dtype=t.dtype) - assert_(obj.dtype.type == t.dtype) - assert_(obj.dtype.type is not self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) - a.arr[1][2] = 54 - assert_(obj[1][2] == a.arr[1][2] == - array(54, dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - assert_(obj.dtype.type is self.type.dtype) # obj changed inplace! diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_assumed_shape.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_assumed_shape.py deleted file mode 100644 index e5695a6..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_assumed_shape.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest -import tempfile - -from numpy.testing import assert_ -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestAssumedShapeSumExample(util.F2PyTest): - sources = [_path('src', 'assumed_shape', 'foo_free.f90'), - _path('src', 'assumed_shape', 'foo_use.f90'), - _path('src', 'assumed_shape', 'precision.f90'), - _path('src', 'assumed_shape', 'foo_mod.f90'), - _path('src', 'assumed_shape', '.f2py_f2cmap'), - ] - - @pytest.mark.slow - def test_all(self): - r = self.module.fsum([1, 2]) - assert_(r == 3, repr(r)) - r = self.module.sum([1, 2]) - assert_(r == 3, repr(r)) - r = self.module.sum_with_use([1, 2]) - assert_(r == 3, repr(r)) - - r = self.module.mod.sum([1, 2]) - assert_(r == 3, repr(r)) - r = self.module.mod.fsum([1, 2]) - assert_(r == 3, repr(r)) - - -class TestF2cmapOption(TestAssumedShapeSumExample): - def setup(self): - # Use a custom file name for .f2py_f2cmap - self.sources = list(self.sources) - f2cmap_src = self.sources.pop(-1) - - self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) - with open(f2cmap_src, 'rb') as f: - self.f2cmap_file.write(f.read()) - self.f2cmap_file.close() - - self.sources.append(self.f2cmap_file.name) - self.options = ["--f2cmap", self.f2cmap_file.name] - - super(TestF2cmapOption, self).setup() - - def teardown(self): - os.unlink(self.f2cmap_file.name) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_block_docstring.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_block_docstring.py deleted file mode 100644 index 4f16789..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_block_docstring.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import pytest -from . import util - -from numpy.testing import assert_equal, IS_PYPY - -class TestBlockDocString(util.F2PyTest): - code = """ - SUBROUTINE FOO() - INTEGER BAR(2, 3) - - COMMON /BLOCK/ BAR - RETURN - END - """ - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") - def test_block_docstring(self): - expected = "'i'-array(2,3)\n" - assert_equal(self.module.block.__doc__, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_callback.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_callback.py deleted file mode 100644 index 21c29ba..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_callback.py +++ /dev/null @@ -1,165 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import math -import textwrap -import sys -import pytest - -import numpy as np -from numpy.testing import assert_, assert_equal -from . 
import util - - -class TestF77Callback(util.F2PyTest): - code = """ - subroutine t(fun,a) - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine func(a) -cf2py intent(in,out) a - integer a - a = a + 11 - end - - subroutine func0(a) -cf2py intent(out) a - integer a - a = 11 - end - - subroutine t2(a) -cf2py intent(callback) fun - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine string_callback(callback, a) - external callback - double precision callback - double precision a - character*1 r -cf2py intent(out) a - r = 'r' - a = callback(r) - end - - subroutine string_callback_array(callback, cu, lencu, a) - external callback - integer callback - integer lencu - character*8 cu(lencu) - integer a -cf2py intent(out) a - - a = callback(cu, lencu) - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't,t2'.split(',')) - def test_all(self, name): - self.check_function(name) - - @pytest.mark.slow - def test_docstring(self): - expected = textwrap.dedent("""\ - a = t(fun,[fun_extra_args]) - - Wrapper for ``t``. - - Parameters - ---------- - fun : call-back function - - Other Parameters - ---------------- - fun_extra_args : input tuple, optional - Default: () - - Returns - ------- - a : int - - Notes - ----- - Call-back functions:: - - def fun(): return a - Return objects: - a : int - """) - assert_equal(self.module.t.__doc__, expected) - - def check_function(self, name): - t = getattr(self.module, name) - r = t(lambda: 4) - assert_(r == 4, repr(r)) - r = t(lambda a: 5, fun_extra_args=(6,)) - assert_(r == 5, repr(r)) - r = t(lambda a: a, fun_extra_args=(6,)) - assert_(r == 6, repr(r)) - r = t(lambda a: 5 + a, fun_extra_args=(7,)) - assert_(r == 12, repr(r)) - r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,)) - assert_(r == 180, repr(r)) - r = t(math.degrees, fun_extra_args=(math.pi,)) - assert_(r == 180, repr(r)) - - r = t(self.module.func, fun_extra_args=(6,)) - assert_(r == 17, repr(r)) - r = t(self.module.func0) - assert_(r == 11, repr(r)) - r = t(self.module.func0._cpointer) - assert_(r == 11, repr(r)) - - class A(object): - - def __call__(self): - return 7 - - def mth(self): - return 9 - a = A() - r = t(a) - assert_(r == 7, repr(r)) - r = t(a.mth) - assert_(r == 9, repr(r)) - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_string_callback(self): - - def callback(code): - if code == 'r': - return 0 - else: - return 1 - - f = getattr(self.module, 'string_callback') - r = f(callback) - assert_(r == 0, repr(r)) - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_string_callback_array(self): - # See gh-10027 - cu = np.zeros((1, 8), 'S1') - - def callback(cu, lencu): - if cu.shape != (lencu, 8): - return 1 - if cu.dtype != 'S1': - return 2 - if not np.all(cu == b''): - return 3 - return 0 - - f = getattr(self.module, 'string_callback_array') - res = f(callback, cu, len(cu)) - assert_(res == 0, repr(res)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_common.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_common.py deleted file mode 100644 index dcb01b0..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_common.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import pytest - -import numpy as np -from . 
import util - -from numpy.testing import assert_array_equal - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestCommonBlock(util.F2PyTest): - sources = [_path('src', 'common', 'block.f')] - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_common_block(self): - self.module.initcb() - assert_array_equal(self.module.block.long_bn, - np.array(1.0, dtype=np.float64)) - assert_array_equal(self.module.block.string_bn, - np.array('2', dtype='|S1')) - assert_array_equal(self.module.block.ok, - np.array(3, dtype=np.int32)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_compile_function.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_compile_function.py deleted file mode 100644 index 40ea799..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_compile_function.py +++ /dev/null @@ -1,129 +0,0 @@ -"""See https://github.com/numpy/numpy/pull/11937. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import uuid -from importlib import import_module -import pytest - -import numpy.f2py - -from numpy.testing import assert_equal -from . import util - - -def setup_module(): - if sys.platform == 'win32' and sys.version_info[0] < 3: - pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') - if not util.has_c_compiler(): - pytest.skip("Needs C compiler") - if not util.has_f77_compiler(): - pytest.skip('Needs FORTRAN 77 compiler') - - -# extra_args can be a list (since gh-11937) or string. -# also test absence of extra_args -@pytest.mark.parametrize( - "extra_args", [['--noopt', '--debug'], '--noopt --debug', ''] - ) -@pytest.mark.leaks_references(reason="Imported module seems never deleted.") -def test_f2py_init_compile(extra_args): - # flush through the f2py __init__ compile() function code path as a - # crude test for input handling following migration from - # exec_command() to subprocess.check_output() in gh-11937 - - # the Fortran 77 syntax requires 6 spaces before any commands, but - # more space may be added/ - fsource = """ - integer function foo() - foo = 10 + 5 - return - end - """ - # use various helper functions in util.py to enable robust build / - # compile and reimport cycle in test suite - moddir = util.get_module_dir() - modname = util.get_temp_module_name() - - cwd = os.getcwd() - target = os.path.join(moddir, str(uuid.uuid4()) + '.f') - # try running compile() with and without a source_fn provided so - # that the code path where a temporary file for writing Fortran - # source is created is also explored - for source_fn in [target, None]: - # mimic the path changing behavior used by build_module() in - # util.py, but don't actually use build_module() because it has - # its own invocation of subprocess that circumvents the - # f2py.compile code block under test - try: - os.chdir(moddir) - ret_val = numpy.f2py.compile( - fsource, - modulename=modname, - extra_args=extra_args, - source_fn=source_fn - ) - finally: - os.chdir(cwd) - - # check for compile success return value - assert_equal(ret_val, 0) - - # we are not currently able to import the Python-Fortran - # interface module on Windows / Appveyor, even though we do get - # successful compilation on that platform with Python 3.x - if sys.platform != 'win32': - # check for sensible result of Fortran function; that means - # we can import the module name in Python and retrieve the - # result of the sum operation - return_check = import_module(modname) 
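Aside: the API exercised in this deleted test is numpy.f2py.compile, which builds an extension module directly from Fortran source held in a Python string (or bytes) and returns the build's status code. A minimal sketch, assuming a working Fortran toolchain; the module name demo_add is made up:

    import numpy.f2py

    fsource = '''
          integer function add2(a, b)
          integer a, b
          add2 = a + b
          return
          end
    '''
    ret = numpy.f2py.compile(fsource, modulename='demo_add', extension='.f')
    if ret == 0:                      # 0 signals success, as asserted in the test
        import demo_add
        print(demo_add.add2(2, 3))    # -> 5

The six-plus leading spaces on each statement matter: fixed-form Fortran 77 reserves columns 1-6, which is the point of the comment about required spacing in the deleted fsource above.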
- calc_result = return_check.foo() - assert_equal(calc_result, 15) - # Removal from sys.modules, is not as such necessary. Even with - # removal, the module (dict) stays alive. - del sys.modules[modname] - - -def test_f2py_init_compile_failure(): - # verify an appropriate integer status value returned by - # f2py.compile() when invalid Fortran is provided - ret_val = numpy.f2py.compile(b"invalid") - assert_equal(ret_val, 1) - - -def test_f2py_init_compile_bad_cmd(): - # verify that usage of invalid command in f2py.compile() returns - # status value of 127 for historic consistency with exec_command() - # error handling - - # patch the sys Python exe path temporarily to induce an OSError - # downstream NOTE: how bad of an idea is this patching? - try: - temp = sys.executable - sys.executable = 'does not exist' - - # the OSError should take precedence over invalid Fortran - ret_val = numpy.f2py.compile(b"invalid") - assert_equal(ret_val, 127) - finally: - sys.executable = temp - - -@pytest.mark.parametrize('fsource', - ['program test_f2py\nend program test_f2py', - b'program test_f2py\nend program test_f2py',]) -def test_compile_from_strings(tmpdir, fsource): - # Make sure we can compile str and bytes gh-12796 - cwd = os.getcwd() - try: - os.chdir(str(tmpdir)) - ret_val = numpy.f2py.compile( - fsource, - modulename='test_compile_from_strings', - extension='.f90') - assert_equal(ret_val, 0) - finally: - os.chdir(cwd) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_kind.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_kind.py deleted file mode 100644 index 1f7762a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_kind.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -from numpy.testing import assert_ -from numpy.f2py.crackfortran import ( - _selected_int_kind_func as selected_int_kind, - _selected_real_kind_func as selected_real_kind - ) -from . import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestKind(util.F2PyTest): - sources = [_path('src', 'kind', 'foo.f90')] - - @pytest.mark.slow - def test_all(self): - selectedrealkind = self.module.selectedrealkind - selectedintkind = self.module.selectedintkind - - for i in range(40): - assert_(selectedintkind(i) in [selected_int_kind(i), -1], - 'selectedintkind(%s): expected %r but got %r' % - (i, selected_int_kind(i), selectedintkind(i))) - - for i in range(20): - assert_(selectedrealkind(i) in [selected_real_kind(i), -1], - 'selectedrealkind(%s): expected %r but got %r' % - (i, selected_real_kind(i), selectedrealkind(i))) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_mixed.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_mixed.py deleted file mode 100644 index 0337538..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_mixed.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import textwrap -import pytest - -from numpy.testing import assert_, assert_equal -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestMixed(util.F2PyTest): - sources = [_path('src', 'mixed', 'foo.f'), - _path('src', 'mixed', 'foo_fixed.f90'), - _path('src', 'mixed', 'foo_free.f90')] - - @pytest.mark.slow - def test_all(self): - assert_(self.module.bar11() == 11) - assert_(self.module.foo_fixed.bar12() == 12) - assert_(self.module.foo_free.bar13() == 13) - - @pytest.mark.slow - def test_docstring(self): - expected = textwrap.dedent("""\ - a = bar11() - - Wrapper for ``bar11``. - - Returns - ------- - a : int - """) - assert_equal(self.module.bar11.__doc__, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_parameter.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_parameter.py deleted file mode 100644 index 6a37868..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_parameter.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -import numpy as np -from numpy.testing import assert_raises, assert_equal - -from . import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestParameters(util.F2PyTest): - # Check that intent(in out) translates as intent(inout) - sources = [_path('src', 'parameter', 'constant_real.f90'), - _path('src', 'parameter', 'constant_integer.f90'), - _path('src', 'parameter', 'constant_both.f90'), - _path('src', 'parameter', 'constant_compound.f90'), - _path('src', 'parameter', 'constant_non_compound.f90'), - ] - - @pytest.mark.slow - def test_constant_real_single(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float32)[::2] - assert_raises(ValueError, self.module.foo_single, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float32) - self.module.foo_single(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def test_constant_real_double(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_double, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo_double(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def test_constant_compound_int(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.int32)[::2] - assert_raises(ValueError, self.module.foo_compound_int, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.int32) - self.module.foo_compound_int(x) - assert_equal(x, [0 + 1 + 2*6, 1, 2]) - - @pytest.mark.slow - def test_constant_non_compound_int(self): - # check values - x = np.arange(4, dtype=np.int32) - self.module.foo_non_compound_int(x) - assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) - - @pytest.mark.slow - def test_constant_integer_int(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.int32)[::2] - assert_raises(ValueError, self.module.foo_int, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.int32) - self.module.foo_int(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def test_constant_integer_long(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.int64)[::2] - assert_raises(ValueError, self.module.foo_long, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.int64) - self.module.foo_long(x) - assert_equal(x, [0 + 1 + 2*3, 1, 2]) - - @pytest.mark.slow - def 
test_constant_both(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) - - @pytest.mark.slow - def test_constant_no(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_no, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo_no(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) - - @pytest.mark.slow - def test_constant_sum(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float64)[::2] - assert_raises(ValueError, self.module.foo_sum, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float64) - self.module.foo_sum(x) - assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_quoted_character.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_quoted_character.py deleted file mode 100644 index c9a1c36..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_quoted_character.py +++ /dev/null @@ -1,35 +0,0 @@ -"""See https://github.com/numpy/numpy/pull/10676. - -""" -from __future__ import division, absolute_import, print_function - -import sys -from importlib import import_module -import pytest - -from numpy.testing import assert_equal -from . import util - - -class TestQuotedCharacter(util.F2PyTest): - code = """ - SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) - CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR - PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", - 1 OPENPAR="(", CLOSEPAR=")") - CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 -Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 - OUT1 = SINGLE - OUT2 = DOUBLE - OUT3 = SEMICOL - OUT4 = EXCLA - OUT5 = OPENPAR - OUT6 = CLOSEPAR - RETURN - END - """ - - @pytest.mark.skipif(sys.platform=='win32', - reason='Fails with MinGW64 Gfortran (Issue #9673)') - def test_quoted_character(self): - assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')')) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_regression.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_regression.py deleted file mode 100644 index 3adae63..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_regression.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -import numpy as np -from numpy.testing import assert_raises, assert_equal - -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestIntentInOut(util.F2PyTest): - # Check that intent(in out) translates as intent(inout) - sources = [_path('src', 'regression', 'inout.f90')] - - @pytest.mark.slow - def test_inout(self): - # non-contiguous should raise error - x = np.arange(6, dtype=np.float32)[::2] - assert_raises(ValueError, self.module.foo, x) - - # check values with contiguous array - x = np.arange(3, dtype=np.float32) - self.module.foo(x) - assert_equal(x, [3, 1, 2]) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_character.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_character.py deleted file mode 100644 index fc3a58d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_character.py +++ /dev/null @@ -1,146 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.testing import assert_ -from . import util - - -class TestReturnCharacter(util.F2PyTest): - - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0', 't1', 's0', 's1']: - assert_(t(23) == b'2') - r = t('ab') - assert_(r == b'a', repr(r)) - r = t(array('ab')) - assert_(r == b'a', repr(r)) - r = t(array(77, 'u1')) - assert_(r == b'M', repr(r)) - #assert_(_raises(ValueError, t, array([77,87]))) - #assert_(_raises(ValueError, t, array(77))) - elif tname in ['ts', 'ss']: - assert_(t(23) == b'23 ', repr(t(23))) - assert_(t('123456789abcdef') == b'123456789a') - elif tname in ['t5', 's5']: - assert_(t(23) == b'23 ', repr(t(23))) - assert_(t('ab') == b'ab ', repr(t('ab'))) - assert_(t('123456789abcdef') == b'12345') - else: - raise NotImplementedError - - -class TestF77ReturnCharacter(TestReturnCharacter): - code = """ - function t0(value) - character value - character t0 - t0 = value - end - function t1(value) - character*1 value - character*1 t1 - t1 = value - end - function t5(value) - character*5 value - character*5 t5 - t5 = value - end - function ts(value) - character*(*) value - character*(*) ts - ts = value - end - - subroutine s0(t0,value) - character value - character t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - character*1 value - character*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s5(t5,value) - character*5 value - character*5 t5 -cf2py intent(out) t5 - t5 = value - end - subroutine ss(ts,value) - character*(*) value - character*10 ts -cf2py intent(out) ts - ts = value - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnCharacter(TestReturnCharacter): - suffix = ".f90" - code = """ -module f90_return_char - contains - function t0(value) - character :: value - character :: t0 - t0 = value - end function t0 - function t1(value) - character(len=1) :: value - character(len=1) :: t1 - t1 = value - end function t1 - function t5(value) - character(len=5) :: value - character(len=5) :: t5 - t5 = value - end function t5 - function ts(value) - character(len=*) :: value - character(len=10) :: ts - ts = value - end function ts - - subroutine s0(t0,value) - character :: value - character :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - character(len=1) :: value - character(len=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine 
s5(t5,value) - character(len=5) :: value - character(len=5) :: t5 -!f2py intent(out) t5 - t5 = value - end subroutine s5 - subroutine ss(ts,value) - character(len=*) :: value - character(len=10) :: ts -!f2py intent(out) ts - ts = value - end subroutine ss -end module f90_return_char - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_char, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_complex.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_complex.py deleted file mode 100644 index 43c884d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_complex.py +++ /dev/null @@ -1,169 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . import util - - -class TestReturnComplex(util.F2PyTest): - - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0', 't8', 's0', 's8']: - err = 1e-5 - else: - err = 0.0 - assert_(abs(t(234j) - 234.0j) <= err) - assert_(abs(t(234.6) - 234.6) <= err) - assert_(abs(t(long(234)) - 234.0) <= err) - assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err) - #assert_( abs(t('234')-234.)<=err) - #assert_( abs(t('234.6')-234.6)<=err) - assert_(abs(t(-234) + 234.) <= err) - assert_(abs(t([234]) - 234.) <= err) - assert_(abs(t((234,)) - 234.) <= err) - assert_(abs(t(array(234)) - 234.) <= err) - assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err) - assert_(abs(t(array([234])) - 234.) <= err) - assert_(abs(t(array([[234]])) - 234.) <= err) - assert_(abs(t(array([234], 'b')) + 22.) <= err) - assert_(abs(t(array([234], 'h')) - 234.) <= err) - assert_(abs(t(array([234], 'i')) - 234.) <= err) - assert_(abs(t(array([234], 'l')) - 234.) <= err) - assert_(abs(t(array([234], 'q')) - 234.) <= err) - assert_(abs(t(array([234], 'f')) - 234.) <= err) - assert_(abs(t(array([234], 'd')) - 234.) <= err) - assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err) - assert_(abs(t(array([234], 'D')) - 234.) 
<= err) - - #assert_raises(TypeError, t, array([234], 'a1')) - assert_raises(TypeError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(TypeError, t, t) - assert_raises(TypeError, t, {}) - - try: - r = t(10 ** 400) - assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) - except OverflowError: - pass - - -class TestF77ReturnComplex(TestReturnComplex): - code = """ - function t0(value) - complex value - complex t0 - t0 = value - end - function t8(value) - complex*8 value - complex*8 t8 - t8 = value - end - function t16(value) - complex*16 value - complex*16 t16 - t16 = value - end - function td(value) - double complex value - double complex td - td = value - end - - subroutine s0(t0,value) - complex value - complex t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s8(t8,value) - complex*8 value - complex*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine s16(t16,value) - complex*16 value - complex*16 t16 -cf2py intent(out) t16 - t16 = value - end - subroutine sd(td,value) - double complex value - double complex td -cf2py intent(out) td - td = value - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnComplex(TestReturnComplex): - suffix = ".f90" - code = """ -module f90_return_complex - contains - function t0(value) - complex :: value - complex :: t0 - t0 = value - end function t0 - function t8(value) - complex(kind=4) :: value - complex(kind=4) :: t8 - t8 = value - end function t8 - function t16(value) - complex(kind=8) :: value - complex(kind=8) :: t16 - t16 = value - end function t16 - function td(value) - double complex :: value - double complex :: td - td = value - end function td - - subroutine s0(t0,value) - complex :: value - complex :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s8(t8,value) - complex(kind=4) :: value - complex(kind=4) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine s16(t16,value) - complex(kind=8) :: value - complex(kind=8) :: t16 -!f2py intent(out) t16 - t16 = value - end subroutine s16 - subroutine sd(td,value) - double complex :: value - double complex :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_complex - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_complex, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_integer.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_integer.py deleted file mode 100644 index 22f4acf..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_integer.py +++ /dev/null @@ -1,181 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . 
import util - - -class TestReturnInteger(util.F2PyTest): - - def check_function(self, t): - assert_(t(123) == 123, repr(t(123))) - assert_(t(123.6) == 123) - assert_(t(long(123)) == 123) - assert_(t('123') == 123) - assert_(t(-123) == -123) - assert_(t([123]) == 123) - assert_(t((123,)) == 123) - assert_(t(array(123)) == 123) - assert_(t(array([123])) == 123) - assert_(t(array([[123]])) == 123) - assert_(t(array([123], 'b')) == 123) - assert_(t(array([123], 'h')) == 123) - assert_(t(array([123], 'i')) == 123) - assert_(t(array([123], 'l')) == 123) - assert_(t(array([123], 'B')) == 123) - assert_(t(array([123], 'f')) == 123) - assert_(t(array([123], 'd')) == 123) - - #assert_raises(ValueError, t, array([123],'S3')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - if t.__doc__.split()[0] in ['t8', 's8']: - assert_raises(OverflowError, t, 100000000000000000000000) - assert_raises(OverflowError, t, 10000000011111111111111.23) - - -class TestF77ReturnInteger(TestReturnInteger): - code = """ - function t0(value) - integer value - integer t0 - t0 = value - end - function t1(value) - integer*1 value - integer*1 t1 - t1 = value - end - function t2(value) - integer*2 value - integer*2 t2 - t2 = value - end - function t4(value) - integer*4 value - integer*4 t4 - t4 = value - end - function t8(value) - integer*8 value - integer*8 t8 - t8 = value - end - - subroutine s0(t0,value) - integer value - integer t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - integer*1 value - integer*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - integer*2 value - integer*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - integer*4 value - integer*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - integer*8 value - integer*8 t8 -cf2py intent(out) t8 - t8 = value - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnInteger(TestReturnInteger): - suffix = ".f90" - code = """ -module f90_return_integer - contains - function t0(value) - integer :: value - integer :: t0 - t0 = value - end function t0 - function t1(value) - integer(kind=1) :: value - integer(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - integer(kind=2) :: value - integer(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - integer(kind=4) :: value - integer(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - integer(kind=8) :: value - integer(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - integer :: value - integer :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - integer(kind=1) :: value - integer(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - integer(kind=2) :: value - integer(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - integer(kind=4) :: value - integer(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - integer(kind=8) :: value - integer(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_integer - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', - 
't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_integer, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_logical.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_logical.py deleted file mode 100644 index 96f215a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_logical.py +++ /dev/null @@ -1,189 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . import util - - -class TestReturnLogical(util.F2PyTest): - - def check_function(self, t): - assert_(t(True) == 1, repr(t(True))) - assert_(t(False) == 0, repr(t(False))) - assert_(t(0) == 0) - assert_(t(None) == 0) - assert_(t(0.0) == 0) - assert_(t(0j) == 0) - assert_(t(1j) == 1) - assert_(t(234) == 1) - assert_(t(234.6) == 1) - assert_(t(long(234)) == 1) - assert_(t(234.6 + 3j) == 1) - assert_(t('234') == 1) - assert_(t('aaa') == 1) - assert_(t('') == 0) - assert_(t([]) == 0) - assert_(t(()) == 0) - assert_(t({}) == 0) - assert_(t(t) == 1) - assert_(t(-234) == 1) - assert_(t(10 ** 100) == 1) - assert_(t([234]) == 1) - assert_(t((234,)) == 1) - assert_(t(array(234)) == 1) - assert_(t(array([234])) == 1) - assert_(t(array([[234]])) == 1) - assert_(t(array([234], 'b')) == 1) - assert_(t(array([234], 'h')) == 1) - assert_(t(array([234], 'i')) == 1) - assert_(t(array([234], 'l')) == 1) - assert_(t(array([234], 'f')) == 1) - assert_(t(array([234], 'd')) == 1) - assert_(t(array([234 + 3j], 'F')) == 1) - assert_(t(array([234], 'D')) == 1) - assert_(t(array(0)) == 0) - assert_(t(array([0])) == 0) - assert_(t(array([[0]])) == 0) - assert_(t(array([0j])) == 0) - assert_(t(array([1])) == 1) - assert_raises(ValueError, t, array([0, 0])) - - -class TestF77ReturnLogical(TestReturnLogical): - code = """ - function t0(value) - logical value - logical t0 - t0 = value - end - function t1(value) - logical*1 value - logical*1 t1 - t1 = value - end - function t2(value) - logical*2 value - logical*2 t2 - t2 = value - end - function t4(value) - logical*4 value - logical*4 t4 - t4 = value - end -c function t8(value) -c logical*8 value -c logical*8 t8 -c t8 = value -c end - - subroutine s0(t0,value) - logical value - logical t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - logical*1 value - logical*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - logical*2 value - logical*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - logical*4 value - logical*4 t4 -cf2py intent(out) t4 - t4 = value - end -c subroutine s8(t8,value) -c logical*8 value -c logical*8 t8 -cf2py intent(out) t8 -c t8 = value -c end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnLogical(TestReturnLogical): - suffix = ".f90" - code = """ -module f90_return_logical - contains - function t0(value) - logical :: value - logical :: t0 - t0 = value - end function t0 - function t1(value) - logical(kind=1) :: value - logical(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - logical(kind=2) :: value - logical(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - logical(kind=4) :: value - logical(kind=4) :: t4 - t4 = value - end function t4 - function 
t8(value) - logical(kind=8) :: value - logical(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - logical :: value - logical :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - logical(kind=1) :: value - logical(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - logical(kind=2) :: value - logical(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - logical(kind=4) :: value - logical(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - logical(kind=8) :: value - logical(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_logical - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', - 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_real.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_real.py deleted file mode 100644 index 315cfe4..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_return_real.py +++ /dev/null @@ -1,210 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import pytest - -from numpy import array -from numpy.compat import long -from numpy.testing import assert_, assert_raises -from . import util - - -class TestReturnReal(util.F2PyTest): - - def check_function(self, t): - if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: - err = 1e-5 - else: - err = 0.0 - assert_(abs(t(234) - 234.0) <= err) - assert_(abs(t(234.6) - 234.6) <= err) - assert_(abs(t(long(234)) - 234.0) <= err) - assert_(abs(t('234') - 234) <= err) - assert_(abs(t('234.6') - 234.6) <= err) - assert_(abs(t(-234) + 234) <= err) - assert_(abs(t([234]) - 234) <= err) - assert_(abs(t((234,)) - 234.) <= err) - assert_(abs(t(array(234)) - 234.) <= err) - assert_(abs(t(array([234])) - 234.) <= err) - assert_(abs(t(array([[234]])) - 234.) <= err) - assert_(abs(t(array([234], 'b')) + 22) <= err) - assert_(abs(t(array([234], 'h')) - 234.) <= err) - assert_(abs(t(array([234], 'i')) - 234.) <= err) - assert_(abs(t(array([234], 'l')) - 234.) <= err) - assert_(abs(t(array([234], 'B')) - 234.) <= err) - assert_(abs(t(array([234], 'f')) - 234.) <= err) - assert_(abs(t(array([234], 'd')) - 234.) 
<= err) - if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: - assert_(t(1e200) == t(1e300)) # inf - - #assert_raises(ValueError, t, array([234], 'S1')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - try: - r = t(10 ** 400) - assert_(repr(r) in ['inf', 'Infinity'], repr(r)) - except OverflowError: - pass - - - -@pytest.mark.skipif( - platform.system() == 'Darwin', - reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") -class TestCReturnReal(TestReturnReal): - suffix = ".pyf" - module_name = "c_ext_return_real" - code = """ -python module c_ext_return_real -usercode \'\'\' -float t4(float value) { return value; } -void s4(float *t4, float value) { *t4 = value; } -double t8(double value) { return value; } -void s8(double *t8, double value) { *t8 = value; } -\'\'\' -interface - function t4(value) - real*4 intent(c) :: t4,value - end - function t8(value) - real*8 intent(c) :: t8,value - end - subroutine s4(t4,value) - intent(c) s4 - real*4 intent(out) :: t4 - real*4 intent(c) :: value - end - subroutine s8(t8,value) - intent(c) s8 - real*8 intent(out) :: t8 - real*8 intent(c) :: value - end -end interface -end python module c_ext_return_real - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF77ReturnReal(TestReturnReal): - code = """ - function t0(value) - real value - real t0 - t0 = value - end - function t4(value) - real*4 value - real*4 t4 - t4 = value - end - function t8(value) - real*8 value - real*8 t8 - t8 = value - end - function td(value) - double precision value - double precision td - td = value - end - - subroutine s0(t0,value) - real value - real t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s4(t4,value) - real*4 value - real*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - real*8 value - real*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine sd(td,value) - double precision value - double precision td -cf2py intent(out) td - td = value - end - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) - def test_all(self, name): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnReal(TestReturnReal): - suffix = ".f90" - code = """ -module f90_return_real - contains - function t0(value) - real :: value - real :: t0 - t0 = value - end function t0 - function t4(value) - real(kind=4) :: value - real(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - real(kind=8) :: value - real(kind=8) :: t8 - t8 = value - end function t8 - function td(value) - double precision :: value - double precision :: td - td = value - end function td - - subroutine s0(t0,value) - real :: value - real :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s4(t4,value) - real(kind=4) :: value - real(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - real(kind=8) :: value - real(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine sd(td,value) - double precision :: value - double precision :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_real - """ - - @pytest.mark.slow - @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) - def 
test_all(self, name): - self.check_function(getattr(self.module.f90_return_real, name)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_semicolon_split.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_semicolon_split.py deleted file mode 100644 index bcd18c8..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_semicolon_split.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform -import pytest - -from . import util -from numpy.testing import assert_equal - -@pytest.mark.skipif( - platform.system() == 'Darwin', - reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") -class TestMultiline(util.F2PyTest): - suffix = ".pyf" - module_name = "multiline" - code = """ -python module {module} - usercode ''' -void foo(int* x) {{ - char dummy = ';'; - *x = 42; -}} -''' - interface - subroutine foo(x) - intent(c) foo - integer intent(out) :: x - end subroutine foo - end interface -end python module {module} - """.format(module=module_name) - - def test_multiline(self): - assert_equal(self.module.foo(), 42) - - -@pytest.mark.skipif( - platform.system() == 'Darwin', - reason="Prone to error when run with numpy/f2py/tests on mac os, " - "but not when run in isolation") -class TestCallstatement(util.F2PyTest): - suffix = ".pyf" - module_name = "callstatement" - code = """ -python module {module} - usercode ''' -void foo(int* x) {{ -}} -''' - interface - subroutine foo(x) - intent(c) foo - integer intent(out) :: x - callprotoargument int* - callstatement {{ & - ; & - x = 42; & - }} - end subroutine foo - end interface -end python module {module} - """.format(module=module_name) - - def test_callstatement(self): - assert_equal(self.module.foo(), 42) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_size.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_size.py deleted file mode 100644 index e2af618..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_size.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -from numpy.testing import assert_equal -from . 
import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - - -class TestSizeSumExample(util.F2PyTest): - sources = [_path('src', 'size', 'foo.f90')] - - @pytest.mark.slow - def test_all(self): - r = self.module.foo([[]]) - assert_equal(r, [0], repr(r)) - - r = self.module.foo([[1, 2]]) - assert_equal(r, [3], repr(r)) - - r = self.module.foo([[1, 2], [3, 4]]) - assert_equal(r, [3, 7], repr(r)) - - r = self.module.foo([[1, 2], [3, 4], [5, 6]]) - assert_equal(r, [3, 7, 11], repr(r)) - - @pytest.mark.slow - def test_transpose(self): - r = self.module.trans([[]]) - assert_equal(r.T, [[]], repr(r)) - - r = self.module.trans([[1, 2]]) - assert_equal(r, [[1], [2]], repr(r)) - - r = self.module.trans([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) - - @pytest.mark.slow - def test_flatten(self): - r = self.module.flatten([[]]) - assert_equal(r, [], repr(r)) - - r = self.module.flatten([[1, 2]]) - assert_equal(r, [1, 2], repr(r)) - - r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_string.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_string.py deleted file mode 100644 index 0493c99..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/test_string.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import pytest - -from numpy.testing import assert_array_equal -import numpy as np -from . import util - - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestString(util.F2PyTest): - sources = [_path('src', 'string', 'char.f90')] - - @pytest.mark.slow - def test_char(self): - strings = np.array(['ab', 'cd', 'ef'], dtype='c').T - inp, out = self.module.char_test.change_strings(strings, strings.shape[1]) - assert_array_equal(inp, strings) - expected = strings.copy() - expected[1, :] = 'AAA' - assert_array_equal(out, expected) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/tests/util.py b/venv/lib/python3.7/site-packages/numpy/f2py/tests/util.py deleted file mode 100644 index bf005df..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/tests/util.py +++ /dev/null @@ -1,367 +0,0 @@ -""" -Utility functions for - -- building and importing modules on test time, using a temporary location -- detecting if compilers are present - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import subprocess -import tempfile -import shutil -import atexit -import textwrap -import re -import pytest - -from numpy.compat import asbytes, asstr -from numpy.testing import temppath -from importlib import import_module - -try: - from hashlib import md5 -except ImportError: - from md5 import new as md5 # noqa: F401 - -# -# Maintaining a temporary module directory -# - -_module_dir = None -_module_num = 5403 - - -def _cleanup(): - global _module_dir - if _module_dir is not None: - try: - sys.path.remove(_module_dir) - except ValueError: - pass - try: - shutil.rmtree(_module_dir) - except (IOError, OSError): - pass - _module_dir = None - - -def get_module_dir(): - global _module_dir - if _module_dir is None: - _module_dir = tempfile.mkdtemp() - atexit.register(_cleanup) - if _module_dir not in sys.path: - sys.path.insert(0, _module_dir) - return _module_dir - - -def get_temp_module_name(): - # Assume single-threaded, and the module dir usable only by this thread - global _module_num - 
d = get_module_dir() - name = "_test_ext_module_%d" % _module_num - _module_num += 1 - if name in sys.modules: - # this should not be possible, but check anyway - raise RuntimeError("Temporary module name already in use.") - return name - - -def _memoize(func): - memo = {} - - def wrapper(*a, **kw): - key = repr((a, kw)) - if key not in memo: - try: - memo[key] = func(*a, **kw) - except Exception as e: - memo[key] = e - raise - ret = memo[key] - if isinstance(ret, Exception): - raise ret - return ret - wrapper.__name__ = func.__name__ - return wrapper - -# -# Building modules -# - - -@_memoize -def build_module(source_files, options=[], skip=[], only=[], module_name=None): - """ - Compile and import a f2py module, built from the given files. - - """ - - code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " - "f2py2e.main()" % repr(sys.path)) - - d = get_module_dir() - - # Copy files - dst_sources = [] - f2py_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - base, ext = os.path.splitext(dst) - if ext in ('.f90', '.f', '.c', '.pyf'): - f2py_sources.append(dst) - - # Prepare options - if module_name is None: - module_name = get_temp_module_name() - f2py_opts = ['-c', '-m', module_name] + options + f2py_sources - if skip: - f2py_opts += ['skip:'] + skip - if only: - f2py_opts += ['only:'] + only - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, '-c', code] + f2py_opts - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running f2py failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - return import_module(module_name) - - -@_memoize -def build_code(source_code, options=[], skip=[], only=[], suffix=None, - module_name=None): - """ - Compile and import Fortran code using f2py. - - """ - if suffix is None: - suffix = '.f' - with temppath(suffix=suffix) as path: - with open(path, 'w') as f: - f.write(source_code) - return build_module([path], options=options, skip=skip, only=only, - module_name=module_name) - -# -# Check if compilers are available at all... -# - -_compiler_status = None - - -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... 
- code = textwrap.dedent("""\ - import os - import sys - sys.path = %(syspath)s - - def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - - from numpy.distutils.core import setup - setup(configuration=configuration) - - config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {}') - print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) - sys.exit(99) - """) - code = code % dict(syspath=repr(sys.path)) - - tmpdir = tempfile.mkdtemp() - try: - script = os.path.join(tmpdir, 'setup.py') - - with open(script, 'w') as f: - f.write(code) - - cmd = [sys.executable, 'setup.py', 'config'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - cwd=tmpdir) - out, err = p.communicate() - finally: - shutil.rmtree(tmpdir) - - m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out) - if m: - _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), - bool(int(m.group(3)))) - # Finished - return _compiler_status - - -def has_c_compiler(): - return _get_compiler_status()[0] - - -def has_f77_compiler(): - return _get_compiler_status()[1] - - -def has_f90_compiler(): - return _get_compiler_status()[2] - -# -# Building with distutils -# - - -@_memoize -def build_module_distutils(source_files, config_code, module_name, **kw): - """ - Build a module via distutils and import it. - - """ - from numpy.distutils.misc_util import Configuration - from numpy.distutils.core import setup - - d = get_module_dir() - - # Copy files - dst_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - # Build script - config_code = textwrap.dedent(config_code).replace("\n", "\n ") - - code = textwrap.dedent("""\ - import os - import sys - sys.path = %(syspath)s - - def configuration(parent_name='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - %(config_code)s - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) - """) % dict(config_code=config_code, syspath=repr(sys.path)) - - script = os.path.join(d, get_temp_module_name() + '.py') - dst_sources.append(script) - f = open(script, 'wb') - f.write(asbytes(code)) - f.close() - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, script, 'build_ext', '-i'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running distutils build failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - __import__(module_name) - return sys.modules[module_name] - -# -# Unittest convenience -# - - -class F2PyTest(object): - code = None - sources = None - options = [] - skip = [] - only = [] - suffix = '.f' - module = None - module_name = None - - def setup(self): - if sys.platform == 'win32': - pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') - - if self.module is not None: - return - - # Check compiler availability first - if not has_c_compiler(): - pytest.skip("No C compiler available") - - codes = [] - if self.sources: - codes.extend(self.sources) - if 
self.code is not None: - codes.append(self.suffix) - - needs_f77 = False - needs_f90 = False - for fn in codes: - if fn.endswith('.f'): - needs_f77 = True - elif fn.endswith('.f90'): - needs_f90 = True - if needs_f77 and not has_f77_compiler(): - pytest.skip("No Fortran 77 compiler available") - if needs_f90 and not has_f90_compiler(): - pytest.skip("No Fortran 90 compiler available") - - # Build the module - if self.code is not None: - self.module = build_code(self.code, options=self.options, - skip=self.skip, only=self.only, - suffix=self.suffix, - module_name=self.module_name) - - if self.sources is not None: - self.module = build_module(self.sources, options=self.options, - skip=self.skip, only=self.only, - module_name=self.module_name) diff --git a/venv/lib/python3.7/site-packages/numpy/f2py/use_rules.py b/venv/lib/python3.7/site-packages/numpy/f2py/use_rules.py deleted file mode 100644 index 6f44f16..0000000 --- a/venv/lib/python3.7/site-packages/numpy/f2py/use_rules.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python -""" - -Build 'use others module data' mechanism for f2py2e. - -Unfinished. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2000/09/10 12:35:43 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.3 $"[10:-1] - -f2py_version = 'See `f2py -v`' - - -from .auxfuncs import ( - applyrules, dictappend, gentitle, hasnote, outmess -) - - -usemodule_rules = { - 'body': """ -#begintitle# -static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ -\t #name# = get_#name#()\\n\\ -Arguments:\\n\\ -#docstr#\"; -extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); -static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { -/*#decl#*/ -\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; -printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); -\treturn Py_BuildValue(\"\"); -capi_fail: -\treturn NULL; -} -""", - 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', - 'need': ['F_MODFUNC'] -} - -################ - - -def buildusevars(m, r): - ret = {} - outmess( - '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) - varsmap = {} - revmap = {} - if 'map' in r: - for k in r['map'].keys(): - if r['map'][k] in revmap: - outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( - r['map'][k], k, revmap[r['map'][k]])) - else: - revmap[r['map'][k]] = k - if 'only' in r and r['only']: - for v in r['map'].keys(): - if r['map'][v] in m['vars']: - - if revmap[r['map'][v]] == v: - varsmap[v] = r['map'][v] - else: - outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % - (v, r['map'][v])) - else: - outmess( - '\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n' % (v, r['map'][v])) - else: - for v in m['vars'].keys(): - if v in revmap: - varsmap[v] = revmap[v] - else: - varsmap[v] = v - for v in varsmap.keys(): - ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) - return ret - - -def buildusevar(name, realname, vars, usemodulename): - outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( - name, realname)) - ret = {} - vrd = {'name': name, - 'realname': realname, - 'REALNAME': realname.upper(), - 'usemodulename': usemodulename, - 'USEMODULENAME': usemodulename.upper(), - 'texname': name.replace('_', '\\_'), - 'begintitle': gentitle('%s=>%s' % (name, realname)), - 'endtitle': gentitle('end of %s=>%s' % (name, realname)), - 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) - } - nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', - 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} - vrd['texnamename'] = name - for i in nummap.keys(): - vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) - if hasnote(vars[realname]): - vrd['note'] = vars[realname]['note'] - rd = dictappend({}, vrd) - - print(name, realname, vars[realname]) - ret = applyrules(usemodule_rules, rd) - return ret diff --git a/venv/lib/python3.7/site-packages/numpy/fft/__init__.py b/venv/lib/python3.7/site-packages/numpy/fft/__init__.py deleted file mode 100644 index 37b3f0d..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/__init__.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= - -.. currentmodule:: numpy.fft - -Standard FFTs -------------- - -.. autosummary:: - :toctree: generated/ - - fft Discrete Fourier transform. - ifft Inverse discrete Fourier transform. - fft2 Discrete Fourier transform in two dimensions. - ifft2 Inverse discrete Fourier transform in two dimensions. - fftn Discrete Fourier transform in N-dimensions. - ifftn Inverse discrete Fourier transform in N dimensions. - -Real FFTs ---------- - -.. autosummary:: - :toctree: generated/ - - rfft Real discrete Fourier transform. - irfft Inverse real discrete Fourier transform. - rfft2 Real discrete Fourier transform in two dimensions. - irfft2 Inverse real discrete Fourier transform in two dimensions. - rfftn Real discrete Fourier transform in N dimensions. - irfftn Inverse real discrete Fourier transform in N dimensions. - -Hermitian FFTs --------------- - -.. autosummary:: - :toctree: generated/ - - hfft Hermitian discrete Fourier transform. - ihfft Inverse Hermitian discrete Fourier transform. - -Helper routines ---------------- - -.. autosummary:: - :toctree: generated/ - - fftfreq Discrete Fourier Transform sample frequencies. - rfftfreq DFT sample frequencies (for usage with rfft, irfft). - fftshift Shift zero-frequency component to center of spectrum. - ifftshift Inverse of fftshift. - - -Background information ----------------------- - -Fourier analysis is fundamentally a method for expressing a function as a -sum of periodic components, and for recovering the function from those -components. When both the function and its Fourier transform are -replaced with discretized counterparts, it is called the discrete Fourier -transform (DFT). The DFT has become a mainstay of numerical computing in -part because of a very fast algorithm for computing it, called the Fast -Fourier Transform (FFT), which was known to Gauss (1805) and was brought -to light in its current form by Cooley and Tukey [CT]_. Press et al. 
[NR]_ -provide an accessible introduction to Fourier analysis and its -applications. - -Because the discrete Fourier transform separates its input into -components that contribute at discrete frequencies, it has a great number -of applications in digital signal processing, e.g., for filtering, and in -this context the discretized input to the transform is customarily -referred to as a *signal*, which exists in the *time domain*. The output -is called a *spectrum* or *transform* and exists in the *frequency -domain*. - -Implementation details ----------------------- - -There are many ways to define the DFT, varying in the sign of the -exponent, normalization, etc. In this implementation, the DFT is defined -as - -.. math:: - A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} - \\qquad k = 0,\\ldots,n-1. - -The DFT is in general defined for complex inputs and outputs, and a -single-frequency component at linear frequency :math:`f` is -represented by a complex exponential -:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` -is the sampling interval. - -The values in the result follow so-called "standard" order: If ``A = -fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of -the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` -contains the positive-frequency terms, and ``A[n/2+1:]`` contains the -negative-frequency terms, in order of decreasingly negative frequency. -For an even number of input points, ``A[n/2]`` represents both positive and -negative Nyquist frequency, and is also purely real for real input. For -an odd number of input points, ``A[(n-1)/2]`` contains the largest positive -frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. -The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies -of corresponding elements in the output. The routine -``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the -zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes -that shift. - -When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` -is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. -The phase spectrum is obtained by ``np.angle(A)``. - -The inverse DFT is defined as - -.. math:: - a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} - \\qquad m = 0,\\ldots,n-1. - -It differs from the forward transform by the sign of the exponential -argument and the default normalization by :math:`1/n`. - -Type Promotion --------------- - -`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and -``complex128`` arrays respectively. For an FFT implementation that does not -promote input arrays, see `scipy.fftpack`. - -Normalization -------------- - -The default normalization has the direct transforms unscaled and the inverse -transforms are scaled by :math:`1/n`. It is possible to obtain unitary -transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is -`None`) so that both direct and inverse transforms will be scaled by -:math:`1/\\sqrt{n}`. 
- -Real and Hermitian transforms ------------------------------ - -When the input is purely real, its transform is Hermitian, i.e., the -component at frequency :math:`f_k` is the complex conjugate of the -component at frequency :math:`-f_k`, which means that for real -inputs there is no information in the negative frequency components that -is not already available from the positive frequency components. -The family of `rfft` functions is -designed to operate on real inputs, and exploits this symmetry by -computing only the positive frequency components, up to and including the -Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex -output points. The inverses of this family assumes the same symmetry of -its input, and for an output of ``n`` points uses ``n/2+1`` input points. - -Correspondingly, when the spectrum is purely real, the signal is -Hermitian. The `hfft` family of functions exploits this symmetry by -using ``n/2+1`` complex points in the input (time) domain for ``n`` real -points in the frequency domain. - -In higher dimensions, FFTs are used, e.g., for image analysis and -filtering. The computational efficiency of the FFT means that it can -also be a faster way to compute large convolutions, using the property -that a convolution in the time domain is equivalent to a point-by-point -multiplication in the frequency domain. - -Higher dimensions ------------------ - -In two dimensions, the DFT is defined as - -.. math:: - A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} - a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} - \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, - -which extends in the obvious way to higher dimensions, and the inverses -in higher dimensions also extend in the same way. - -References ----------- - -.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - -.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., - 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. - 12-13. Cambridge Univ. Press, Cambridge, UK. - -Examples --------- - -For examples, see the various functions. - -""" - -from __future__ import division, absolute_import, print_function - -from ._pocketfft import * -from .helper import * - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft.py b/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft.py deleted file mode 100644 index 50720cd..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft.py +++ /dev/null @@ -1,1307 +0,0 @@ -""" -Discrete Fourier Transforms - -Routines in this module: - -fft(a, n=None, axis=-1) -ifft(a, n=None, axis=-1) -rfft(a, n=None, axis=-1) -irfft(a, n=None, axis=-1) -hfft(a, n=None, axis=-1) -ihfft(a, n=None, axis=-1) -fftn(a, s=None, axes=None) -ifftn(a, s=None, axes=None) -rfftn(a, s=None, axes=None) -irfftn(a, s=None, axes=None) -fft2(a, s=None, axes=(-2,-1)) -ifft2(a, s=None, axes=(-2, -1)) -rfft2(a, s=None, axes=(-2,-1)) -irfft2(a, s=None, axes=(-2, -1)) - -i = inverse transform -r = transform of purely real data -h = Hermite transform -n = n-dimensional transform -2 = 2-dimensional transform -(Note: 2D routines are just nD routines with different default -behavior.) 
- -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', - 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] - -import functools - -from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt -from . import _pocketfft_internal as pfi -from numpy.core.multiarray import normalize_axis_index -from numpy.core import overrides - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy.fft') - - -# `inv_norm` is a float by which the result of the transform needs to be -# divided. This replaces the original, more intuitive 'fct` parameter to avoid -# divisions by zero (or alternatively additional checks) in the case of -# zero-length axes during its computation. -def _raw_fft(a, n, axis, is_real, is_forward, inv_norm): - axis = normalize_axis_index(axis, a.ndim) - if n is None: - n = a.shape[axis] - - if n < 1: - raise ValueError("Invalid number of FFT data points (%d) specified." - % n) - - fct = 1/inv_norm - - if a.shape[axis] != n: - s = list(a.shape) - if s[axis] > n: - index = [slice(None)]*len(s) - index[axis] = slice(0, n) - a = a[tuple(index)] - else: - index = [slice(None)]*len(s) - index[axis] = slice(0, s[axis]) - s[axis] = n - z = zeros(s, a.dtype.char) - z[tuple(index)] = a - a = z - - if axis == a.ndim-1: - r = pfi.execute(a, is_real, is_forward, fct) - else: - a = swapaxes(a, axis, -1) - r = pfi.execute(a, is_real, is_forward, fct) - r = swapaxes(r, axis, -1) - return r - - -def _unitary(norm): - if norm is None: - return False - if norm=="ortho": - return True - raise ValueError("Invalid norm value %s, should be None or \"ortho\"." - % norm) - - -def _fft_dispatcher(a, n=None, axis=None, norm=None): - return (a,) - - -@array_function_dispatch(_fft_dispatcher) -def fft(a, n=None, axis=-1, norm=None): - """ - Compute the one-dimensional discrete Fourier Transform. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. - - Parameters - ---------- - a : array_like - Input array, can be complex. - n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - if `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : for definition of the DFT and conventions used. - ifft : The inverse of `fft`. - fft2 : The two-dimensional FFT. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - fftfreq : Frequency bins for given FFT parameters. - - Notes - ----- - FFT (Fast Fourier Transform) refers to a way the discrete Fourier - Transform (DFT) can be calculated efficiently, by using symmetries in the - calculated terms. The symmetry is highest when `n` is a power of 2, and - the transform is therefore most efficient for these sizes. 
- - The DFT is defined, with the conventions used in this implementation, in - the documentation for the `numpy.fft` module. - - References - ---------- - .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - - Examples - -------- - >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) - array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, - 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, - -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, - 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) - - In this example, real input has an FFT which is Hermitian, i.e., symmetric - in the real part and anti-symmetric in the imaginary part, as described in - the `numpy.fft` documentation: - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(256) - >>> sp = np.fft.fft(np.sin(t)) - >>> freq = np.fft.fftfreq(t.shape[-1]) - >>> plt.plot(freq, sp.real, freq, sp.imag) - [, ] - >>> plt.show() - - """ - - a = asarray(a) - if n is None: - n = a.shape[axis] - inv_norm = 1 - if norm is not None and _unitary(norm): - inv_norm = sqrt(n) - output = _raw_fft(a, n, axis, False, True, inv_norm) - return output - - -@array_function_dispatch(_fft_dispatcher) -def ifft(a, n=None, axis=-1, norm=None): - """ - Compute the one-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier transform computed by `fft`. In other words, - ``ifft(fft(a)) == a`` to within numerical accuracy. - For a general description of the algorithm and definitions, - see `numpy.fft`. - - The input should be ordered in the same way as is returned by `fft`, - i.e., - - * ``a[0]`` should contain the zero frequency term, - * ``a[1:n//2]`` should contain the positive-frequency terms, - * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in - increasing order starting from the most negative frequency. - - For an even number of input points, ``A[n//2]`` represents the sum of - the values at the positive and negative Nyquist frequencies, as the two - are aliased together. See `numpy.fft` for details. - - Parameters - ---------- - a : array_like - Input array, can be complex. - n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - See notes about padding issues. - axis : int, optional - Axis over which to compute the inverse DFT. If not given, the last - axis is used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - If `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : An introduction, with definitions and general explanations. - fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse - ifft2 : The two-dimensional inverse FFT. - ifftn : The n-dimensional inverse FFT. - - Notes - ----- - If the input parameter `n` is larger than the size of the input, the input - is padded by appending zeros at the end. 
Even though this is the common - approach, it might lead to surprising results. If a different padding is - desired, it must be performed before calling `ifft`. - - Examples - -------- - >>> np.fft.ifft([0, 4, 0, 0]) - array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary - - Create and plot a band-limited signal with random phases: - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) - >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) - >>> s = np.fft.ifft(n) - >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') - [, ] - >>> plt.legend(('real', 'imaginary')) - - >>> plt.show() - - """ - a = asarray(a) - if n is None: - n = a.shape[axis] - if norm is not None and _unitary(norm): - inv_norm = sqrt(max(n, 1)) - else: - inv_norm = n - output = _raw_fft(a, n, axis, False, False, inv_norm) - return output - - - -@array_function_dispatch(_fft_dispatcher) -def rfft(a, n=None, axis=-1, norm=None): - """ - Compute the one-dimensional discrete Fourier Transform for real input. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) of a real-valued array by means of an efficient algorithm - called the Fast Fourier Transform (FFT). - - Parameters - ---------- - a : array_like - Input array - n : int, optional - Number of points along transformation axis in the input to use. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - If `n` is even, the length of the transformed axis is ``(n/2)+1``. - If `n` is odd, the length is ``(n+1)/2``. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - irfft : The inverse of `rfft`. - fft : The one-dimensional FFT of general (complex) input. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - - Notes - ----- - When the DFT is computed for purely real input, the output is - Hermitian-symmetric, i.e. the negative frequency terms are just the complex - conjugates of the corresponding positive-frequency terms, and the - negative-frequency terms are therefore redundant. This function does not - compute the negative frequency terms, and the length of the transformed - axis of the output is therefore ``n//2 + 1``. - - When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains - the zero-frequency term 0*fs, which is real due to Hermitian symmetry. - - If `n` is even, ``A[-1]`` contains the term representing both positive - and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely - real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains - the largest positive frequency (fs/2*(n-1)/n), and is complex in the - general case. - - If the input `a` contains an imaginary part, it is silently discarded. 
- - Examples - -------- - >>> np.fft.fft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary - >>> np.fft.rfft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary - - Notice how the final element of the `fft` output is the complex conjugate - of the second element, for real input. For `rfft`, this symmetry is - exploited to compute only the non-negative frequency terms. - - """ - a = asarray(a) - inv_norm = 1 - if norm is not None and _unitary(norm): - if n is None: - n = a.shape[axis] - inv_norm = sqrt(n) - output = _raw_fft(a, n, axis, True, True, inv_norm) - return output - - -@array_function_dispatch(_fft_dispatcher) -def irfft(a, n=None, axis=-1, norm=None): - """ - Compute the inverse of the n-point DFT for real input. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier Transform of real input computed by `rfft`. - In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical - accuracy. (See Notes below for why ``len(a)`` is necessary here.) - - The input is expected to be in the form returned by `rfft`, i.e. the - real zero-frequency term followed by the complex positive frequency terms - in order of increasing frequency. Since the discrete Fourier Transform of - real input is Hermitian-symmetric, the negative frequency terms are taken - to be the complex conjugates of the corresponding positive frequency terms. - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - Length of the transformed axis of the output. - For `n` output points, ``n//2+1`` input points are necessary. If the - input is longer than this, it is cropped. If it is shorter than this, - it is padded with zeros. If `n` is not given, it is taken to be - ``2*(m-1)`` where ``m`` is the length of the input along the axis - specified by `axis`. - axis : int, optional - Axis over which to compute the inverse FFT. If not given, the last - axis is used. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - The length of the transformed axis is `n`, or, if `n` is not given, - ``2*(m-1)`` where ``m`` is the length of the transformed axis of the - input. To get an odd number of output points, `n` must be specified. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. - fft : The one-dimensional FFT. - irfft2 : The inverse of the two-dimensional FFT of real input. - irfftn : The inverse of the *n*-dimensional FFT of real input. - - Notes - ----- - Returns the real valued `n`-point inverse discrete Fourier transform - of `a`, where `a` contains the non-negative frequency terms of a - Hermitian-symmetric sequence. `n` is the length of the result, not the - input. - - If you specify an `n` such that `a` must be zero-padded or truncated, the - extra/removed values will be added/removed at high frequencies. One can - thus resample a series to `m` points via Fourier interpolation by: - ``a_resamp = irfft(rfft(a), m)``. - - The correct interpretation of the hermitian input depends on the length of - the original data, as given by `n`. 
This is because each input shape could - correspond to either an odd or even length signal. By default, `irfft` - assumes an even output length which puts the last entry at the Nyquist - frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, - the value is thus treated as purely real. To avoid losing information, the - correct length of the real input **must** be given. - - Examples - -------- - >>> np.fft.ifft([1, -1j, -1, 1j]) - array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary - >>> np.fft.irfft([1, -1j, -1]) - array([0., 1., 0., 0.]) - - Notice how the last term in the input to the ordinary `ifft` is the - complex conjugate of the second term, and the output has zero imaginary - part everywhere. When calling `irfft`, the negative frequencies are not - specified, and the output array is purely real. - - """ - a = asarray(a) - if n is None: - n = (a.shape[axis] - 1) * 2 - inv_norm = n - if norm is not None and _unitary(norm): - inv_norm = sqrt(n) - output = _raw_fft(a, n, axis, True, False, inv_norm) - return output - - -@array_function_dispatch(_fft_dispatcher) -def hfft(a, n=None, axis=-1, norm=None): - """ - Compute the FFT of a signal that has Hermitian symmetry, i.e., a real - spectrum. - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - Length of the transformed axis of the output. For `n` output - points, ``n//2 + 1`` input points are necessary. If the input is - longer than this, it is cropped. If it is shorter than this, it is - padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)`` - where ``m`` is the length of the input along the axis specified by - `axis`. - axis : int, optional - Axis over which to compute the FFT. If not given, the last - axis is used. - norm : {None, "ortho"}, optional - Normalization mode (see `numpy.fft`). Default is None. - - .. versionadded:: 1.10.0 - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - The length of the transformed axis is `n`, or, if `n` is not given, - ``2*m - 2`` where ``m`` is the length of the transformed axis of - the input. To get an odd number of output points, `n` must be - specified, for instance as ``2*m - 1`` in the typical case, - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See also - -------- - rfft : Compute the one-dimensional FFT for real input. - ihfft : The inverse of `hfft`. - - Notes - ----- - `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the - opposite case: here the signal has Hermitian symmetry in the time - domain and is real in the frequency domain. So here it's `hfft` for - which you must supply the length of the result if it is to be odd. - - * even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error, - * odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error. - - The correct interpretation of the hermitian input depends on the length of - the original data, as given by `n`. This is because each input shape could - correspond to either an odd or even length signal. By default, `hfft` - assumes an even output length which puts the last entry at the Nyquist - frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, - the value is thus treated as purely real. To avoid losing information, the - shape of the full signal **must** be given. 
-
-    Examples
-    --------
-    >>> signal = np.array([1, 2, 3, 4, 3, 2])
-    >>> np.fft.fft(signal)
-    array([15.+0.j,  -4.+0.j,   0.+0.j,  -1.-0.j,   0.+0.j,  -4.+0.j]) # may vary
-    >>> np.fft.hfft(signal[:4]) # Input first half of signal
-    array([15.,  -4.,   0.,  -1.,   0.,  -4.])
-    >>> np.fft.hfft(signal, 6)  # Input entire signal and truncate
-    array([15.,  -4.,   0.,  -1.,   0.,  -4.])
-
-
-    >>> signal = np.array([[1, 1.j], [-1.j, 2]])
-    >>> np.conj(signal.T) - signal   # check Hermitian symmetry
-    array([[ 0.-0.j,  -0.+0.j], # may vary
-           [ 0.+0.j,  0.-0.j]])
-    >>> freq_spectrum = np.fft.hfft(signal)
-    >>> freq_spectrum
-    array([[ 1.,  1.],
-           [ 2., -2.]])
-
-    """
-    a = asarray(a)
-    if n is None:
-        n = (a.shape[axis] - 1) * 2
-    unitary = _unitary(norm)
-    return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
-
-
-@array_function_dispatch(_fft_dispatcher)
-def ihfft(a, n=None, axis=-1, norm=None):
-    """
-    Compute the inverse FFT of a signal that has Hermitian symmetry.
-
-    Parameters
-    ----------
-    a : array_like
-        Input array.
-    n : int, optional
-        Length of the inverse FFT, the number of points along the
-        transformation axis in the input to use.  If `n` is smaller than
-        the length of the input, the input is cropped.  If it is larger,
-        the input is padded with zeros.  If `n` is not given, the length of
-        the input along the axis specified by `axis` is used.
-    axis : int, optional
-        Axis over which to compute the inverse FFT. If not given, the last
-        axis is used.
-    norm : {None, "ortho"}, optional
-        Normalization mode (see `numpy.fft`). Default is None.
-
-        .. versionadded:: 1.10.0
-
-    Returns
-    -------
-    out : complex ndarray
-        The truncated or zero-padded input, transformed along the axis
-        indicated by `axis`, or the last one if `axis` is not specified.
-        The length of the transformed axis is ``n//2 + 1``.
-
-    See also
-    --------
-    hfft, irfft
-
-    Notes
-    -----
-    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
-    opposite case: here the signal has Hermitian symmetry in the time
-    domain and is real in the frequency domain. So here it's `hfft` for
-    which you must supply the length of the result if it is to be odd:
-
-    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
-    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
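Both identities are easy to verify; a minimal sketch with an arbitrary real
half-spectrum ``a``:

>>> a = np.array([1., 2., 3.])
>>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 2)), a)  # even case
True
>>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 1)), a)  # odd case
True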
-
-    Examples
-    --------
-    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
-    >>> np.fft.ifft(spectrum)
-    array([1.+0.j,  2.+0.j,  3.+0.j,  4.+0.j,  3.+0.j,  2.+0.j]) # may vary
-    >>> np.fft.ihfft(spectrum)
-    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j]) # may vary
-
-    """
-    a = asarray(a)
-    if n is None:
-        n = a.shape[axis]
-    unitary = _unitary(norm)
-    output = conjugate(rfft(a, n, axis))
-    return output * (1 / (sqrt(n) if unitary else n))
-
-
-def _cook_nd_args(a, s=None, axes=None, invreal=0):
-    if s is None:
-        shapeless = 1
-        if axes is None:
-            s = list(a.shape)
-        else:
-            s = take(a.shape, axes)
-    else:
-        shapeless = 0
-    s = list(s)
-    if axes is None:
-        axes = list(range(-len(s), 0))
-    if len(s) != len(axes):
-        raise ValueError("Shape and axes have different lengths.")
-    if invreal and shapeless:
-        s[-1] = (a.shape[axes[-1]] - 1) * 2
-    return s, axes
-
-
-def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
-    a = asarray(a)
-    s, axes = _cook_nd_args(a, s, axes)
-    itl = list(range(len(axes)))
-    itl.reverse()
-    for ii in itl:
-        a = function(a, n=s[ii], axis=axes[ii], norm=norm)
-    return a
-
-
-def _fftn_dispatcher(a, s=None, axes=None, norm=None):
-    return (a,)
-
-
-@array_function_dispatch(_fftn_dispatcher)
-def fftn(a, s=None, axes=None, norm=None):
-    """
-    Compute the N-dimensional discrete Fourier Transform.
-
-    This function computes the *N*-dimensional discrete Fourier Transform over
-    any number of axes in an *M*-dimensional array by means of the Fast Fourier
-    Transform (FFT).
-
-    Parameters
-    ----------
-    a : array_like
-        Input array, can be complex.
-    s : sequence of ints, optional
-        Shape (length of each transformed axis) of the output
-        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
-        This corresponds to ``n`` for ``fft(x, n)``.
-        Along any axis, if the given shape is smaller than that of the input,
-        the input is cropped. If it is larger, the input is padded with zeros.
-        If `s` is not given, the shape of the input along the axes specified
-        by `axes` is used.
-    axes : sequence of ints, optional
-        Axes over which to compute the FFT. If not given, the last ``len(s)``
-        axes are used, or all axes if `s` is also not specified.
-        Repeated indices in `axes` mean that the transform over that axis is
-        performed multiple times.
-    norm : {None, "ortho"}, optional
-        .. versionadded:: 1.10.0
-
-        Normalization mode (see `numpy.fft`). Default is None.
-
-    Returns
-    -------
-    out : complex ndarray
-        The truncated or zero-padded input, transformed along the axes
-        indicated by `axes`, or by a combination of `s` and `a`,
-        as explained in the parameters section above.
-
-    Raises
-    ------
-    ValueError
-        If `s` and `axes` have different lengths.
-    IndexError
-        If an element of `axes` is larger than the number of axes of `a`.
-
-    See Also
-    --------
-    numpy.fft : Overall view of discrete Fourier transforms, with definitions
-        and conventions used.
-    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
-    fft : The one-dimensional FFT, with definitions and conventions used.
-    rfftn : The *n*-dimensional FFT of real input.
-    fft2 : The two-dimensional FFT.
- fftshift : Shifts zero-frequency terms to centre of array - - Notes - ----- - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of all axes, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. - - See `numpy.fft` for details, definitions and conventions used. - - Examples - -------- - >>> a = np.mgrid[:3, :3, :3][0] - >>> np.fft.fftn(a, axes=(1, 2)) - array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 9.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[18.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) - array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[-2.+0.j, -2.+0.j, -2.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - - >>> import matplotlib.pyplot as plt - >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, - ... 2 * np.pi * np.arange(200) / 34) - >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) - >>> FS = np.fft.fftn(S) - >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) - - >>> plt.show() - - """ - - return _raw_fftnd(a, s, axes, fft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def ifftn(a, s=None, axes=None, norm=None): - """ - Compute the N-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform over any number of axes in an M-dimensional array by - means of the Fast Fourier Transform (FFT). In other words, - ``ifftn(fftn(a)) == a`` to within numerical accuracy. - For a description of the definitions and conventions used, see `numpy.fft`. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fftn`, i.e. it should have the term for zero frequency - in all axes in the low-order corner, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - This corresponds to ``n`` for ``ifft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the IFFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the inverse transform over that - axis is performed multiple times. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` or `a`, - as explained in the parameters section above. 
- - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. - ifft : The one-dimensional inverse FFT. - ifft2 : The two-dimensional inverse FFT. - ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning - of array. - - Notes - ----- - See `numpy.fft` for definitions and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifftn` is called. - - Examples - -------- - >>> a = np.eye(4) - >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) - array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary - [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) - - - Create and plot an image with band-limited frequency content: - - >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) - >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) - >>> im = np.fft.ifftn(n).real - >>> plt.imshow(im) - - >>> plt.show() - - """ - - return _raw_fftnd(a, s, axes, ifft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def fft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional discrete Fourier Transform - - This function computes the *n*-dimensional discrete Fourier Transform - over any axes in an *M*-dimensional array by means of the - Fast Fourier Transform (FFT). By default, the transform is computed over - the last two axes of the input array, i.e., a 2-dimensional FFT. - - Parameters - ---------- - a : array_like - Input array, can be complex - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - This corresponds to ``n`` for ``fft(x, n)``. - Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. - - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - ifft2 : The inverse two-dimensional FFT. - fft : The one-dimensional FFT. - fftn : The *n*-dimensional FFT. - fftshift : Shifts zero-frequency terms to the center of the array. 
- For two-dimensional input, swaps first and third quadrants, and second - and fourth quadrants. - - Notes - ----- - `fft2` is just `fftn` with a different default for `axes`. - - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of the transformed axes, the positive frequency terms - in the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - the axes, in order of decreasingly negative frequency. - - See `fftn` for details and a plotting example, and `numpy.fft` for - definitions and conventions used. - - - Examples - -------- - >>> a = np.mgrid[:5, :5][0] - >>> np.fft.fft2(a) - array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary - 0. +0.j , 0. +0.j ], - [-12.5+17.20477401j, 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ], - [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ], - [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ], - [-12.5-17.20477401j, 0. +0.j , 0. +0.j , - 0. +0.j , 0. +0.j ]]) - - """ - - return _raw_fftnd(a, s, axes, fft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def ifft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the 2-dimensional discrete Fourier - Transform over any number of axes in an M-dimensional array by means of - the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` - to within numerical accuracy. By default, the inverse transform is - computed over the last two axes of the input array. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fft2`, i.e. it should have the term for zero frequency - in the low-order corner of the two axes, the positive frequency terms in - the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - both axes, in order of decreasingly negative frequency. - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each axis) of the output (``s[0]`` refers to axis 0, - ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. - Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. - - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. 
- fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. - ifftn : The inverse of the *n*-dimensional FFT. - fft : The one-dimensional FFT. - ifft : The one-dimensional inverse FFT. - - Notes - ----- - `ifft2` is just `ifftn` with a different default for `axes`. - - See `ifftn` for details and a plotting example, and `numpy.fft` for - definition and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifft2` is called. - - Examples - -------- - >>> a = 4 * np.eye(4) - >>> np.fft.ifft2(a) - array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary - [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], - [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) - - """ - - return _raw_fftnd(a, s, axes, ifft, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def rfftn(a, s=None, axes=None, norm=None): - """ - Compute the N-dimensional discrete Fourier Transform for real input. - - This function computes the N-dimensional discrete Fourier Transform over - any number of axes in an M-dimensional real array by means of the Fast - Fourier Transform (FFT). By default, all axes are transformed, with the - real transform performed over the last axis, while the remaining - transforms are complex. - - Parameters - ---------- - a : array_like - Input array, taken to be real. - s : sequence of ints, optional - Shape (length along each transformed axis) to use from the input. - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - The final element of `s` corresponds to `n` for ``rfft(x, n)``, while - for the remaining axes, it corresponds to `n` for ``fft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above. - The length of the last axis transformed will be ``s[-1]//2+1``, - while the remaining transformed axes will have lengths according to - `s`, or unchanged from the input. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT - of real input. - fft : The one-dimensional FFT, with definitions and conventions used. - rfft : The one-dimensional FFT of real input. - fftn : The n-dimensional FFT. - rfft2 : The two-dimensional FFT of real input. - - Notes - ----- - The transform for real input is performed over the last transformation - axis, as by `rfft`, then the transform over the remaining axes is - performed as by `fftn`. The order of the output is as for `rfft` for the - final transformation axis, and as for `fftn` for the remaining - transformation axes. 
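A short sketch of that composition; the ``(4, 6)`` shape is arbitrary, and
any real array behaves the same way:

>>> a = np.random.rand(4, 6)
>>> step = np.fft.rfft(a, axis=-1)   # real transform over the last axis
>>> np.allclose(np.fft.rfftn(a), np.fft.fft(step, axis=0))
True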
- - See `fft` for details, definitions and conventions used. - - Examples - -------- - >>> a = np.ones((2, 2, 2)) - >>> np.fft.rfftn(a) - array([[[8.+0.j, 0.+0.j], # may vary - [0.+0.j, 0.+0.j]], - [[0.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j]]]) - - >>> np.fft.rfftn(a, axes=(2, 0)) - array([[[4.+0.j, 0.+0.j], # may vary - [4.+0.j, 0.+0.j]], - [[0.+0.j, 0.+0.j], - [0.+0.j, 0.+0.j]]]) - - """ - a = asarray(a) - s, axes = _cook_nd_args(a, s, axes) - a = rfft(a, s[-1], axes[-1], norm) - for ii in range(len(axes)-1): - a = fft(a, s[ii], axes[ii], norm) - return a - - -@array_function_dispatch(_fftn_dispatcher) -def rfft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional FFT of a real array. - - Parameters - ---------- - a : array - Input array, taken to be real. - s : sequence of ints, optional - Shape of the FFT. - axes : sequence of ints, optional - Axes over which to compute the FFT. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : ndarray - The result of the real 2-D FFT. - - See Also - -------- - rfftn : Compute the N-dimensional discrete Fourier Transform for real - input. - - Notes - ----- - This is really just `rfftn` with different default behavior. - For more details see `rfftn`. - - """ - - return rfftn(a, s, axes, norm) - - -@array_function_dispatch(_fftn_dispatcher) -def irfftn(a, s=None, axes=None, norm=None): - """ - Compute the inverse of the N-dimensional FFT of real input. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform for real input over any number of axes in an - M-dimensional array by means of the Fast Fourier Transform (FFT). In - other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical - accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, - and for the same reason.) - - The input should be ordered in the same way as is returned by `rfftn`, - i.e. as for `irfft` for the final transformation axis, and as for `ifftn` - along all the other axes. - - Parameters - ---------- - a : array_like - Input array. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the - number of input points used along this axis, except for the last axis, - where ``s[-1]//2+1`` points of the input are used. - Along any axis, if the shape indicated by `s` is smaller than that of - the input, the input is cropped. If it is larger, the input is padded - with zeros. If `s` is not given, the shape of the input along the axes - specified by axes is used. Except for the last axis which is taken to be - ``2*(m-1)`` where ``m`` is the length of the input along that axis. - axes : sequence of ints, optional - Axes over which to compute the inverse FFT. If not given, the last - `len(s)` axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the inverse transform over that - axis is performed multiple times. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` or `a`, - as explained in the parameters section above. 
- The length of each transformed axis is as given by the corresponding - element of `s`, or the length of the input in every axis except for the - last one if `s` is not given. In the final transformed axis the length - of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the - length of the final transformed axis of the input. To get an odd - number of output points in the final axis, `s` must be specified. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - rfftn : The forward n-dimensional FFT of real input, - of which `ifftn` is the inverse. - fft : The one-dimensional FFT, with definitions and conventions used. - irfft : The inverse of the one-dimensional FFT of real input. - irfft2 : The inverse of the two-dimensional FFT of real input. - - Notes - ----- - See `fft` for definitions and conventions used. - - See `rfft` for definitions and conventions used for real input. - - The correct interpretation of the hermitian input depends on the shape of - the original data, as given by `s`. This is because each input shape could - correspond to either an odd or even length signal. By default, `irfftn` - assumes an even output length which puts the last entry at the Nyquist - frequency; aliasing with its symmetric counterpart. When performing the - final complex to real transform, the last value is thus treated as purely - real. To avoid losing information, the correct shape of the real input - **must** be given. - - Examples - -------- - >>> a = np.zeros((3, 2, 2)) - >>> a[0, 0, 0] = 3 * 2 * 2 - >>> np.fft.irfftn(a) - array([[[1., 1.], - [1., 1.]], - [[1., 1.], - [1., 1.]], - [[1., 1.], - [1., 1.]]]) - - """ - a = asarray(a) - s, axes = _cook_nd_args(a, s, axes, invreal=1) - for ii in range(len(axes)-1): - a = ifft(a, s[ii], axes[ii], norm) - a = irfft(a, s[-1], axes[-1], norm) - return a - - -@array_function_dispatch(_fftn_dispatcher) -def irfft2(a, s=None, axes=(-2, -1), norm=None): - """ - Compute the 2-dimensional inverse FFT of a real array. - - Parameters - ---------- - a : array_like - The input array - s : sequence of ints, optional - Shape of the real output to the inverse FFT. - axes : sequence of ints, optional - The axes over which to compute the inverse fft. - Default is the last two axes. - norm : {None, "ortho"}, optional - .. versionadded:: 1.10.0 - - Normalization mode (see `numpy.fft`). Default is None. - - Returns - ------- - out : ndarray - The result of the inverse real 2-D FFT. - - See Also - -------- - irfftn : Compute the inverse of the N-dimensional FFT of real input. - - Notes - ----- - This is really `irfftn` with different defaults. - For more details see `irfftn`. 
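A minimal round-trip sketch under the default even-length assumption (the
input shape is arbitrary):

>>> x = np.random.rand(4, 6)
>>> np.allclose(np.fft.irfft2(np.fft.rfft2(x)), x)
True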
- - """ - - return irfftn(a, s, axes, norm) diff --git a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so b/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so deleted file mode 100755 index b4c0a9d..0000000 Binary files a/venv/lib/python3.7/site-packages/numpy/fft/_pocketfft_internal.cpython-37m-x86_64-linux-gnu.so and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/numpy/fft/helper.py b/venv/lib/python3.7/site-packages/numpy/fft/helper.py deleted file mode 100644 index a920a4a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/helper.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -Discrete Fourier Transforms - helper.py - -""" -from __future__ import division, absolute_import, print_function - -from numpy.compat import integer_types -from numpy.core import integer, empty, arange, asarray, roll -from numpy.core.overrides import array_function_dispatch, set_module - -# Created by Pearu Peterson, September 2002 - -__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] - -integer_types = integer_types + (integer,) - - -def _fftshift_dispatcher(x, axes=None): - return (x,) - - -@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') -def fftshift(x, axes=None): - """ - Shift the zero-frequency component to the center of the spectrum. - - This function swaps half-spaces for all axes listed (defaults to all). - Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to shift. Default is None, which shifts all axes. - - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - ifftshift : The inverse of `fftshift`. - - Examples - -------- - >>> freqs = np.fft.fftfreq(10, 0.1) - >>> freqs - array([ 0., 1., 2., ..., -3., -2., -1.]) - >>> np.fft.fftshift(freqs) - array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) - - Shift the zero-frequency component only along the second axis: - - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.fftshift(freqs, axes=(1,)) - array([[ 2., 0., 1.], - [-4., 3., 4.], - [-1., -3., -2.]]) - - """ - x = asarray(x) - if axes is None: - axes = tuple(range(x.ndim)) - shift = [dim // 2 for dim in x.shape] - elif isinstance(axes, integer_types): - shift = x.shape[axes] // 2 - else: - shift = [x.shape[ax] // 2 for ax in axes] - - return roll(x, shift, axes) - - -@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') -def ifftshift(x, axes=None): - """ - The inverse of `fftshift`. Although identical for even-length `x`, the - functions differ by one sample for odd-length `x`. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to calculate. Defaults to None, which shifts all axes. - - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - fftshift : Shift zero-frequency component to the center of the spectrum. 
- - Examples - -------- - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.ifftshift(np.fft.fftshift(freqs)) - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - - """ - x = asarray(x) - if axes is None: - axes = tuple(range(x.ndim)) - shift = [-(dim // 2) for dim in x.shape] - elif isinstance(axes, integer_types): - shift = -(x.shape[axes] // 2) - else: - shift = [-(x.shape[ax] // 2) for ax in axes] - - return roll(x, shift, axes) - - -@set_module('numpy.fft') -def fftfreq(n, d=1.0): - """ - Return the Discrete Fourier Transform sample frequencies. - - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if - the sample spacing is in seconds, then the frequency unit is cycles/second. - - Given a window length `n` and a sample spacing `d`:: - - f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even - f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd - - Parameters - ---------- - n : int - Window length. - d : scalar, optional - Sample spacing (inverse of the sampling rate). Defaults to 1. - - Returns - ------- - f : ndarray - Array of length `n` containing the sample frequencies. - - Examples - -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) - >>> fourier = np.fft.fft(signal) - >>> n = signal.size - >>> timestep = 0.1 - >>> freq = np.fft.fftfreq(n, d=timestep) - >>> freq - array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) - - """ - if not isinstance(n, integer_types): - raise ValueError("n should be an integer") - val = 1.0 / (n * d) - results = empty(n, int) - N = (n-1)//2 + 1 - p1 = arange(0, N, dtype=int) - results[:N] = p1 - p2 = arange(-(n//2), 0, dtype=int) - results[N:] = p2 - return results * val - - -@set_module('numpy.fft') -def rfftfreq(n, d=1.0): - """ - Return the Discrete Fourier Transform sample frequencies - (for usage with rfft, irfft). - - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if - the sample spacing is in seconds, then the frequency unit is cycles/second. - - Given a window length `n` and a sample spacing `d`:: - - f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even - f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd - - Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) - the Nyquist frequency component is considered to be positive. - - Parameters - ---------- - n : int - Window length. - d : scalar, optional - Sample spacing (inverse of the sampling rate). Defaults to 1. - - Returns - ------- - f : ndarray - Array of length ``n//2 + 1`` containing the sample frequencies. 
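The differing sign convention at the Nyquist bin can be checked directly
(a sketch for ``n = 8``):

>>> np.fft.fftfreq(8)[4]    # Nyquist term is negative here
-0.5
>>> np.fft.rfftfreq(8)[4]   # ... but positive here
0.5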
- - Examples - -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) - >>> fourier = np.fft.rfft(signal) - >>> n = signal.size - >>> sample_rate = 100 - >>> freq = np.fft.fftfreq(n, d=1./sample_rate) - >>> freq - array([ 0., 10., 20., ..., -30., -20., -10.]) - >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) - >>> freq - array([ 0., 10., 20., 30., 40., 50.]) - - """ - if not isinstance(n, integer_types): - raise ValueError("n should be an integer") - val = 1.0/(n*d) - N = n//2 + 1 - results = arange(0, N, dtype=int) - return results * val diff --git a/venv/lib/python3.7/site-packages/numpy/fft/setup.py b/venv/lib/python3.7/site-packages/numpy/fft/setup.py deleted file mode 100644 index 8c3a315..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/setup.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('fft', parent_package, top_path) - - config.add_data_dir('tests') - - # Configure pocketfft_internal - config.add_extension('_pocketfft_internal', - sources=['_pocketfft.c'] - ) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/venv/lib/python3.7/site-packages/numpy/fft/tests/__init__.py b/venv/lib/python3.7/site-packages/numpy/fft/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_helper.py b/venv/lib/python3.7/site-packages/numpy/fft/tests/test_helper.py deleted file mode 100644 index 6613c80..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_helper.py +++ /dev/null @@ -1,169 +0,0 @@ -"""Test functions for fftpack.helper module - -Copied from fftpack.helper by Pearu Peterson, October 2005 - -""" -from __future__ import division, absolute_import, print_function -import numpy as np -from numpy.testing import assert_array_almost_equal, assert_equal -from numpy import fft, pi - - -class TestFFTShift(object): - - def test_definition(self): - x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] - assert_array_almost_equal(fft.fftshift(x), y) - assert_array_almost_equal(fft.ifftshift(y), x) - x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] - assert_array_almost_equal(fft.fftshift(x), y) - assert_array_almost_equal(fft.ifftshift(y), x) - - def test_inverse(self): - for n in [1, 4, 9, 100, 211]: - x = np.random.random((n,)) - assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) - - def test_axes_keyword(self): - freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] - shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] - assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) - assert_array_almost_equal(fft.fftshift(freqs, axes=0), - fft.fftshift(freqs, axes=(0,))) - assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) - assert_array_almost_equal(fft.ifftshift(shifted, axes=0), - fft.ifftshift(shifted, axes=(0,))) - - assert_array_almost_equal(fft.fftshift(freqs), shifted) - assert_array_almost_equal(fft.ifftshift(shifted), freqs) - - def test_uneven_dims(self): - """ Test 2D input, which has uneven dimension sizes """ - freqs = [ - [0, 1], - [2, 3], - [4, 5] - ] - - # shift in dimension 0 - shift_dim0 = [ - [4, 5], - [0, 1], - [2, 3] - ] - assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) - 
assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) - assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) - assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) - - # shift in dimension 1 - shift_dim1 = [ - [1, 0], - [3, 2], - [5, 4] - ] - assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) - assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) - - # shift in both dimensions - shift_dim_both = [ - [5, 4], - [1, 0], - [3, 2] - ] - assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) - assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) - - # axes=None (default) shift in all dimensions - assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) - assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) - assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) - - def test_equal_to_original(self): - """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ - from numpy.compat import integer_types - from numpy.core import asarray, concatenate, arange, take - - def original_fftshift(x, axes=None): - """ How fftshift was implemented in v1.14""" - tmp = asarray(x) - ndim = tmp.ndim - if axes is None: - axes = list(range(ndim)) - elif isinstance(axes, integer_types): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = (n + 1) // 2 - mylist = concatenate((arange(p2, n), arange(p2))) - y = take(y, mylist, k) - return y - - def original_ifftshift(x, axes=None): - """ How ifftshift was implemented in v1.14 """ - tmp = asarray(x) - ndim = tmp.ndim - if axes is None: - axes = list(range(ndim)) - elif isinstance(axes, integer_types): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = n - (n + 1) // 2 - mylist = concatenate((arange(p2, n), arange(p2))) - y = take(y, mylist, k) - return y - - # create possible 2d array combinations and try all possible keywords - # compare output to original functions - for i in range(16): - for j in range(16): - for axes_keyword in [0, 1, None, (0,), (0, 1)]: - inp = np.random.rand(i, j) - - assert_array_almost_equal(fft.fftshift(inp, axes_keyword), - original_fftshift(inp, axes_keyword)) - - assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), - original_ifftshift(inp, axes_keyword)) - - -class TestFFTFreq(object): - - def test_definition(self): - x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - assert_array_almost_equal(9*fft.fftfreq(9), x) - assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) - x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - assert_array_almost_equal(10*fft.fftfreq(10), x) - assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) - - -class TestRFFTFreq(object): - - def test_definition(self): - x = [0, 1, 2, 3, 4] - assert_array_almost_equal(9*fft.rfftfreq(9), x) - assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) - x = [0, 1, 2, 3, 4, 5] - assert_array_almost_equal(10*fft.rfftfreq(10), x) - assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) - - -class TestIRFFTN(object): - - def test_not_last_axis_success(self): - ar, ai = np.random.random((2, 16, 8, 32)) - a = ar + 1j*ai - - axes = (-2,) - - # Should not raise error - fft.irfftn(a, axes=axes) diff --git 
a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_pocketfft.py b/venv/lib/python3.7/site-packages/numpy/fft/tests/test_pocketfft.py deleted file mode 100644 index 453e964..0000000 --- a/venv/lib/python3.7/site-packages/numpy/fft/tests/test_pocketfft.py +++ /dev/null @@ -1,261 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import pytest -from numpy.random import random -from numpy.testing import ( - assert_array_equal, assert_raises, assert_allclose - ) -import threading -import sys -if sys.version_info[0] >= 3: - import queue -else: - import Queue as queue - - -def fft1(x): - L = len(x) - phase = -2j*np.pi*(np.arange(L)/float(L)) - phase = np.arange(L).reshape(-1, 1) * phase - return np.sum(x*np.exp(phase), axis=1) - - -class TestFFTShift(object): - - def test_fft_n(self): - assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) - - -class TestFFT1D(object): - - def test_identity(self): - maxlen = 512 - x = random(maxlen) + 1j*random(maxlen) - xr = random(maxlen) - for i in range(1,maxlen): - assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], - atol=1e-12) - assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]),i), - xr[0:i], atol=1e-12) - - def test_fft(self): - x = random(30) + 1j*random(30) - assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) - assert_allclose(fft1(x) / np.sqrt(30), - np.fft.fft(x, norm="ortho"), atol=1e-6) - - @pytest.mark.parametrize('norm', (None, 'ortho')) - def test_ifft(self, norm): - x = random(30) + 1j*random(30) - assert_allclose( - x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), - atol=1e-6) - # Ensure we get the correct error message - with pytest.raises(ValueError, - match='Invalid number of FFT data points'): - np.fft.ifft([], norm=norm) - - def test_fft2(self): - x = random((30, 20)) + 1j*random((30, 20)) - assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), - np.fft.fft2(x), atol=1e-6) - assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), - np.fft.fft2(x, norm="ortho"), atol=1e-6) - - def test_ifft2(self): - x = random((30, 20)) + 1j*random((30, 20)) - assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), - np.fft.ifft2(x), atol=1e-6) - assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), - np.fft.ifft2(x, norm="ortho"), atol=1e-6) - - def test_fftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) - assert_allclose( - np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), - np.fft.fftn(x), atol=1e-6) - assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), - np.fft.fftn(x, norm="ortho"), atol=1e-6) - - def test_ifftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) - assert_allclose( - np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), - np.fft.ifftn(x), atol=1e-6) - assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), - np.fft.ifftn(x, norm="ortho"), atol=1e-6) - - def test_rfft(self): - x = random(30) - for n in [x.size, 2*x.size]: - for norm in [None, 'ortho']: - assert_allclose( - np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], - np.fft.rfft(x, n=n, norm=norm), atol=1e-6) - assert_allclose( - np.fft.rfft(x, n=n) / np.sqrt(n), - np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) - - def test_irfft(self): - x = random(30) - assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) - assert_allclose( - x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"), atol=1e-6) - - def test_rfft2(self): - x = random((30, 20)) - assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) - assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 
20), - np.fft.rfft2(x, norm="ortho"), atol=1e-6) - - def test_irfft2(self): - x = random((30, 20)) - assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) - assert_allclose( - x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"), atol=1e-6) - - def test_rfftn(self): - x = random((30, 20, 10)) - assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) - assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), - np.fft.rfftn(x, norm="ortho"), atol=1e-6) - - def test_irfftn(self): - x = random((30, 20, 10)) - assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) - assert_allclose( - x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"), atol=1e-6) - - def test_hfft(self): - x = random(14) + 1j*random(14) - x_herm = np.concatenate((random(1), x, random(1))) - x = np.concatenate((x_herm, x[::-1].conj())) - assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) - assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), - np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) - - def test_ihttf(self): - x = random(14) + 1j*random(14) - x_herm = np.concatenate((random(1), x, random(1))) - x = np.concatenate((x_herm, x[::-1].conj())) - assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) - assert_allclose( - x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"), - norm="ortho"), atol=1e-6) - - @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, - np.fft.rfftn, np.fft.irfftn]) - def test_axes(self, op): - x = random((30, 20, 10)) - axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] - for a in axes: - op_tr = op(np.transpose(x, a)) - tr_op = np.transpose(op(x, axes=a), a) - assert_allclose(op_tr, tr_op, atol=1e-6) - - def test_all_1d_norm_preserving(self): - # verify that round-trip transforms are norm-preserving - x = random(30) - x_norm = np.linalg.norm(x) - n = x.size * 2 - func_pairs = [(np.fft.fft, np.fft.ifft), - (np.fft.rfft, np.fft.irfft), - # hfft: order so the first function takes x.size samples - # (necessary for comparison to x_norm above) - (np.fft.ihfft, np.fft.hfft), - ] - for forw, back in func_pairs: - for n in [x.size, 2*x.size]: - for norm in [None, 'ortho']: - tmp = forw(x, n=n, norm=norm) - tmp = back(tmp, n=n, norm=norm) - assert_allclose(x_norm, - np.linalg.norm(tmp), atol=1e-6) - - @pytest.mark.parametrize("dtype", [np.half, np.single, np.double, - np.longdouble]) - def test_dtypes(self, dtype): - # make sure that all input precisions are accepted and internally - # converted to 64bit - x = random(30).astype(dtype) - assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6) - assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6) - - -@pytest.mark.parametrize( - "dtype", - [np.float32, np.float64, np.complex64, np.complex128]) -@pytest.mark.parametrize("order", ["F", 'non-contiguous']) -@pytest.mark.parametrize( - "fft", - [np.fft.fft, np.fft.fft2, np.fft.fftn, - np.fft.ifft, np.fft.ifft2, np.fft.ifftn]) -def test_fft_with_order(dtype, order, fft): - # Check that FFT/IFFT produces identical results for C, Fortran and - # non contiguous arrays - rng = np.random.RandomState(42) - X = rng.rand(8, 7, 13).astype(dtype, copy=False) - # See discussion in pull/14178 - _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps - if order == 'F': - Y = np.asfortranarray(X) - else: - # Make a non contiguous array - Y = X[::-1] - X = np.ascontiguousarray(X[::-1]) - - if fft.__name__.endswith('fft'): - for axis in range(3): - X_res = fft(X, axis=axis) - Y_res = fft(Y, axis=axis) - 
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) - elif fft.__name__.endswith(('fft2', 'fftn')): - axes = [(0, 1), (1, 2), (0, 2)] - if fft.__name__.endswith('fftn'): - axes.extend([(0,), (1,), (2,), None]) - for ax in axes: - X_res = fft(X, axes=ax) - Y_res = fft(Y, axes=ax) - assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) - else: - raise ValueError() - - -class TestFFTThreadSafe(object): - threads = 16 - input_shape = (800, 200) - - def _test_mtsame(self, func, *args): - def worker(args, q): - q.put(func(*args)) - - q = queue.Queue() - expected = func(*args) - - # Spin off a bunch of threads to call the same function simultaneously - t = [threading.Thread(target=worker, args=(args, q)) - for i in range(self.threads)] - [x.start() for x in t] - - [x.join() for x in t] - # Make sure all threads returned the correct value - for i in range(self.threads): - assert_array_equal(q.get(timeout=5), expected, - 'Function returned wrong value in multithreaded context') - - def test_fft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.fft, a) - - def test_ifft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.ifft, a) - - def test_rfft(self): - a = np.ones(self.input_shape) - self._test_mtsame(np.fft.rfft, a) - - def test_irfft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.irfft, a) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/__init__.py b/venv/lib/python3.7/site-packages/numpy/lib/__init__.py deleted file mode 100644 index 2db12d9..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -**Note:** almost all functions in the ``numpy.lib`` namespace -are also present in the main ``numpy`` namespace. Please use the -functions as ``np.`` where possible. - -``numpy.lib`` is mostly a space for implementing functions that don't -belong in core or in another NumPy submodule with a clear purpose -(e.g. ``random``, ``fft``, ``linalg``, ``ma``). - -Most contains basic functions that are used by several submodules and are -useful to have in the main name-space. - -""" -from __future__ import division, absolute_import, print_function - -import math - -from numpy.version import version as __version__ - -# Public submodules -# Note: recfunctions and (maybe) format are public too, but not imported -from . import mixins -from . 
import scimath as emath - -# Private submodules -from .type_check import * -from .index_tricks import * -from .function_base import * -from .nanfunctions import * -from .shape_base import * -from .stride_tricks import * -from .twodim_base import * -from .ufunclike import * -from .histograms import * - -from .polynomial import * -from .utils import * -from .arraysetops import * -from .npyio import * -from .financial import * -from .arrayterator import Arrayterator -from .arraypad import * -from ._version import * -from numpy.core._multiarray_umath import tracemalloc_domain - -__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator'] -__all__ += type_check.__all__ -__all__ += index_tricks.__all__ -__all__ += function_base.__all__ -__all__ += shape_base.__all__ -__all__ += stride_tricks.__all__ -__all__ += twodim_base.__all__ -__all__ += ufunclike.__all__ -__all__ += arraypad.__all__ -__all__ += polynomial.__all__ -__all__ += utils.__all__ -__all__ += arraysetops.__all__ -__all__ += npyio.__all__ -__all__ += financial.__all__ -__all__ += nanfunctions.__all__ -__all__ += histograms.__all__ - -from numpy._pytesttester import PytestTester -test = PytestTester(__name__) -del PytestTester diff --git a/venv/lib/python3.7/site-packages/numpy/lib/_datasource.py b/venv/lib/python3.7/site-packages/numpy/lib/_datasource.py deleted file mode 100644 index 0d71375..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/_datasource.py +++ /dev/null @@ -1,794 +0,0 @@ -"""A file interface for handling local and remote data files. - -The goal of datasource is to abstract some of the file system operations -when dealing with data files so the researcher doesn't have to know all the -low-level details. Through datasource, a researcher can obtain and use a -file with one function call, regardless of location of the file. - -DataSource is meant to augment standard python libraries, not replace them. -It should work seamlessly with standard file IO operations and the os -module. - -DataSource files can originate locally or remotely: - -- local files : '/home/guido/src/local/data.txt' -- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' - -DataSource files can also be compressed or uncompressed. Currently only -gzip, bz2 and xz are supported. - -Example:: - - >>> # Create a DataSource, use os.curdir (default) for local storage. - >>> from numpy import DataSource - >>> ds = DataSource() - >>> - >>> # Open a remote file. - >>> # DataSource downloads the file, stores it locally in: - >>> # './www.google.com/index.html' - >>> # opens the file and returns a file object. - >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP - >>> - >>> # Use the file as you normally would - >>> fp.read() # doctest: +SKIP - >>> fp.close() # doctest: +SKIP - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import warnings -import shutil -import io -from contextlib import closing - -from numpy.core.overrides import set_module - - -_open = open - - -def _check_mode(mode, encoding, newline): - """Check mode and that encoding and newline are compatible. - - Parameters - ---------- - mode : str - File open mode. - encoding : str - File encoding. - newline : str - Newline for text files. 
- - """ - if "t" in mode: - if "b" in mode: - raise ValueError("Invalid mode: %r" % (mode,)) - else: - if encoding is not None: - raise ValueError("Argument 'encoding' not supported in binary mode") - if newline is not None: - raise ValueError("Argument 'newline' not supported in binary mode") - - -def _python2_bz2open(fn, mode, encoding, newline): - """Wrapper to open bz2 in text mode. - - Parameters - ---------- - fn : str - File name - mode : {'r', 'w'} - File mode. Note that bz2 Text files are not supported. - encoding : str - Ignored, text bz2 files not supported in Python2. - newline : str - Ignored, text bz2 files not supported in Python2. - """ - import bz2 - - _check_mode(mode, encoding, newline) - - if "t" in mode: - # BZ2File is missing necessary functions for TextIOWrapper - warnings.warn("Assuming latin1 encoding for bz2 text file in Python2", - RuntimeWarning, stacklevel=5) - mode = mode.replace("t", "") - return bz2.BZ2File(fn, mode) - -def _python2_gzipopen(fn, mode, encoding, newline): - """ Wrapper to open gzip in text mode. - - Parameters - ---------- - fn : str, bytes, file - File path or opened file. - mode : str - File mode. The actual files are opened as binary, but will decoded - using the specified `encoding` and `newline`. - encoding : str - Encoding to be used when reading/writing as text. - newline : str - Newline to be used when reading/writing as text. - - """ - import gzip - # gzip is lacking read1 needed for TextIOWrapper - class GzipWrap(gzip.GzipFile): - def read1(self, n): - return self.read(n) - - _check_mode(mode, encoding, newline) - - gz_mode = mode.replace("t", "") - - if isinstance(fn, (str, bytes)): - binary_file = GzipWrap(fn, gz_mode) - elif hasattr(fn, "read") or hasattr(fn, "write"): - binary_file = GzipWrap(None, gz_mode, fileobj=fn) - else: - raise TypeError("filename must be a str or bytes object, or a file") - - if "t" in mode: - return io.TextIOWrapper(binary_file, encoding, newline=newline) - else: - return binary_file - - -# Using a class instead of a module-level dictionary -# to reduce the initial 'import numpy' overhead by -# deferring the import of lzma, bz2 and gzip until needed - -# TODO: .zip support, .tar support? -class _FileOpeners(object): - """ - Container for different methods to open (un-)compressed files. - - `_FileOpeners` contains a dictionary that holds one method for each - supported file format. Attribute lookup is implemented in such a way - that an instance of `_FileOpeners` itself can be indexed with the keys - of that dictionary. Currently uncompressed files as well as files - compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. - - Notes - ----- - `_file_openers`, an instance of `_FileOpeners`, is made available for - use in the `_datasource` module. 
- - Examples - -------- - >>> import gzip - >>> np.lib._datasource._file_openers.keys() - [None, '.bz2', '.gz', '.xz', '.lzma'] - >>> np.lib._datasource._file_openers['.gz'] is gzip.open - True - - """ - - def __init__(self): - self._loaded = False - self._file_openers = {None: io.open} - - def _load(self): - if self._loaded: - return - - try: - import bz2 - if sys.version_info[0] >= 3: - self._file_openers[".bz2"] = bz2.open - else: - self._file_openers[".bz2"] = _python2_bz2open - except ImportError: - pass - - try: - import gzip - if sys.version_info[0] >= 3: - self._file_openers[".gz"] = gzip.open - else: - self._file_openers[".gz"] = _python2_gzipopen - except ImportError: - pass - - try: - import lzma - self._file_openers[".xz"] = lzma.open - self._file_openers[".lzma"] = lzma.open - except (ImportError, AttributeError): - # There are incompatible backports of lzma that do not have the - # lzma.open attribute, so catch that as well as ImportError. - pass - - self._loaded = True - - def keys(self): - """ - Return the keys of currently supported file openers. - - Parameters - ---------- - None - - Returns - ------- - keys : list - The keys are None for uncompressed files and the file extension - strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression - methods. - - """ - self._load() - return list(self._file_openers.keys()) - - def __getitem__(self, key): - self._load() - return self._file_openers[key] - -_file_openers = _FileOpeners() - -def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): - """ - Open `path` with `mode` and return the file object. - - If ``path`` is an URL, it will be downloaded, stored in the - `DataSource` `destpath` directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : str, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to - append. Available modes depend on the type of object specified by - path. Default is 'r'. - destpath : str, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - encoding : {None, str}, optional - Open text file with given encoding. The default encoding will be - what `io.open` uses. - newline : {None, str}, optional - Newline to use when reading text file. - - Returns - ------- - out : file object - The opened file. - - Notes - ----- - This is a convenience function that instantiates a `DataSource` and - returns the file object from ``DataSource.open(path)``. - - """ - - ds = DataSource(destpath) - return ds.open(path, mode, encoding=encoding, newline=newline) - - -@set_module('numpy') -class DataSource(object): - """ - DataSource(destpath='.') - - A generic data source file (file, http, ftp, ...). - - DataSources can be local files or remote files/URLs. The files may - also be compressed or uncompressed. DataSource hides some of the - low-level details of downloading the file, allowing you to simply pass - in a valid file path (or URL) and obtain a file object. - - Parameters - ---------- - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. 
- - Notes - ----- - URLs require a scheme string (``http://``) to be used, without it they - will fail:: - - >>> repos = np.DataSource() - >>> repos.exists('www.google.com/index.html') - False - >>> repos.exists('http://www.google.com/index.html') - True - - Temporary directories are deleted when the DataSource is deleted. - - Examples - -------- - :: - - >>> ds = np.DataSource('/home/guido') - >>> urlname = 'http://www.google.com/' - >>> gfile = ds.open('http://www.google.com/') - >>> ds.abspath(urlname) - '/home/guido/www.google.com/index.html' - - >>> ds = np.DataSource(None) # use with temporary file - >>> ds.open('/home/guido/foobar.txt') - - >>> ds.abspath('/home/guido/foobar.txt') - '/tmp/.../home/guido/foobar.txt' - - """ - - def __init__(self, destpath=os.curdir): - """Create a DataSource with a local path at destpath.""" - if destpath: - self._destpath = os.path.abspath(destpath) - self._istmpdest = False - else: - import tempfile # deferring import to improve startup time - self._destpath = tempfile.mkdtemp() - self._istmpdest = True - - def __del__(self): - # Remove temp directories - if hasattr(self, '_istmpdest') and self._istmpdest: - shutil.rmtree(self._destpath) - - def _iszip(self, filename): - """Test if the filename is a zip file by looking at the file extension. - - """ - fname, ext = os.path.splitext(filename) - return ext in _file_openers.keys() - - def _iswritemode(self, mode): - """Test if the given mode will open a file for writing.""" - - # Currently only used to test the bz2 files. - _writemodes = ("w", "+") - for c in mode: - if c in _writemodes: - return True - return False - - def _splitzipext(self, filename): - """Split zip extension from filename and return filename. - - *Returns*: - base, zip_ext : {tuple} - - """ - - if self._iszip(filename): - return os.path.splitext(filename) - else: - return filename, None - - def _possible_names(self, filename): - """Return a tuple containing compressed filename variations.""" - names = [filename] - if not self._iszip(filename): - for zipext in _file_openers.keys(): - if zipext: - names.append(filename+zipext) - return names - - def _isurl(self, path): - """Test if path is a net location. Tests the scheme and netloc.""" - - # We do this here to reduce the 'import numpy' initial import time. - if sys.version_info[0] >= 3: - from urllib.parse import urlparse - else: - from urlparse import urlparse - - # BUG : URLs require a scheme string ('http://') to be used. - # www.google.com will fail. - # Should we prepend the scheme for those that don't have it and - # test that also? Similar to the way we append .gz and test for - # for compressed versions of files. - - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - return bool(scheme and netloc) - - def _cache(self, path): - """Cache the file specified by path. - - Creates a copy of the file in the datasource cache. - - """ - # We import these here because importing urllib2 is slow and - # a significant fraction of numpy's total import time. - if sys.version_info[0] >= 3: - from urllib.request import urlopen - from urllib.error import URLError - else: - from urllib2 import urlopen - from urllib2 import URLError - - upath = self.abspath(path) - - # ensure directory exists - if not os.path.exists(os.path.dirname(upath)): - os.makedirs(os.path.dirname(upath)) - - # TODO: Doesn't handle compressed files! 
- if self._isurl(path): - try: - with closing(urlopen(path)) as openedurl: - with _open(upath, 'wb') as f: - shutil.copyfileobj(openedurl, f) - except URLError: - raise URLError("URL not found: %s" % path) - else: - shutil.copyfile(path, upath) - return upath - - def _findfile(self, path): - """Searches for ``path`` and returns full path if found. - - If path is an URL, _findfile will cache a local copy and return the - path to the cached file. If path is a local file, _findfile will - return a path to that local file. - - The search will include possible compressed versions of the file - and return the first occurrence found. - - """ - - # Build list of possible local file paths - if not self._isurl(path): - # Valid local paths - filelist = self._possible_names(path) - # Paths in self._destpath - filelist += self._possible_names(self.abspath(path)) - else: - # Cached URLs in self._destpath - filelist = self._possible_names(self.abspath(path)) - # Remote URLs - filelist = filelist + self._possible_names(path) - - for name in filelist: - if self.exists(name): - if self._isurl(name): - name = self._cache(name) - return name - return None - - def abspath(self, path): - """ - Return absolute path of file in the DataSource directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - Notes - ----- - The functionality is based on `os.path.abspath`. - - """ - # We do this here to reduce the 'import numpy' initial import time. - if sys.version_info[0] >= 3: - from urllib.parse import urlparse - else: - from urlparse import urlparse - - # TODO: This should be more robust. Handles case where path includes - # the destpath, but not other sub-paths. Failing case: - # path = /home/guido/datafile.txt - # destpath = /home/alex/ - # upath = self.abspath(path) - # upath == '/home/alex/home/guido/datafile.txt' - - # handle case where path includes self._destpath - splitpath = path.split(self._destpath, 2) - if len(splitpath) > 1: - path = splitpath[1] - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - netloc = self._sanitize_relative_path(netloc) - upath = self._sanitize_relative_path(upath) - return os.path.join(self._destpath, netloc, upath) - - def _sanitize_relative_path(self, path): - """Return a sanitised relative path for which - os.path.abspath(os.path.join(base, path)).startswith(base) - """ - last = None - path = os.path.normpath(path) - while path != last: - last = path - # Note: os.path.join treats '/' as os.sep on Windows - path = path.lstrip(os.sep).lstrip('/') - path = path.lstrip(os.pardir).lstrip('..') - drive, path = os.path.splitdrive(path) # for Windows - return path - - def exists(self, path): - """ - Test if path exists. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and - accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either - stored locally in the `DataSource` directory, or is a valid remote - URL. 
`DataSource` does not discriminate between the two, the file - is accessible if it exists in either location. - - """ - - # First test for local path - if os.path.exists(path): - return True - - # We import this here because importing urllib2 is slow and - # a significant fraction of numpy's total import time. - if sys.version_info[0] >= 3: - from urllib.request import urlopen - from urllib.error import URLError - else: - from urllib2 import urlopen - from urllib2 import URLError - - # Test cached url - upath = self.abspath(path) - if os.path.exists(upath): - return True - - # Test remote url - if self._isurl(path): - try: - netfile = urlopen(path) - netfile.close() - del(netfile) - return True - except URLError: - return False - return False - - def open(self, path, mode='r', encoding=None, newline=None): - """ - Open and return file-like object. - - If `path` is an URL, it will be downloaded, stored in the - `DataSource` directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, - 'a' to append. Available modes depend on the type of object - specified by `path`. Default is 'r'. - encoding : {None, str}, optional - Open text file with given encoding. The default encoding will be - what `io.open` uses. - newline : {None, str}, optional - Newline to use when reading text file. - - Returns - ------- - out : file object - File object. - - """ - - # TODO: There is no support for opening a file for writing which - # doesn't exist yet (creating a file). Should there be? - - # TODO: Add a ``subdir`` parameter for specifying the subdirectory - # used to store URLs in self._destpath. - - if self._isurl(path) and self._iswritemode(mode): - raise ValueError("URLs are not writeable") - - # NOTE: _findfile will fail on a new file opened for writing. - found = self._findfile(path) - if found: - _fname, ext = self._splitzipext(found) - if ext == 'bz2': - mode.replace("+", "") - return _file_openers[ext](found, mode=mode, - encoding=encoding, newline=newline) - else: - raise IOError("%s not found." % path) - - -class Repository (DataSource): - """ - Repository(baseurl, destpath='.') - - A data repository where multiple DataSource's share a base - URL/directory. - - `Repository` extends `DataSource` by prepending a base URL (or - directory) to all the files it handles. Use `Repository` when you will - be working with multiple files from one base URL. Initialize - `Repository` with the base URL, then refer to each file by its filename - only. - - Parameters - ---------- - baseurl : str - Path to the local directory or remote location that contains the - data files. - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Examples - -------- - To analyze all files in the repository, do something like this - (note: this is not self-contained code):: - - >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') - >>> for filename in filelist: - ... fp = repos.open(filename) - ... fp.analyze() - ... 
fp.close() - - Similarly you could use a URL for a repository:: - - >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') - - """ - - def __init__(self, baseurl, destpath=os.curdir): - """Create a Repository with a shared url or directory of baseurl.""" - DataSource.__init__(self, destpath=destpath) - self._baseurl = baseurl - - def __del__(self): - DataSource.__del__(self) - - def _fullpath(self, path): - """Return complete path for path. Prepends baseurl if necessary.""" - splitpath = path.split(self._baseurl, 2) - if len(splitpath) == 1: - result = os.path.join(self._baseurl, path) - else: - result = path # path contains baseurl already - return result - - def _findfile(self, path): - """Extend DataSource method to prepend baseurl to ``path``.""" - return DataSource._findfile(self, self._fullpath(path)) - - def abspath(self, path): - """ - Return absolute path of file in the Repository directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not - have to, include the `baseurl` with which the `Repository` was - initialized. - - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - """ - return DataSource.abspath(self, self._fullpath(path)) - - def exists(self, path): - """ - Test if path exists prepending Repository base URL to path. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and - accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not - have to, include the `baseurl` with which the `Repository` was - initialized. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either - stored locally in the `DataSource` directory, or is a valid remote - URL. `DataSource` does not discriminate between the two, the file - is accessible if it exists in either location. - - """ - return DataSource.exists(self, self._fullpath(path)) - - def open(self, path, mode='r', encoding=None, newline=None): - """ - Open and return file-like object prepending Repository base URL. - - If `path` is an URL, it will be downloaded, stored in the - DataSource directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. This may, but does not have to, - include the `baseurl` with which the `Repository` was - initialized. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, - 'a' to append. Available modes depend on the type of object - specified by `path`. Default is 'r'. - encoding : {None, str}, optional - Open text file with given encoding. The default encoding will be - what `io.open` uses. - newline : {None, str}, optional - Newline to use when reading text file. - - Returns - ------- - out : file object - File object. - - """ - return DataSource.open(self, self._fullpath(path), mode, - encoding=encoding, newline=newline) - - def listdir(self): - """ - List files in the source Repository. - - Returns - ------- - files : list of str - List of file names (not containing a directory part). 
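The `_sanitize_relative_path` helper shown earlier is the security-relevant piece of `abspath`: it strips separators, drive letters and `..` components until the path stabilises, so a cached file can only land inside `destpath`. A standalone copy to demonstrate (POSIX output shown):

import os

def sanitize_relative_path(path):
    # Same loop as DataSource._sanitize_relative_path: strip leading
    # separators, '..' components and drive letters until the path
    # stops changing, so joining it onto destpath cannot escape it.
    last = None
    path = os.path.normpath(path)
    while path != last:
        last = path
        path = path.lstrip(os.sep).lstrip('/')
        path = path.lstrip(os.pardir).lstrip('..')
        drive, path = os.path.splitdrive(path)
    return path

print(sanitize_relative_path('../../etc/passwd'))  # etc/passwd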
- - Notes - ----- - Does not currently work for remote repositories. - - """ - if self._isurl(self._baseurl): - raise NotImplementedError( - "Directory listing of URLs, not supported yet.") - else: - return os.listdir(self._baseurl) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/_iotools.py b/venv/lib/python3.7/site-packages/numpy/lib/_iotools.py deleted file mode 100644 index c392929..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/_iotools.py +++ /dev/null @@ -1,958 +0,0 @@ -"""A collection of functions designed to help I/O with ascii files. - -""" -from __future__ import division, absolute_import, print_function - -__docformat__ = "restructuredtext en" - -import sys -import numpy as np -import numpy.core.numeric as nx -from numpy.compat import asbytes, asunicode, bytes, basestring - -if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str -else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - -def _decode_line(line, encoding=None): - """Decode bytes from binary input streams. - - Defaults to decoding from 'latin1'. That differs from the behavior of - np.compat.asunicode that decodes from 'ascii'. - - Parameters - ---------- - line : str or bytes - Line to be decoded. - - Returns - ------- - decoded_line : unicode - Unicode in Python 2, a str (unicode) in Python 3. - - """ - if type(line) is bytes: - if encoding is None: - line = line.decode('latin1') - else: - line = line.decode(encoding) - - return line - - -def _is_string_like(obj): - """ - Check whether obj behaves like a string. - """ - try: - obj + '' - except (TypeError, ValueError): - return False - return True - - -def _is_bytes_like(obj): - """ - Check whether obj behaves like a bytes object. - """ - try: - obj + b'' - except (TypeError, ValueError): - return False - return True - - -def _to_filehandle(fname, flag='r', return_opened=False): - """ - Returns the filehandle corresponding to a string or a file. - If the string ends in '.gz', the file is automatically unzipped. - - Parameters - ---------- - fname : string, filehandle - Name of the file whose filehandle must be returned. - flag : string, optional - Flag indicating the status of the file ('r' for read, 'w' for write). - return_opened : boolean, optional - Whether to return the opening status of the file. - """ - if _is_string_like(fname): - if fname.endswith('.gz'): - import gzip - fhd = gzip.open(fname, flag) - elif fname.endswith('.bz2'): - import bz2 - fhd = bz2.BZ2File(fname) - else: - fhd = file(fname, flag) - opened = True - elif hasattr(fname, 'seek'): - fhd = fname - opened = False - else: - raise ValueError('fname must be a string or file handle') - if return_opened: - return fhd, opened - return fhd - - -def has_nested_fields(ndtype): - """ - Returns whether one or several fields of a dtype are nested. - - Parameters - ---------- - ndtype : dtype - Data-type of a structured array. - - Raises - ------ - AttributeError - If `ndtype` does not have a `names` attribute. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) - >>> np.lib._iotools.has_nested_fields(dt) - False - - """ - for name in ndtype.names or (): - if ndtype[name].names is not None: - return True - return False - - -def flatten_dtype(ndtype, flatten_base=False): - """ - Unpack a structured data-type by collapsing nested fields and/or fields - with a shape. - - Note that the field names are lost. 
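One wrinkle in `_to_filehandle` above: its final fallback calls the `file()` builtin, which exists only on Python 2, so that branch raises `NameError` on Python 3. A hypothetical Python 3-safe rendering of the same dispatch:

import gzip
import bz2

def to_filehandle(fname, flag='r'):
    # Dispatch on extension as _to_filehandle does, but fall back to
    # the open() builtin instead of the Python 2-only file().
    if hasattr(fname, 'seek'):
        return fname, False
    if fname.endswith('.gz'):
        return gzip.open(fname, flag), True
    if fname.endswith('.bz2'):
        return bz2.BZ2File(fname), True
    return open(fname, flag), True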
- - Parameters - ---------- - ndtype : dtype - The datatype to collapse - flatten_base : bool, optional - If True, transform a field with a shape into several fields. Default is - False. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ... ('block', int, (2, 3))]) - >>> np.lib._iotools.flatten_dtype(dt) - [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] - >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) - [dtype('S4'), - dtype('float64'), - dtype('float64'), - dtype('int64'), - dtype('int64'), - dtype('int64'), - dtype('int64'), - dtype('int64'), - dtype('int64')] - - """ - names = ndtype.names - if names is None: - if flatten_base: - return [ndtype.base] * int(np.prod(ndtype.shape)) - return [ndtype.base] - else: - types = [] - for field in names: - info = ndtype.fields[field] - flat_dt = flatten_dtype(info[0], flatten_base) - types.extend(flat_dt) - return types - - -class LineSplitter(object): - """ - Object to split a string at a given delimiter or at given places. - - Parameters - ---------- - delimiter : str, int, or sequence of ints, optional - If a string, character used to delimit consecutive fields. - If an integer or a sequence of integers, width(s) of each field. - comments : str, optional - Character used to mark the beginning of a comment. Default is '#'. - autostrip : bool, optional - Whether to strip each individual field. Default is True. - - """ - - def autostrip(self, method): - """ - Wrapper to strip each member of the output of `method`. - - Parameters - ---------- - method : function - Function that takes a single argument and returns a sequence of - strings. - - Returns - ------- - wrapped : function - The result of wrapping `method`. `wrapped` takes a single input - argument and returns a list of strings that are stripped of - white-space. - - """ - return lambda input: [_.strip() for _ in method(input)] - # - - def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None): - delimiter = _decode_line(delimiter) - comments = _decode_line(comments) - - self.comments = comments - - # Delimiter is a character - if (delimiter is None) or isinstance(delimiter, basestring): - delimiter = delimiter or None - _handyman = self._delimited_splitter - # Delimiter is a list of field widths - elif hasattr(delimiter, '__iter__'): - _handyman = self._variablewidth_splitter - idx = np.cumsum([0] + list(delimiter)) - delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] - # Delimiter is a single integer - elif int(delimiter): - (_handyman, delimiter) = ( - self._fixedwidth_splitter, int(delimiter)) - else: - (_handyman, delimiter) = (self._delimited_splitter, None) - self.delimiter = delimiter - if autostrip: - self._handyman = self.autostrip(_handyman) - else: - self._handyman = _handyman - self.encoding = encoding - # - - def _delimited_splitter(self, line): - """Chop off comments, strip, and split at delimiter. 
""" - if self.comments is not None: - line = line.split(self.comments)[0] - line = line.strip(" \r\n") - if not line: - return [] - return line.split(self.delimiter) - # - - def _fixedwidth_splitter(self, line): - if self.comments is not None: - line = line.split(self.comments)[0] - line = line.strip("\r\n") - if not line: - return [] - fixed = self.delimiter - slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] - return [line[s] for s in slices] - # - - def _variablewidth_splitter(self, line): - if self.comments is not None: - line = line.split(self.comments)[0] - if not line: - return [] - slices = self.delimiter - return [line[s] for s in slices] - # - - def __call__(self, line): - return self._handyman(_decode_line(line, self.encoding)) - - -class NameValidator(object): - """ - Object to validate a list of strings to use as field names. - - The strings are stripped of any non alphanumeric character, and spaces - are replaced by '_'. During instantiation, the user can define a list - of names to exclude, as well as a list of invalid characters. Names in - the exclusion list are appended a '_' character. - - Once an instance has been created, it can be called with a list of - names, and a list of valid names will be created. The `__call__` - method accepts an optional keyword "default" that sets the default name - in case of ambiguity. By default this is 'f', so that names will - default to `f0`, `f1`, etc. - - Parameters - ---------- - excludelist : sequence, optional - A list of names to exclude. This list is appended to the default - list ['return', 'file', 'print']. Excluded names are appended an - underscore: for example, `file` becomes `file_` if supplied. - deletechars : str, optional - A string combining invalid characters that must be deleted from the - names. - case_sensitive : {True, False, 'upper', 'lower'}, optional - * If True, field names are case-sensitive. - * If False or 'upper', field names are converted to upper case. - * If 'lower', field names are converted to lower case. - - The default value is True. - replace_space : '_', optional - Character(s) used in replacement of white spaces. - - Notes - ----- - Calling an instance of `NameValidator` is the same as calling its - method `validate`. - - Examples - -------- - >>> validator = np.lib._iotools.NameValidator() - >>> validator(['file', 'field2', 'with space', 'CaSe']) - ('file_', 'field2', 'with_space', 'CaSe') - - >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], - ... deletechars='q', - ... case_sensitive=False) - >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) - ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') - - """ - # - defaultexcludelist = ['return', 'file', 'print'] - defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") - # - - def __init__(self, excludelist=None, deletechars=None, - case_sensitive=None, replace_space='_'): - # Process the exclusion list .. - if excludelist is None: - excludelist = [] - excludelist.extend(self.defaultexcludelist) - self.excludelist = excludelist - # Process the list of characters to delete - if deletechars is None: - delete = self.defaultdeletechars - else: - delete = set(deletechars) - delete.add('"') - self.deletechars = delete - # Process the case option ..... 
- if (case_sensitive is None) or (case_sensitive is True): - self.case_converter = lambda x: x - elif (case_sensitive is False) or case_sensitive.startswith('u'): - self.case_converter = lambda x: x.upper() - elif case_sensitive.startswith('l'): - self.case_converter = lambda x: x.lower() - else: - msg = 'unrecognized case_sensitive value %s.' % case_sensitive - raise ValueError(msg) - # - self.replace_space = replace_space - - def validate(self, names, defaultfmt="f%i", nbfields=None): - """ - Validate a list of strings as field names for a structured array. - - Parameters - ---------- - names : sequence of str - Strings to be validated. - defaultfmt : str, optional - Default format string, used if validating a given string - reduces its length to zero. - nbfields : integer, optional - Final number of validated names, used to expand or shrink the - initial list of names. - - Returns - ------- - validatednames : list of str - The list of validated field names. - - Notes - ----- - A `NameValidator` instance can be called directly, which is the - same as calling `validate`. For examples, see `NameValidator`. - - """ - # Initial checks .............. - if (names is None): - if (nbfields is None): - return None - names = [] - if isinstance(names, basestring): - names = [names, ] - if nbfields is not None: - nbnames = len(names) - if (nbnames < nbfields): - names = list(names) + [''] * (nbfields - nbnames) - elif (nbnames > nbfields): - names = names[:nbfields] - # Set some shortcuts ........... - deletechars = self.deletechars - excludelist = self.excludelist - case_converter = self.case_converter - replace_space = self.replace_space - # Initializes some variables ... - validatednames = [] - seen = dict() - nbempty = 0 - # - for item in names: - item = case_converter(item).strip() - if replace_space: - item = item.replace(' ', replace_space) - item = ''.join([c for c in item if c not in deletechars]) - if item == '': - item = defaultfmt % nbempty - while item in names: - nbempty += 1 - item = defaultfmt % nbempty - nbempty += 1 - elif item in excludelist: - item += '_' - cnt = seen.get(item, 0) - if cnt > 0: - validatednames.append(item + '_%d' % cnt) - else: - validatednames.append(item) - seen[item] = cnt + 1 - return tuple(validatednames) - # - - def __call__(self, names, defaultfmt="f%i", nbfields=None): - return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) - - -def str2bool(value): - """ - Tries to transform a string supposed to represent a boolean to a boolean. - - Parameters - ---------- - value : str - The string that is transformed to a boolean. - - Returns - ------- - boolval : bool - The boolean representation of `value`. - - Raises - ------ - ValueError - If the string is not 'True' or 'False' (case independent) - - Examples - -------- - >>> np.lib._iotools.str2bool('TRUE') - True - >>> np.lib._iotools.str2bool('false') - False - - """ - value = value.upper() - if value == 'TRUE': - return True - elif value == 'FALSE': - return False - else: - raise ValueError("Invalid boolean") - - -class ConverterError(Exception): - """ - Exception raised when an error occurs in a converter for string values. - - """ - pass - - -class ConverterLockError(ConverterError): - """ - Exception raised when an attempt is made to upgrade a locked converter. - - """ - pass - - -class ConversionWarning(UserWarning): - """ - Warning issued when a string converter has a problem. 
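One `validate` behavior the `NameValidator` docstring does not illustrate: `nbfields` pads or truncates the name list, and padded empty slots fall back to `defaultfmt`. A quick sketch:

from numpy.lib._iotools import NameValidator

validator = NameValidator(case_sensitive='lower')
print(validator(['Name', 'with space', 'return'], nbfields=4))
# ('name', 'with_space', 'return_', 'f0')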
- - Notes - ----- - In `genfromtxt` a `ConversionWarning` is issued if raising exceptions - is explicitly suppressed with the "invalid_raise" keyword. - - """ - pass - - -class StringConverter(object): - """ - Factory class for function transforming a string into another object - (int, float). - - After initialization, an instance can be called to transform a string - into another object. If the string is recognized as representing a - missing value, a default value is returned. - - Attributes - ---------- - func : function - Function used for the conversion. - default : any - Default value to return when the input corresponds to a missing - value. - type : type - Type of the output. - _status : int - Integer representing the order of the conversion. - _mapper : sequence of tuples - Sequence of tuples (dtype, function, default value) to evaluate in - order. - _locked : bool - Holds `locked` parameter. - - Parameters - ---------- - dtype_or_func : {None, dtype, function}, optional - If a `dtype`, specifies the input data type, used to define a basic - function and a default value for missing data. For example, when - `dtype` is float, the `func` attribute is set to `float` and the - default value to `np.nan`. If a function, this function is used to - convert a string to another object. In this case, it is recommended - to give an associated default value as input. - default : any, optional - Value to return by default, that is, when the string to be - converted is flagged as missing. If not given, `StringConverter` - tries to supply a reasonable default value. - missing_values : {None, sequence of str}, optional - ``None`` or sequence of strings indicating a missing value. If ``None`` - then missing values are indicated by empty entries. The default is - ``None``. - locked : bool, optional - Whether the StringConverter should be locked to prevent automatic - upgrade or not. Default is False. - - """ - # - _mapper = [(nx.bool_, str2bool, False), - (nx.integer, int, -1)] - - # On 32-bit systems, we need to make sure that we explicitly include - # nx.int64 since ns.integer is nx.int32. - if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize: - _mapper.append((nx.int64, int, -1)) - - _mapper.extend([(nx.floating, float, nx.nan), - (nx.complexfloating, complex, nx.nan + 0j), - (nx.longdouble, nx.longdouble, nx.nan), - (nx.unicode_, asunicode, '???'), - (nx.string_, asbytes, '???')]) - - (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper) - - @classmethod - def _getdtype(cls, val): - """Returns the dtype of the input variable.""" - return np.array(val).dtype - # - - @classmethod - def _getsubdtype(cls, val): - """Returns the type of the dtype of the input variable.""" - return np.array(val).dtype.type - # - # This is a bit annoying. We want to return the "general" type in most - # cases (ie. "string" rather than "S10"), but we want to return the - # specific type for datetime64 (ie. "datetime64[us]" rather than - # "datetime64"). - - @classmethod - def _dtypeortype(cls, dtype): - """Returns dtype for datetime64 and type of dtype otherwise.""" - if dtype.type == np.datetime64: - return dtype - return dtype.type - # - - @classmethod - def upgrade_mapper(cls, func, default=None): - """ - Upgrade the mapper of a StringConverter by adding a new function and - its corresponding default. - - The input function (or sequence of functions) and its associated - default value (if any) is inserted in penultimate position of the - mapper. 
The corresponding type is estimated from the dtype of the - default value. - - Parameters - ---------- - func : var - Function, or sequence of functions - - Examples - -------- - >>> import dateutil.parser - >>> import datetime - >>> dateparser = dateutil.parser.parse - >>> defaultdate = datetime.date(2000, 1, 1) - >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) - """ - # Func is a single functions - if hasattr(func, '__call__'): - cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) - return - elif hasattr(func, '__iter__'): - if isinstance(func[0], (tuple, list)): - for _ in func: - cls._mapper.insert(-1, _) - return - if default is None: - default = [None] * len(func) - else: - default = list(default) - default.append([None] * (len(func) - len(default))) - for (fct, dft) in zip(func, default): - cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) - # - - def __init__(self, dtype_or_func=None, default=None, missing_values=None, - locked=False): - # Defines a lock for upgrade - self._locked = bool(locked) - # No input dtype: minimal initialization - if dtype_or_func is None: - self.func = str2bool - self._status = 0 - self.default = default or False - dtype = np.dtype('bool') - else: - # Is the input a np.dtype ? - try: - self.func = None - dtype = np.dtype(dtype_or_func) - except TypeError: - # dtype_or_func must be a function, then - if not hasattr(dtype_or_func, '__call__'): - errmsg = ("The input argument `dtype` is neither a" - " function nor a dtype (got '%s' instead)") - raise TypeError(errmsg % type(dtype_or_func)) - # Set the function - self.func = dtype_or_func - # If we don't have a default, try to guess it or set it to - # None - if default is None: - try: - default = self.func('0') - except ValueError: - default = None - dtype = self._getdtype(default) - # Set the status according to the dtype - _status = -1 - for (i, (deftype, func, default_def)) in enumerate(self._mapper): - if np.issubdtype(dtype.type, deftype): - _status = i - if default is None: - self.default = default_def - else: - self.default = default - break - # if a converter for the specific dtype is available use that - last_func = func - for (i, (deftype, func, default_def)) in enumerate(self._mapper): - if dtype.type == deftype: - _status = i - last_func = func - if default is None: - self.default = default_def - else: - self.default = default - break - func = last_func - if _status == -1: - # We never found a match in the _mapper... - _status = 0 - self.default = default - self._status = _status - # If the input was a dtype, set the function to the last we saw - if self.func is None: - self.func = func - # If the status is 1 (int), change the function to - # something more robust. - if self.func == self._mapper[1][1]: - if issubclass(dtype.type, np.uint64): - self.func = np.uint64 - elif issubclass(dtype.type, np.int64): - self.func = np.int64 - else: - self.func = lambda x: int(float(x)) - # Store the list of strings corresponding to missing values. 
- if missing_values is None: - self.missing_values = {''} - else: - if isinstance(missing_values, basestring): - missing_values = missing_values.split(",") - self.missing_values = set(list(missing_values) + ['']) - # - self._callingfunction = self._strict_call - self.type = self._dtypeortype(dtype) - self._checked = False - self._initial_default = default - # - - def _loose_call(self, value): - try: - return self.func(value) - except ValueError: - return self.default - # - - def _strict_call(self, value): - try: - - # We check if we can convert the value using the current function - new_value = self.func(value) - - # In addition to having to check whether func can convert the - # value, we also have to make sure that we don't get overflow - # errors for integers. - if self.func is int: - try: - np.array(value, dtype=self.type) - except OverflowError: - raise ValueError - - # We're still here so we can now return the new value - return new_value - - except ValueError: - if value.strip() in self.missing_values: - if not self._status: - self._checked = False - return self.default - raise ValueError("Cannot convert string '%s'" % value) - # - - def __call__(self, value): - return self._callingfunction(value) - # - - def upgrade(self, value): - """ - Find the best converter for a given string, and return the result. - - The supplied string `value` is converted by testing different - converters in order. First the `func` method of the - `StringConverter` instance is tried, if this fails other available - converters are tried. The order in which these other converters - are tried is determined by the `_status` attribute of the instance. - - Parameters - ---------- - value : str - The string to convert. - - Returns - ------- - out : any - The result of converting `value` with the appropriate converter. - - """ - self._checked = True - try: - return self._strict_call(value) - except ValueError: - # Raise an exception if we locked the converter... - if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - errmsg = "Could not find a valid conversion function" - raise ConverterError(errmsg) - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - self._status = _status - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - return self.upgrade(value) - - def iterupgrade(self, value): - self._checked = True - if not hasattr(value, '__iter__'): - value = (value,) - _strict_call = self._strict_call - try: - for _m in value: - _strict_call(_m) - except ValueError: - # Raise an exception if we locked the converter... 
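The `upgrade` walk through `_mapper` is easiest to see live. A small sketch (results assume the default mapper order listed above):

from numpy.lib._iotools import StringConverter

conv = StringConverter()      # starts at the bool entry (str2bool)
print(conv.upgrade('2.5'))    # bool and int fail, float succeeds: 2.5
print(conv('3'))              # instance now converts via float: 3.0
print(conv.upgrade('two'))    # float/complex fail too; string entry: 'two'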
- if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - raise ConverterError( - "Could not find a valid conversion function" - ) - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - self._status = _status - self.iterupgrade(value) - - def update(self, func, default=None, testing_value=None, - missing_values='', locked=False): - """ - Set StringConverter attributes directly. - - Parameters - ---------- - func : function - Conversion function. - default : any, optional - Value to return by default, that is, when the string to be - converted is flagged as missing. If not given, - `StringConverter` tries to supply a reasonable default value. - testing_value : str, optional - A string representing a standard input value of the converter. - This string is used to help defining a reasonable default - value. - missing_values : {sequence of str, None}, optional - Sequence of strings indicating a missing value. If ``None``, then - the existing `missing_values` are cleared. The default is `''`. - locked : bool, optional - Whether the StringConverter should be locked to prevent - automatic upgrade or not. Default is False. - - Notes - ----- - `update` takes the same parameters as the constructor of - `StringConverter`, except that `func` does not accept a `dtype` - whereas `dtype_or_func` in the constructor does. - - """ - self.func = func - self._locked = locked - - # Don't reset the default to None if we can avoid it - if default is not None: - self.default = default - self.type = self._dtypeortype(self._getdtype(default)) - else: - try: - tester = func(testing_value or '1') - except (TypeError, ValueError): - tester = None - self.type = self._dtypeortype(self._getdtype(tester)) - - # Add the missing values to the existing set or clear it. - if missing_values is None: - # Clear all missing values even though the ctor initializes it to - # set(['']) when the argument is None. - self.missing_values = set() - else: - if not np.iterable(missing_values): - missing_values = [missing_values] - if not all(isinstance(v, basestring) for v in missing_values): - raise TypeError("missing_values must be strings or unicode") - self.missing_values.update(missing_values) - - -def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): - """ - Convenience function to create a `np.dtype` object. - - The function processes the input `dtype` and matches it with the given - names. - - Parameters - ---------- - ndtype : var - Definition of the dtype. Can be any string or dictionary recognized - by the `np.dtype` function, or a sequence of types. - names : str or sequence, optional - Sequence of strings to use as field names for a structured dtype. - For convenience, `names` can be a string of a comma-separated list - of names. - defaultfmt : str, optional - Format string used to define missing names, such as ``"f%i"`` - (default) or ``"fields_%02i"``. - validationargs : optional - A series of optional arguments used to initialize a - `NameValidator`. 
- - Examples - -------- - >>> np.lib._iotools.easy_dtype(float) - dtype('float64') - >>> np.lib._iotools.easy_dtype("i4, f8") - dtype([('f0', '<i4'), ('f1', '<f8')]) - >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") - dtype([('field_000', '<i4'), ('field_001', '<f8')]) - >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") - dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')]) - >>> np.lib._iotools.easy_dtype(float, names="a,b,c") - dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')]) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/_version.py b/venv/lib/python3.7/site-packages/numpy/lib/_version.py deleted file mode 100644 --- a/venv/lib/python3.7/site-packages/numpy/lib/_version.py +++ /dev/null -"""Utility to compare (NumPy) version strings. - -The NumpyVersion class allows properly comparing numpy version strings. -The LooseVersion and StrictVersion classes that distutils provides don't -work; they don't recognize anything like alpha/beta/rc/dev versions. - -""" -from __future__ import division, absolute_import, print_function - -import re - -from numpy.compat import basestring - - -__all__ = ['NumpyVersion'] - - -class NumpyVersion(): - """Parse and compare numpy version strings. - - NumPy has the following versioning scheme (numbers given are examples; they - can be > 9 in principle): - - Released version: '1.8.0', '1.8.1', etc. - - Alpha: '1.8.0a1', '1.8.0a2', etc. - - Beta: '1.8.0b1', '1.8.0b2', etc. - - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. - - Development versions (no git hash available): '1.8.0.dev-Unknown' - - Comparing needs to be done against a valid version string or other - `NumpyVersion` instance. Note that all development versions of the same - (pre-)release compare equal. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - vstring : str - NumPy version string (``np.__version__``). - - Examples - -------- - >>> from numpy.lib import NumpyVersion - >>> if NumpyVersion(np.__version__) < '1.7.0': - ... print('skip') - >>> # skip - - >>> NumpyVersion('1.7') # raises ValueError, add ".0" - Traceback (most recent call last): - ... - ValueError: Not a valid numpy version string - - """ - - def __init__(self, vstring): - self.vstring = vstring - ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) - if not ver_main: - raise ValueError("Not a valid numpy version string") - - self.version = ver_main.group() - self.major, self.minor, self.bugfix = [int(x) for x in - self.version.split('.')] - if len(vstring) == ver_main.end(): - self.pre_release = 'final' - else: - alpha = re.match(r'a\d', vstring[ver_main.end():]) - beta = re.match(r'b\d', vstring[ver_main.end():]) - rc = re.match(r'rc\d', vstring[ver_main.end():]) - pre_rel = [m for m in [alpha, beta, rc] if m is not None] - if pre_rel: - self.pre_release = pre_rel[0].group() - else: - self.pre_release = '' - - self.is_devversion = bool(re.search(r'.dev', vstring)) - - def _compare_version(self, other): - """Compare major.minor.bugfix""" - if self.major == other.major: - if self.minor == other.minor: - if self.bugfix == other.bugfix: - vercmp = 0 - elif self.bugfix > other.bugfix: - vercmp = 1 - else: - vercmp = -1 - elif self.minor > other.minor: - vercmp = 1 - else: - vercmp = -1 - elif self.major > other.major: - vercmp = 1 - else: - vercmp = -1 - - return vercmp - - def _compare_pre_release(self, other): - """Compare alpha/beta/rc/final.""" - if self.pre_release == other.pre_release: - vercmp = 0 - elif self.pre_release == 'final': - vercmp = 1 - elif other.pre_release == 'final': - vercmp = -1 - elif self.pre_release > other.pre_release: - vercmp = 1 - else: - vercmp = -1 - - return vercmp - - def _compare(self, other): - if not isinstance(other, (basestring, NumpyVersion)): - raise ValueError("Invalid object to compare with NumpyVersion.") - - if isinstance(other, basestring): - other = NumpyVersion(other) - - vercmp = self._compare_version(other) - if vercmp == 0: - # Same x.y.z version, check for alpha/beta/rc - vercmp = self._compare_pre_release(other) - if vercmp == 0: - # Same version and same pre-release, check if dev version - if self.is_devversion is other.is_devversion: - vercmp = 0 - elif self.is_devversion: - vercmp = -1 - else: - vercmp = 1 - - return vercmp - - def __lt__(self, other): - return
self._compare(other) < 0 - - def __le__(self, other): - return self._compare(other) <= 0 - - def __eq__(self, other): - return self._compare(other) == 0 - - def __ne__(self, other): - return self._compare(other) != 0 - - def __gt__(self, other): - return self._compare(other) > 0 - - def __ge__(self, other): - return self._compare(other) >= 0 - - def __repr(self): - return "NumpyVersion(%s)" % self.vstring diff --git a/venv/lib/python3.7/site-packages/numpy/lib/arraypad.py b/venv/lib/python3.7/site-packages/numpy/lib/arraypad.py deleted file mode 100644 index 33e6470..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/arraypad.py +++ /dev/null @@ -1,881 +0,0 @@ -""" -The arraypad module contains a group of functions to pad values onto the edges -of an n-dimensional array. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.core.overrides import array_function_dispatch -from numpy.lib.index_tricks import ndindex - - -__all__ = ['pad'] - - -############################################################################### -# Private utility functions. - - -def _round_if_needed(arr, dtype): - """ - Rounds arr inplace if destination dtype is integer. - - Parameters - ---------- - arr : ndarray - Input array. - dtype : dtype - The dtype of the destination array. - """ - if np.issubdtype(dtype, np.integer): - arr.round(out=arr) - - -def _slice_at_axis(sl, axis): - """ - Construct tuple of slices to slice an array in the given dimension. - - Parameters - ---------- - sl : slice - The slice for the given dimension. - axis : int - The axis to which `sl` is applied. All other dimensions are left - "unsliced". - - Returns - ------- - sl : tuple of slices - A tuple with slices matching `shape` in length. - - Examples - -------- - >>> _slice_at_axis(slice(None, 3, -1), 1) - (slice(None, None, None), slice(None, 3, -1), (...,)) - """ - return (slice(None),) * axis + (sl,) + (...,) - - -def _view_roi(array, original_area_slice, axis): - """ - Get a view of the current region of interest during iterative padding. - - When padding multiple dimensions iteratively corner values are - unnecessarily overwritten multiple times. This function reduces the - working area for the first dimensions so that corners are excluded. - - Parameters - ---------- - array : ndarray - The array with the region of interest. - original_area_slice : tuple of slices - Denotes the area with original values of the unpadded array. - axis : int - The currently padded dimension assuming that `axis` is padded before - `axis` + 1. - - Returns - ------- - roi : ndarray - The region of interest of the original `array`. - """ - axis += 1 - sl = (slice(None),) * axis + original_area_slice[axis:] - return array[sl] - - -def _pad_simple(array, pad_width, fill_value=None): - """ - Pad array on all sides with either a single value or undefined values. - - Parameters - ---------- - array : ndarray - Array to grow. - pad_width : sequence of tuple[int, int] - Pad width on both sides for each dimension in `arr`. - fill_value : scalar, optional - If provided the padded area is filled with this value, otherwise - the pad area left undefined. - - Returns - ------- - padded : ndarray - The padded array with the same dtype as`array`. Its order will default - to C-style if `array` is not F-contiguous. - original_area_slice : tuple - A tuple of slices pointing to the area of the original array. 
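The `(slice(None),) * axis` trick in `_slice_at_axis` is compact enough to miss. A standalone sketch of the same construction (the free function here is illustrative):

import numpy as np

def slice_at_axis(sl, axis):
    # Same construction as _slice_at_axis: full slices before `axis`,
    # the target slice at `axis`, and Ellipsis for the rest.
    return (slice(None),) * axis + (sl,) + (...,)

a = np.arange(24).reshape(2, 3, 4)
print(a[slice_at_axis(slice(0, 1), 1)].shape)  # (2, 1, 4)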
- """ - # Allocate grown array - new_shape = tuple( - left + size + right - for size, (left, right) in zip(array.shape, pad_width) - ) - order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order - padded = np.empty(new_shape, dtype=array.dtype, order=order) - - if fill_value is not None: - padded.fill(fill_value) - - # Copy old array into correct space - original_area_slice = tuple( - slice(left, left + size) - for size, (left, right) in zip(array.shape, pad_width) - ) - padded[original_area_slice] = array - - return padded, original_area_slice - - -def _set_pad_area(padded, axis, width_pair, value_pair): - """ - Set empty-padded area in given dimension. - - Parameters - ---------- - padded : ndarray - Array with the pad area which is modified inplace. - axis : int - Dimension with the pad area to set. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - value_pair : tuple of scalars or ndarrays - Values inserted into the pad area on each side. It must match or be - broadcastable to the shape of `arr`. - """ - left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) - padded[left_slice] = value_pair[0] - - right_slice = _slice_at_axis( - slice(padded.shape[axis] - width_pair[1], None), axis) - padded[right_slice] = value_pair[1] - - -def _get_edges(padded, axis, width_pair): - """ - Retrieve edge values from empty-padded array in given dimension. - - Parameters - ---------- - padded : ndarray - Empty-padded array. - axis : int - Dimension in which the edges are considered. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - - Returns - ------- - left_edge, right_edge : ndarray - Edge values of the valid area in `padded` in the given dimension. Its - shape will always match `padded` except for the dimension given by - `axis` which will have a length of 1. - """ - left_index = width_pair[0] - left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) - left_edge = padded[left_slice] - - right_index = padded.shape[axis] - width_pair[1] - right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) - right_edge = padded[right_slice] - - return left_edge, right_edge - - -def _get_linear_ramps(padded, axis, width_pair, end_value_pair): - """ - Construct linear ramps for empty-padded array in given dimension. - - Parameters - ---------- - padded : ndarray - Empty-padded array. - axis : int - Dimension in which the ramps are constructed. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - end_value_pair : (scalar, scalar) - End values for the linear ramps which form the edge of the fully padded - array. These values are included in the linear ramps. - - Returns - ------- - left_ramp, right_ramp : ndarray - Linear ramps to set on both sides of `padded`. 
- """ - edge_pair = _get_edges(padded, axis, width_pair) - - left_ramp = np.linspace( - start=end_value_pair[0], - stop=edge_pair[0].squeeze(axis), # Dimensions is replaced by linspace - num=width_pair[0], - endpoint=False, - dtype=padded.dtype, - axis=axis, - ) - - right_ramp = np.linspace( - start=end_value_pair[1], - stop=edge_pair[1].squeeze(axis), # Dimension is replaced by linspace - num=width_pair[1], - endpoint=False, - dtype=padded.dtype, - axis=axis, - ) - # Reverse linear space in appropriate dimension - right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] - - return left_ramp, right_ramp - - -def _get_stats(padded, axis, width_pair, length_pair, stat_func): - """ - Calculate statistic for the empty-padded array in given dimnsion. - - Parameters - ---------- - padded : ndarray - Empty-padded array. - axis : int - Dimension in which the statistic is calculated. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - length_pair : 2-element sequence of None or int - Gives the number of values in valid area from each side that is - taken into account when calculating the statistic. If None the entire - valid area in `padded` is considered. - stat_func : function - Function to compute statistic. The expected signature is - ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. - - Returns - ------- - left_stat, right_stat : ndarray - Calculated statistic for both sides of `padded`. - """ - # Calculate indices of the edges of the area with original values - left_index = width_pair[0] - right_index = padded.shape[axis] - width_pair[1] - # as well as its length - max_length = right_index - left_index - - # Limit stat_lengths to max_length - left_length, right_length = length_pair - if left_length is None or max_length < left_length: - left_length = max_length - if right_length is None or max_length < right_length: - right_length = max_length - - if (left_length == 0 or right_length == 0) \ - and stat_func in {np.amax, np.amin}: - # amax and amin can't operate on an emtpy array, - # raise a more descriptive warning here instead of the default one - raise ValueError("stat_length of 0 yields no value for padding") - - # Calculate statistic for the left side - left_slice = _slice_at_axis( - slice(left_index, left_index + left_length), axis) - left_chunk = padded[left_slice] - left_stat = stat_func(left_chunk, axis=axis, keepdims=True) - _round_if_needed(left_stat, padded.dtype) - - if left_length == right_length == max_length: - # return early as right_stat must be identical to left_stat - return left_stat, left_stat - - # Calculate statistic for the right side - right_slice = _slice_at_axis( - slice(right_index - right_length, right_index), axis) - right_chunk = padded[right_slice] - right_stat = stat_func(right_chunk, axis=axis, keepdims=True) - _round_if_needed(right_stat, padded.dtype) - - return left_stat, right_stat - - -def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): - """ - Pad `axis` of `arr` with reflection. - - Parameters - ---------- - padded : ndarray - Input array of arbitrary shape. - axis : int - Axis along which to pad `arr`. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - method : str - Controls method of reflection; options are 'even' or 'odd'. - include_edge : bool - If true, edge value is included in reflection, otherwise the edge - value forms the symmetric axis to the reflection. 
- - Returns - ------- - pad_amt : tuple of ints, length 2 - New index positions of padding to do along the `axis`. If these are - both 0, padding is done in this dimension. - """ - left_pad, right_pad = width_pair - old_length = padded.shape[axis] - right_pad - left_pad - - if include_edge: - # Edge is included, we need to offset the pad amount by 1 - edge_offset = 1 - else: - edge_offset = 0 # Edge is not included, no need to offset pad amount - old_length -= 1 # but must be omitted from the chunk - - if left_pad > 0: - # Pad with reflected values on left side: - # First limit chunk size which can't be larger than pad area - chunk_length = min(old_length, left_pad) - # Slice right to left, stop on or next to edge, start relative to stop - stop = left_pad - edge_offset - start = stop + chunk_length - left_slice = _slice_at_axis(slice(start, stop, -1), axis) - left_chunk = padded[left_slice] - - if method == "odd": - # Negate chunk and align with edge - edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) - left_chunk = 2 * padded[edge_slice] - left_chunk - - # Insert chunk into padded area - start = left_pad - chunk_length - stop = left_pad - pad_area = _slice_at_axis(slice(start, stop), axis) - padded[pad_area] = left_chunk - # Adjust pointer to left edge for next iteration - left_pad -= chunk_length - - if right_pad > 0: - # Pad with reflected values on right side: - # First limit chunk size which can't be larger than pad area - chunk_length = min(old_length, right_pad) - # Slice right to left, start on or next to edge, stop relative to start - start = -right_pad + edge_offset - 2 - stop = start - chunk_length - right_slice = _slice_at_axis(slice(start, stop, -1), axis) - right_chunk = padded[right_slice] - - if method == "odd": - # Negate chunk and align with edge - edge_slice = _slice_at_axis( - slice(-right_pad - 1, -right_pad), axis) - right_chunk = 2 * padded[edge_slice] - right_chunk - - # Insert chunk into padded area - start = padded.shape[axis] - right_pad - stop = start + chunk_length - pad_area = _slice_at_axis(slice(start, stop), axis) - padded[pad_area] = right_chunk - # Adjust pointer to right edge for next iteration - right_pad -= chunk_length - - return left_pad, right_pad - - -def _set_wrap_both(padded, axis, width_pair): - """ - Pad `axis` of `arr` with wrapped values. - - Parameters - ---------- - padded : ndarray - Input array of arbitrary shape. - axis : int - Axis along which to pad `arr`. - width_pair : (int, int) - Pair of widths that mark the pad area on both sides in the given - dimension. - - Returns - ------- - pad_amt : tuple of ints, length 2 - New index positions of padding to do along the `axis`. If these are - both 0, padding is done in this dimension. - """ - left_pad, right_pad = width_pair - period = padded.shape[axis] - right_pad - left_pad - - # If the current dimension of `arr` doesn't contain enough valid values - # (not part of the undefined pad area) we need to pad multiple times. - # Each time the pad area shrinks on both sides which is communicated with - # these variables. - new_left_pad = 0 - new_right_pad = 0 - - if left_pad > 0: - # Pad with wrapped values on left side - # First slice chunk from right side of the non-pad area. 
- # Use min(period, left_pad) to ensure that chunk is not larger than - # pad area - right_slice = _slice_at_axis( - slice(-right_pad - min(period, left_pad), - -right_pad if right_pad != 0 else None), - axis - ) - right_chunk = padded[right_slice] - - if left_pad > period: - # Chunk is smaller than pad area - pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) - new_left_pad = left_pad - period - else: - # Chunk matches pad area - pad_area = _slice_at_axis(slice(None, left_pad), axis) - padded[pad_area] = right_chunk - - if right_pad > 0: - # Pad with wrapped values on right side - # First slice chunk from left side of the non-pad area. - # Use min(period, right_pad) to ensure that chunk is not larger than - # pad area - left_slice = _slice_at_axis( - slice(left_pad, left_pad + min(period, right_pad),), axis) - left_chunk = padded[left_slice] - - if right_pad > period: - # Chunk is smaller than pad area - pad_area = _slice_at_axis( - slice(-right_pad, -right_pad + period), axis) - new_right_pad = right_pad - period - else: - # Chunk matches pad area - pad_area = _slice_at_axis(slice(-right_pad, None), axis) - padded[pad_area] = left_chunk - - return new_left_pad, new_right_pad - - -def _as_pairs(x, ndim, as_index=False): - """ - Broadcast `x` to an array with the shape (`ndim`, 2). - - A helper function for `pad` that prepares and validates arguments like - `pad_width` for iteration in pairs. - - Parameters - ---------- - x : {None, scalar, array-like} - The object to broadcast to the shape (`ndim`, 2). - ndim : int - Number of pairs the broadcasted `x` will have. - as_index : bool, optional - If `x` is not None, try to round each element of `x` to an integer - (dtype `np.intp`) and ensure every element is positive. - - Returns - ------- - pairs : nested iterables, shape (`ndim`, 2) - The broadcasted version of `x`. - - Raises - ------ - ValueError - If `as_index` is True and `x` contains negative elements. - Or if `x` is not broadcastable to the shape (`ndim`, 2). - """ - if x is None: - # Pass through None as a special case, otherwise np.round(x) fails - # with an AttributeError - return ((None, None),) * ndim - - x = np.array(x) - if as_index: - x = np.round(x).astype(np.intp, copy=False) - - if x.ndim < 3: - # Optimization: Possibly use faster paths for cases where `x` has - # only 1 or 2 elements. `np.broadcast_to` could handle these as well - # but is currently slower - - if x.size == 1: - # x was supplied as a single value - x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 - if as_index and x < 0: - raise ValueError("index can't contain negative values") - return ((x[0], x[0]),) * ndim - - if x.size == 2 and x.shape != (2, 1): - # x was supplied with a single value for each side - # but except case when each dimension has a single value - # which should be broadcasted to a pair, - # e.g. 
[[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]] - x = x.ravel() # Ensure x[0], x[1] works - if as_index and (x[0] < 0 or x[1] < 0): - raise ValueError("index can't contain negative values") - return ((x[0], x[1]),) * ndim - - if as_index and x.min() < 0: - raise ValueError("index can't contain negative values") - - # Converting the array with `tolist` seems to improve performance - # when iterating and indexing the result (see usage in `pad`) - return np.broadcast_to(x, (ndim, 2)).tolist() - - -def _pad_dispatcher(array, pad_width, mode=None, **kwargs): - return (array,) - - -############################################################################### -# Public functions - - -@array_function_dispatch(_pad_dispatcher, module='numpy') -def pad(array, pad_width, mode='constant', **kwargs): - """ - Pad an array. - - Parameters - ---------- - array : array_like of rank N - The array to pad. - pad_width : {sequence, array_like, int} - Number of values padded to the edges of each axis. - ((before_1, after_1), ... (before_N, after_N)) unique pad widths - for each axis. - ((before, after),) yields same before and after pad for each axis. - (pad,) or int is a shortcut for before = after = pad width for all - axes. - mode : str or function, optional - One of the following string values or a user supplied function. - - 'constant' (default) - Pads with a constant value. - 'edge' - Pads with the edge values of array. - 'linear_ramp' - Pads with the linear ramp between end_value and the - array edge value. - 'maximum' - Pads with the maximum value of all or part of the - vector along each axis. - 'mean' - Pads with the mean value of all or part of the - vector along each axis. - 'median' - Pads with the median value of all or part of the - vector along each axis. - 'minimum' - Pads with the minimum value of all or part of the - vector along each axis. - 'reflect' - Pads with the reflection of the vector mirrored on - the first and last values of the vector along each - axis. - 'symmetric' - Pads with the reflection of the vector mirrored - along the edge of the array. - 'wrap' - Pads with the wrap of the vector along the axis. - The first values are used to pad the end and the - end values are used to pad the beginning. - 'empty' - Pads with undefined values. - - .. versionadded:: 1.17 - - - Padding function, see Notes. - stat_length : sequence or int, optional - Used in 'maximum', 'mean', 'median', and 'minimum'. Number of - values at edge of each axis used to calculate the statistic value. - - ((before_1, after_1), ... (before_N, after_N)) unique statistic - lengths for each axis. - - ((before, after),) yields same before and after statistic lengths - for each axis. - - (stat_length,) or int is a shortcut for before = after = statistic - length for all axes. - - Default is ``None``, to use the entire axis. - constant_values : sequence or scalar, optional - Used in 'constant'. The values to set the padded values for each - axis. - - ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants - for each axis. - - ``((before, after),)`` yields same before and after constants for each - axis. - - ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for - all axes. - - Default is 0. - end_values : sequence or scalar, optional - Used in 'linear_ramp'. The values used for the ending value of the - linear_ramp and that will form the edge of the padded array. - - ``((before_1, after_1), ... (before_N, after_N))`` unique end values - for each axis. 
- - ``((before, after),)`` yields same before and after end values for each - axis. - - ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for - all axes. - - Default is 0. - reflect_type : {'even', 'odd'}, optional - Used in 'reflect', and 'symmetric'. The 'even' style is the - default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by - subtracting the reflected values from two times the edge value. - - Returns - ------- - pad : ndarray - Padded array of rank equal to `array` with shape increased - according to `pad_width`. - - Notes - ----- - .. versionadded:: 1.7.0 - - For an array with rank greater than 1, some of the padding of later - axes is calculated from padding of previous axes. This is easiest to - think about with a rank 2 array where the corners of the padded array - are calculated by using padded values from the first axis. - - The padding function, if used, should modify a rank 1 array in-place. It - has the following signature:: - - padding_func(vector, iaxis_pad_width, iaxis, kwargs) - - where - - vector : ndarray - A rank 1 array already padded with zeros. Padded values are - vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. - iaxis_pad_width : tuple - A 2-tuple of ints, iaxis_pad_width[0] represents the number of - values padded at the beginning of vector where - iaxis_pad_width[1] represents the number of values padded at - the end of vector. - iaxis : int - The axis currently being calculated. - kwargs : dict - Any keyword arguments the function requires. - - Examples - -------- - >>> a = [1, 2, 3, 4, 5] - >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) - array([4, 4, 1, ..., 6, 6, 6]) - - >>> np.pad(a, (2, 3), 'edge') - array([1, 1, 1, ..., 5, 5, 5]) - - >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) - array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) - - >>> np.pad(a, (2,), 'maximum') - array([5, 5, 1, 2, 3, 4, 5, 5, 5]) - - >>> np.pad(a, (2,), 'mean') - array([3, 3, 1, 2, 3, 4, 5, 3, 3]) - - >>> np.pad(a, (2,), 'median') - array([3, 3, 1, 2, 3, 4, 5, 3, 3]) - - >>> a = [[1, 2], [3, 4]] - >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') - array([[1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [3, 3, 3, 4, 3, 3, 3], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1]]) - - >>> a = [1, 2, 3, 4, 5] - >>> np.pad(a, (2, 3), 'reflect') - array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) - - >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') - array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) - - >>> np.pad(a, (2, 3), 'symmetric') - array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) - - >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') - array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) - - >>> np.pad(a, (2, 3), 'wrap') - array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) - - >>> def pad_with(vector, pad_width, iaxis, kwargs): - ... pad_value = kwargs.get('padder', 10) - ... vector[:pad_width[0]] = pad_value - ... 
vector[-pad_width[1]:] = pad_value - >>> a = np.arange(6) - >>> a = a.reshape((2, 3)) - >>> np.pad(a, 2, pad_with) - array([[10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10], - [10, 10, 0, 1, 2, 10, 10], - [10, 10, 3, 4, 5, 10, 10], - [10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10]]) - >>> np.pad(a, 2, pad_with, padder=100) - array([[100, 100, 100, 100, 100, 100, 100], - [100, 100, 100, 100, 100, 100, 100], - [100, 100, 0, 1, 2, 100, 100], - [100, 100, 3, 4, 5, 100, 100], - [100, 100, 100, 100, 100, 100, 100], - [100, 100, 100, 100, 100, 100, 100]]) - """ - array = np.asarray(array) - pad_width = np.asarray(pad_width) - - if not pad_width.dtype.kind == 'i': - raise TypeError('`pad_width` must be of integral type.') - - # Broadcast to shape (array.ndim, 2) - pad_width = _as_pairs(pad_width, array.ndim, as_index=True) - - if callable(mode): - # Old behavior: Use user-supplied function with np.apply_along_axis - function = mode - # Create a new zero padded array - padded, _ = _pad_simple(array, pad_width, fill_value=0) - # And apply along each axis - - for axis in range(padded.ndim): - # Iterate using ndindex as in apply_along_axis, but assuming that - # function operates inplace on the padded array. - - # view with the iteration axis at the end - view = np.moveaxis(padded, axis, -1) - - # compute indices for the iteration axes, and append a trailing - # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) - inds = ndindex(view.shape[:-1]) - inds = (ind + (Ellipsis,) for ind in inds) - for ind in inds: - function(view[ind], pad_width[axis], axis, kwargs) - - return padded - - # Make sure that no unsupported keywords were passed for the current mode - allowed_kwargs = { - 'empty': [], 'edge': [], 'wrap': [], - 'constant': ['constant_values'], - 'linear_ramp': ['end_values'], - 'maximum': ['stat_length'], - 'mean': ['stat_length'], - 'median': ['stat_length'], - 'minimum': ['stat_length'], - 'reflect': ['reflect_type'], - 'symmetric': ['reflect_type'], - } - try: - unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) - except KeyError: - raise ValueError("mode '{}' is not supported".format(mode)) - if unsupported_kwargs: - raise ValueError("unsupported keyword arguments for mode '{}': {}" - .format(mode, unsupported_kwargs)) - - stat_functions = {"maximum": np.amax, "minimum": np.amin, - "mean": np.mean, "median": np.median} - - # Create array with final shape and original values - # (padded area is undefined) - padded, original_area_slice = _pad_simple(array, pad_width) - # And prepare iteration over all dimensions - # (zipping may be more readable than using enumerate) - axes = range(padded.ndim) - - if mode == "constant": - values = kwargs.get("constant_values", 0) - values = _as_pairs(values, padded.ndim) - for axis, width_pair, value_pair in zip(axes, pad_width, values): - roi = _view_roi(padded, original_area_slice, axis) - _set_pad_area(roi, axis, width_pair, value_pair) - - elif mode == "empty": - pass # Do nothing as _pad_simple already returned the correct result - - elif array.size == 0: - # Only modes "constant" and "empty" can extend empty axes, all other - # modes depend on `array` not being empty - # -> ensure every empty axis is only "padded with 0" - for axis, width_pair in zip(axes, pad_width): - if array.shape[axis] == 0 and any(width_pair): - raise ValueError( - "can't extend empty axis {} using modes other than " - "'constant' or 'empty'".format(axis) - ) - # passed, don't need to do anything more as _pad_simple already - # returned 
the correct result - - elif mode == "edge": - for axis, width_pair in zip(axes, pad_width): - roi = _view_roi(padded, original_area_slice, axis) - edge_pair = _get_edges(roi, axis, width_pair) - _set_pad_area(roi, axis, width_pair, edge_pair) - - elif mode == "linear_ramp": - end_values = kwargs.get("end_values", 0) - end_values = _as_pairs(end_values, padded.ndim) - for axis, width_pair, value_pair in zip(axes, pad_width, end_values): - roi = _view_roi(padded, original_area_slice, axis) - ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) - _set_pad_area(roi, axis, width_pair, ramp_pair) - - elif mode in stat_functions: - func = stat_functions[mode] - length = kwargs.get("stat_length", None) - length = _as_pairs(length, padded.ndim, as_index=True) - for axis, width_pair, length_pair in zip(axes, pad_width, length): - roi = _view_roi(padded, original_area_slice, axis) - stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) - _set_pad_area(roi, axis, width_pair, stat_pair) - - elif mode in {"reflect", "symmetric"}: - method = kwargs.get("reflect_type", "even") - include_edge = True if mode == "symmetric" else False - for axis, (left_index, right_index) in zip(axes, pad_width): - if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): - # Extending singleton dimension for 'reflect' is legacy - # behavior; it really should raise an error. - edge_pair = _get_edges(padded, axis, (left_index, right_index)) - _set_pad_area( - padded, axis, (left_index, right_index), edge_pair) - continue - - roi = _view_roi(padded, original_area_slice, axis) - while left_index > 0 or right_index > 0: - # Iteratively pad until dimension is filled with reflected - # values. This is necessary if the pad area is larger than - # the length of the original values in the current dimension. - left_index, right_index = _set_reflect_both( - roi, axis, (left_index, right_index), - method, include_edge - ) - - elif mode == "wrap": - for axis, (left_index, right_index) in zip(axes, pad_width): - roi = _view_roi(padded, original_area_slice, axis) - while left_index > 0 or right_index > 0: - # Iteratively pad until dimension is filled with wrapped - # values. This is necessary if the pad area is larger than - # the length of the original values in the current dimension. - left_index, right_index = _set_wrap_both( - roi, axis, (left_index, right_index)) - - return padded diff --git a/venv/lib/python3.7/site-packages/numpy/lib/arraysetops.py b/venv/lib/python3.7/site-packages/numpy/lib/arraysetops.py deleted file mode 100644 index 2309f7e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/arraysetops.py +++ /dev/null @@ -1,788 +0,0 @@ -""" -Set operations for arrays based on sorting. - -:Contains: - unique, - isin, - ediff1d, - intersect1d, - setxor1d, - in1d, - union1d, - setdiff1d - -:Notes: - -For floating point arrays, inaccurate results may appear due to usual round-off -and floating point comparison issues. - -Speed could be gained in some operations by an implementation of -sort(), that can provide directly the permutation vectors, avoiding -thus calls to argsort(). - -To do: Optionally return indices analogously to unique for all functions. 
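(Editorial sketch, not part of the deleted file: the sorting idea described above can be seen directly, since duplicates in the sorted concatenation of two already-unique arrays mark their intersection. Sample values are arbitrary.)

>>> import numpy as np
>>> aux = np.sort(np.concatenate((np.unique([1, 3, 4, 3]), np.unique([3, 1, 2, 1]))))
>>> aux[:-1][aux[1:] == aux[:-1]]
array([1, 3])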
- -:Author: Robert Cimrman - -""" -from __future__ import division, absolute_import, print_function - -import functools - -import numpy as np -from numpy.core import overrides - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique', - 'in1d', 'isin' - ] - - -def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): - return (ary, to_end, to_begin) - - -@array_function_dispatch(_ediff1d_dispatcher) -def ediff1d(ary, to_end=None, to_begin=None): - """ - The differences between consecutive elements of an array. - - Parameters - ---------- - ary : array_like - If necessary, will be flattened before the differences are taken. - to_end : array_like, optional - Number(s) to append at the end of the returned differences. - to_begin : array_like, optional - Number(s) to prepend at the beginning of the returned differences. - - Returns - ------- - ediff1d : ndarray - The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. - - See Also - -------- - diff, gradient - - Notes - ----- - When applied to masked arrays, this function drops the mask information - if the `to_begin` and/or `to_end` parameters are used. - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.ediff1d(x) - array([ 1, 2, 3, -7]) - - >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - array([-99, 1, 2, ..., -7, 88, 99]) - - The returned array is always 1D. - - >>> y = [[1, 2, 4], [1, 6, 24]] - >>> np.ediff1d(y) - array([ 1, 2, -3, 5, 18]) - - """ - # force a 1d array - ary = np.asanyarray(ary).ravel() - - # enforce propagation of the dtype of input - # ary to returned result - dtype_req = ary.dtype - - # fast track default case - if to_begin is None and to_end is None: - return ary[1:] - ary[:-1] - - if to_begin is None: - l_begin = 0 - else: - _to_begin = np.asanyarray(to_begin, dtype=dtype_req) - if not np.all(_to_begin == to_begin): - raise ValueError("cannot convert 'to_begin' to array with dtype " - "'%r' as required for input ary" % dtype_req) - to_begin = _to_begin.ravel() - l_begin = len(to_begin) - - if to_end is None: - l_end = 0 - else: - _to_end = np.asanyarray(to_end, dtype=dtype_req) - # check that casting has not overflowed - if not np.all(_to_end == to_end): - raise ValueError("cannot convert 'to_end' to array with dtype " - "'%r' as required for input ary" % dtype_req) - to_end = _to_end.ravel() - l_end = len(to_end) - - # do the calculation in place and copy to_begin and to_end - l_diff = max(len(ary) - 1, 0) - result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype) - result = ary.__array_wrap__(result) - if l_begin > 0: - result[:l_begin] = to_begin - if l_end > 0: - result[l_begin + l_diff:] = to_end - np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) - return result - - -def _unpack_tuple(x): - """ Unpacks one-element tuples for use as return values """ - if len(x) == 1: - return x[0] - else: - return x - - -def _unique_dispatcher(ar, return_index=None, return_inverse=None, - return_counts=None, axis=None): - return (ar,) - - -@array_function_dispatch(_unique_dispatcher) -def unique(ar, return_index=False, return_inverse=False, - return_counts=False, axis=None): - """ - Find the unique elements of an array. - - Returns the sorted unique elements of an array. 
There are three optional - outputs in addition to the unique elements: - - * the indices of the input array that give the unique values - * the indices of the unique array that reconstruct the input array - * the number of times each unique value comes up in the input array - - Parameters - ---------- - ar : array_like - Input array. Unless `axis` is specified, this will be flattened if it - is not already 1-D. - return_index : bool, optional - If True, also return the indices of `ar` (along the specified axis, - if provided, or in the flattened array) that result in the unique array. - return_inverse : bool, optional - If True, also return the indices of the unique array (for the specified - axis, if provided) that can be used to reconstruct `ar`. - return_counts : bool, optional - If True, also return the number of times each unique item appears - in `ar`. - - .. versionadded:: 1.9.0 - - axis : int or None, optional - The axis to operate on. If None, `ar` will be flattened. If an integer, - the subarrays indexed by the given axis will be flattened and treated - as the elements of a 1-D array with the dimension of the given axis, - see the notes for more details. Object arrays or structured arrays - that contain objects are not supported if the `axis` kwarg is used. The - default is None. - - .. versionadded:: 1.13.0 - - Returns - ------- - unique : ndarray - The sorted unique values. - unique_indices : ndarray, optional - The indices of the first occurrences of the unique values in the - original array. Only provided if `return_index` is True. - unique_inverse : ndarray, optional - The indices to reconstruct the original array from the - unique array. Only provided if `return_inverse` is True. - unique_counts : ndarray, optional - The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. - - .. versionadded:: 1.9.0 - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - When an axis is specified the subarrays indexed by the axis are sorted. - This is done by making the specified axis the first dimension of the array - (move the axis to the first dimension to keep the order of the other axes) - and then flattening the subarrays in C order. The flattened subarrays are - then viewed as a structured type with each element given a label, with the - effect that we end up with a 1-D array of structured types that can be - treated in the same way as any other 1-D array. The result is that the - flattened subarrays are sorted in lexicographic order starting with the - first element. 
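(Editorial sketch of the structured-dtype view described in the notes above, with arbitrary sample values; each row of the contiguous 2-D array collapses into a single structured scalar, so rows can be sorted and compared like elements of a 1-D array.)

>>> a = np.array([[1, 0], [1, 0], [2, 3]])
>>> dt = [('f0', a.dtype), ('f1', a.dtype)]
>>> np.ascontiguousarray(a).view(dt).shape
(3, 1)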
-
-    Examples
-    --------
-    >>> np.unique([1, 1, 2, 2, 3, 3])
-    array([1, 2, 3])
-    >>> a = np.array([[1, 1], [2, 3]])
-    >>> np.unique(a)
-    array([1, 2, 3])
-
-    Return the unique rows of a 2D array
-
-    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
-    >>> np.unique(a, axis=0)
-    array([[1, 0, 0], [2, 3, 4]])
-
-    Return the indices of the original array that give the unique values:
-
-    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
-    >>> u, indices = np.unique(a, return_index=True)
-    >>> u
-    array(['a', 'b', 'c'], dtype='<U1')
-    >>> indices
-    array([0, 1, 3])
-    >>> a[indices]
-    array(['a', 'b', 'c'], dtype='<U1')
-
-    Reconstruct the input array from the unique values:
-
-    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
-    >>> u, indices = np.unique(a, return_inverse=True)
-    >>> u
-    array([1, 2, 3, 4, 6])
-    >>> indices
-    array([0, 1, 4, ..., 1, 2, 1])
-    >>> u[indices]
-    array([1, 2, 6, ..., 2, 3, 2])
-
-    """
-    ar = np.asanyarray(ar)
-    if axis is None:
-        ret = _unique1d(ar, return_index, return_inverse, return_counts)
-        return _unpack_tuple(ret)
-
-    # axis was specified and not None
-    try:
-        ar = np.moveaxis(ar, axis, 0)
-    except np.AxisError:
-        # this removes the "axis1" or "axis2" prefix from the error message
-        raise np.AxisError(axis, ar.ndim)
-
-    # Must reshape to a contiguous 2D array for this to work...
-    orig_shape, orig_dtype = ar.shape, ar.dtype
-    ar = ar.reshape(orig_shape[0], -1)
-    ar = np.ascontiguousarray(ar)
-    dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
-
-    try:
-        consolidated = ar.view(dtype)
-    except TypeError:
-        # There's no good way to do this for object arrays, etc...
-        msg = 'The axis argument to unique is not supported for dtype {dt}'
-        raise TypeError(msg.format(dt=ar.dtype))
-
-    def reshape_uniq(uniq):
-        uniq = uniq.view(orig_dtype)
-        uniq = uniq.reshape(-1, *orig_shape[1:])
-        uniq = np.moveaxis(uniq, 0, axis)
-        return uniq
-
-    output = _unique1d(consolidated, return_index,
-                       return_inverse, return_counts)
-    output = (reshape_uniq(output[0]),) + output[1:]
-    return _unpack_tuple(output)
-
-
-def _unique1d(ar, return_index=False, return_inverse=False,
-              return_counts=False):
-    """
-    Find the unique elements of an array, ignoring shape.
-    """
-    ar = np.asanyarray(ar).flatten()
-
-    optional_indices = return_index or return_inverse
-
-    if optional_indices:
-        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
-        aux = ar[perm]
-    else:
-        ar.sort()
-        aux = ar
-    mask = np.empty(aux.shape, dtype=np.bool_)
-    mask[:1] = True
-    mask[1:] = aux[1:] != aux[:-1]
-
-    ret = (aux[mask],)
-    if return_index:
-        ret += (perm[mask],)
-    if return_inverse:
-        imask = np.cumsum(mask) - 1
-        inv_idx = np.empty(mask.shape, dtype=np.intp)
-        inv_idx[perm] = imask
-        ret += (inv_idx,)
-    if return_counts:
-        idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
-        ret += (np.diff(idx),)
-    return ret
-
-
-def _intersect1d_dispatcher(
-        ar1, ar2, assume_unique=None, return_indices=None):
-    return (ar1, ar2)
-
-
-@array_function_dispatch(_intersect1d_dispatcher)
-def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
-    """
-    Find the intersection of two arrays.
-
-    Return the sorted, unique values that are in both of the input arrays.
-
-    Parameters
-    ----------
-    ar1, ar2 : array_like
-        Input arrays. Will be flattened if not already 1D.
-    assume_unique : bool
-        If True, the input arrays are both assumed to be unique, which
-        can speed up the calculation. Default is False.
-    return_indices : bool
-        If True, the indices which correspond to the intersection of the two
-        arrays are returned.
The first instance of a value is used if there are - multiple. Default is False. - - .. versionadded:: 1.15.0 - - Returns - ------- - intersect1d : ndarray - Sorted 1D array of common and unique elements. - comm1 : ndarray - The indices of the first occurrences of the common values in `ar1`. - Only provided if `return_indices` is True. - comm2 : ndarray - The indices of the first occurrences of the common values in `ar2`. - Only provided if `return_indices` is True. - - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) - array([1, 3]) - - To intersect more than two arrays, use functools.reduce: - - >>> from functools import reduce - >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) - array([3]) - - To return the indices of the values common to the input arrays - along with the intersected values: - - >>> x = np.array([1, 1, 2, 3, 4]) - >>> y = np.array([2, 1, 4, 6]) - >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) - >>> x_ind, y_ind - (array([0, 2, 4]), array([1, 0, 2])) - >>> xy, x[x_ind], y[y_ind] - (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) - - """ - ar1 = np.asanyarray(ar1) - ar2 = np.asanyarray(ar2) - - if not assume_unique: - if return_indices: - ar1, ind1 = unique(ar1, return_index=True) - ar2, ind2 = unique(ar2, return_index=True) - else: - ar1 = unique(ar1) - ar2 = unique(ar2) - else: - ar1 = ar1.ravel() - ar2 = ar2.ravel() - - aux = np.concatenate((ar1, ar2)) - if return_indices: - aux_sort_indices = np.argsort(aux, kind='mergesort') - aux = aux[aux_sort_indices] - else: - aux.sort() - - mask = aux[1:] == aux[:-1] - int1d = aux[:-1][mask] - - if return_indices: - ar1_indices = aux_sort_indices[:-1][mask] - ar2_indices = aux_sort_indices[1:][mask] - ar1.size - if not assume_unique: - ar1_indices = ind1[ar1_indices] - ar2_indices = ind2[ar2_indices] - - return int1d, ar1_indices, ar2_indices - else: - return int1d - - -def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): - return (ar1, ar2) - - -@array_function_dispatch(_setxor1d_dispatcher) -def setxor1d(ar1, ar2, assume_unique=False): - """ - Find the set exclusive-or of two arrays. - - Return the sorted, unique values that are in only one (not both) of the - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - setxor1d : ndarray - Sorted 1D array of unique values that are in only one of the input - arrays. - - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4]) - >>> b = np.array([2, 3, 5, 7, 5]) - >>> np.setxor1d(a,b) - array([1, 4, 5, 7]) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - - aux = np.concatenate((ar1, ar2)) - if aux.size == 0: - return aux - - aux.sort() - flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) - return aux[flag[1:] & flag[:-1]] - - -def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None): - return (ar1, ar2) - - -@array_function_dispatch(_in1d_dispatcher) -def in1d(ar1, ar2, assume_unique=False, invert=False): - """ - Test whether each element of a 1-D array is also present in a second array. - - Returns a boolean array the same length as `ar1` that is True - where an element of `ar1` is in `ar2` and False otherwise. 
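(Editorial aside on the `flag` trick in `setxor1d` above: padding the neighbour-difference mask with `True` at both ends keeps exactly the values with no equal neighbour in the sorted concatenation. Sample values are arbitrary and pre-sorted.)

>>> aux = np.array([1, 2, 2, 3, 4, 5, 5, 7])
>>> flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
>>> aux[flag[1:] & flag[:-1]]
array([1, 3, 4, 7])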
- - We recommend using :func:`isin` instead of `in1d` for new code. - - Parameters - ---------- - ar1 : (M,) array_like - Input array. - ar2 : array_like - The values against which to test each value of `ar1`. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted (that is, - False where an element of `ar1` is in `ar2` and True otherwise). - Default is False. ``np.in1d(a, b, invert=True)`` is equivalent - to (but is faster than) ``np.invert(in1d(a, b))``. - - .. versionadded:: 1.8.0 - - Returns - ------- - in1d : (M,) ndarray, bool - The values `ar1[in1d]` are in `ar2`. - - See Also - -------- - isin : Version of this function that preserves the - shape of ar1. - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - `in1d` can be considered as an element-wise function version of the - python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly - equivalent to ``np.array([item in b for item in a])``. - However, this idea fails if `ar2` is a set, or similar (non-sequence) - container: As ``ar2`` is converted to an array, in those cases - ``asarray(ar2)`` is an object array rather than the expected array of - contained values. - - .. versionadded:: 1.4.0 - - Examples - -------- - >>> test = np.array([0, 1, 2, 5, 0]) - >>> states = [0, 2] - >>> mask = np.in1d(test, states) - >>> mask - array([ True, False, True, False, True]) - >>> test[mask] - array([0, 2, 0]) - >>> mask = np.in1d(test, states, invert=True) - >>> mask - array([False, True, False, True, False]) - >>> test[mask] - array([1, 5]) - """ - # Ravel both arrays, behavior for the first array could be different - ar1 = np.asarray(ar1).ravel() - ar2 = np.asarray(ar2).ravel() - - # Check if one of the arrays may contain arbitrary objects - contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject - - # This code is run when - # a) the first condition is true, making the code significantly faster - # b) the second condition is true (i.e. `ar1` or `ar2` may contain - # arbitrary objects), since then sorting is not guaranteed to work - if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: - if invert: - mask = np.ones(len(ar1), dtype=bool) - for a in ar2: - mask &= (ar1 != a) - else: - mask = np.zeros(len(ar1), dtype=bool) - for a in ar2: - mask |= (ar1 == a) - return mask - - # Otherwise use sorting - if not assume_unique: - ar1, rev_idx = np.unique(ar1, return_inverse=True) - ar2 = np.unique(ar2) - - ar = np.concatenate((ar1, ar2)) - # We need this to be a stable sort, so always use 'mergesort' - # here. The values from the first array should always come before - # the values from the second array. - order = ar.argsort(kind='mergesort') - sar = ar[order] - if invert: - bool_ar = (sar[1:] != sar[:-1]) - else: - bool_ar = (sar[1:] == sar[:-1]) - flag = np.concatenate((bool_ar, [invert])) - ret = np.empty(ar.shape, dtype=bool) - ret[order] = flag - - if assume_unique: - return ret[:len(ar1)] - else: - return ret[rev_idx] - - -def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None): - return (element, test_elements) - - -@array_function_dispatch(_isin_dispatcher) -def isin(element, test_elements, assume_unique=False, invert=False): - """ - Calculates `element in test_elements`, broadcasting over `element` only. 
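(Editorial aside on the stable-sort step in `in1d` above: because 'mergesort' is stable, an `ar1` element that equals an `ar2` element always lands immediately before it, so equality of sorted neighbours flags membership. Sample values are arbitrary.)

>>> ar = np.concatenate((np.array([2, 5, 0]), np.array([0, 2])))
>>> sar = ar[ar.argsort(kind='mergesort')]
>>> sar[1:] == sar[:-1]
array([ True, False,  True, False])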
- Returns a boolean array of the same shape as `element` that is True - where an element of `element` is in `test_elements` and False otherwise. - - Parameters - ---------- - element : array_like - Input array. - test_elements : array_like - The values against which to test each value of `element`. - This argument is flattened if it is an array or array_like. - See notes for behavior with non-array-like parameters. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted, as if - calculating `element not in test_elements`. Default is False. - ``np.isin(a, b, invert=True)`` is equivalent to (but faster - than) ``np.invert(np.isin(a, b))``. - - Returns - ------- - isin : ndarray, bool - Has the same shape as `element`. The values `element[isin]` - are in `test_elements`. - - See Also - -------- - in1d : Flattened version of this function. - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - - `isin` is an element-wise function version of the python keyword `in`. - ``isin(a, b)`` is roughly equivalent to - ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. - - `element` and `test_elements` are converted to arrays if they are not - already. If `test_elements` is a set (or other non-sequence collection) - it will be converted to an object array with one element, rather than an - array of the values contained in `test_elements`. This is a consequence - of the `array` constructor's way of handling non-sequence collections. - Converting the set to a list usually gives the desired behavior. - - .. versionadded:: 1.13.0 - - Examples - -------- - >>> element = 2*np.arange(4).reshape((2, 2)) - >>> element - array([[0, 2], - [4, 6]]) - >>> test_elements = [1, 2, 4, 8] - >>> mask = np.isin(element, test_elements) - >>> mask - array([[False, True], - [ True, False]]) - >>> element[mask] - array([2, 4]) - - The indices of the matched values can be obtained with `nonzero`: - - >>> np.nonzero(mask) - (array([0, 1]), array([1, 0])) - - The test can also be inverted: - - >>> mask = np.isin(element, test_elements, invert=True) - >>> mask - array([[ True, False], - [False, True]]) - >>> element[mask] - array([0, 6]) - - Because of how `array` handles sets, the following does not - work as expected: - - >>> test_set = {1, 2, 4, 8} - >>> np.isin(element, test_set) - array([[False, False], - [False, False]]) - - Casting the set to a list gives the expected result: - - >>> np.isin(element, list(test_set)) - array([[False, True], - [ True, False]]) - """ - element = np.asarray(element) - return in1d(element, test_elements, assume_unique=assume_unique, - invert=invert).reshape(element.shape) - - -def _union1d_dispatcher(ar1, ar2): - return (ar1, ar2) - - -@array_function_dispatch(_union1d_dispatcher) -def union1d(ar1, ar2): - """ - Find the union of two arrays. - - Return the unique, sorted array of values that are in either of the two - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. They are flattened if they are not already 1D. - - Returns - ------- - union1d : ndarray - Unique, sorted union of the input arrays. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. 
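(Editorial note: as the last line of `isin` above shows, `isin` is just `in1d` followed by a reshape; a hand-rolled equivalent of the docstring example, with the same values:)

>>> element = 2 * np.arange(4).reshape((2, 2))
>>> np.in1d(element, [1, 2, 4, 8]).reshape(element.shape)
array([[False,  True],
       [ True, False]])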
- - Examples - -------- - >>> np.union1d([-1, 0, 1], [-2, 0, 2]) - array([-2, -1, 0, 1, 2]) - - To find the union of more than two arrays, use functools.reduce: - - >>> from functools import reduce - >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) - array([1, 2, 3, 4, 6]) - """ - return unique(np.concatenate((ar1, ar2), axis=None)) - - -def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): - return (ar1, ar2) - - -@array_function_dispatch(_setdiff1d_dispatcher) -def setdiff1d(ar1, ar2, assume_unique=False): - """ - Find the set difference of two arrays. - - Return the unique values in `ar1` that are not in `ar2`. - - Parameters - ---------- - ar1 : array_like - Input array. - ar2 : array_like - Input comparison array. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - setdiff1d : ndarray - 1D array of values in `ar1` that are not in `ar2`. The result - is sorted when `assume_unique=False`, but otherwise only sorted - if the input is sorted. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4, 1]) - >>> b = np.array([3, 4, 5, 6]) - >>> np.setdiff1d(a, b) - array([1, 2]) - - """ - if assume_unique: - ar1 = np.asarray(ar1).ravel() - else: - ar1 = unique(ar1) - ar2 = unique(ar2) - return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] - diff --git a/venv/lib/python3.7/site-packages/numpy/lib/arrayterator.py b/venv/lib/python3.7/site-packages/numpy/lib/arrayterator.py deleted file mode 100644 index c166685..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/arrayterator.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -A buffered iterator for big arrays. - -This module solves the problem of iterating over a big file-based array -without having to read it into memory. The `Arrayterator` class wraps -an array object, and when iterated it will return sub-arrays with at most -a user-specified number of elements. - -""" -from __future__ import division, absolute_import, print_function - -from operator import mul -from functools import reduce - -from numpy.compat import long - -__all__ = ['Arrayterator'] - - -class Arrayterator(object): - """ - Buffered iterator for big arrays. - - `Arrayterator` creates a buffered iterator for reading big arrays in small - contiguous blocks. The class is useful for objects stored in the - file system. It allows iteration over the object *without* reading - everything in memory; instead, small blocks are read and iterated over. - - `Arrayterator` can be used with any object that supports multidimensional - slices. This includes NumPy arrays, but also variables from - Scientific.IO.NetCDF or pynetcdf for example. - - Parameters - ---------- - var : array_like - The object to iterate over. - buf_size : int, optional - The buffer size. If `buf_size` is supplied, the maximum amount of - data that will be read into memory is `buf_size` elements. - Default is None, which will read as many element as possible - into memory. - - Attributes - ---------- - var - buf_size - start - stop - step - shape - flat - - See Also - -------- - ndenumerate : Multidimensional array iterator. - flatiter : Flat array iterator. - memmap : Create a memory-map to an array stored in a binary file on disk. - - Notes - ----- - The algorithm works by first finding a "running dimension", along which - the blocks will be extracted. 
Given an array of dimensions
-    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
-    first dimension will be used. If, on the other hand,
-    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
-    Blocks are extracted along this dimension, and when the last block is
-    returned the process continues from the next dimension, until all
-    elements have been read.
-
-    Examples
-    --------
-    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
-    >>> a_itor = np.lib.Arrayterator(a, 2)
-    >>> a_itor.shape
-    (3, 4, 5, 6)
-
-    Now we can iterate over ``a_itor``, and it will return arrays of size
-    two. Since `buf_size` was smaller than any dimension, the first
-    dimension will be iterated over first:
-
-    >>> for subarr in a_itor:
-    ...     if not subarr.all():
-    ...         print(subarr, subarr.shape) # doctest: +SKIP
-    >>> # [[[[0 1]]]] (1, 1, 1, 2)
-
-    """
-
-    def __init__(self, var, buf_size=None):
-        self.var = var
-        self.buf_size = buf_size
-
-        self.start = [0 for dim in var.shape]
-        self.stop = [dim for dim in var.shape]
-        self.step = [1 for dim in var.shape]
-
-    def __getattr__(self, attr):
-        return getattr(self.var, attr)
-
-    def __getitem__(self, index):
-        """
-        Return a new arrayterator.
-
-        """
-        # Fix index, handling ellipsis and incomplete slices.
-        if not isinstance(index, tuple):
-            index = (index,)
-        fixed = []
-        length, dims = len(index), self.ndim
-        for slice_ in index:
-            if slice_ is Ellipsis:
-                fixed.extend([slice(None)] * (dims-length+1))
-                length = len(fixed)
-            elif isinstance(slice_, (int, long)):
-                fixed.append(slice(slice_, slice_+1, 1))
-            else:
-                fixed.append(slice_)
-        index = tuple(fixed)
-        if len(index) < dims:
-            index += (slice(None),) * (dims-len(index))
-
-        # Return a new arrayterator object.
-        out = self.__class__(self.var, self.buf_size)
-        for i, (start, stop, step, slice_) in enumerate(
-                zip(self.start, self.stop, self.step, index)):
-            out.start[i] = start + (slice_.start or 0)
-            out.step[i] = step * (slice_.step or 1)
-            out.stop[i] = start + (slice_.stop or stop-start)
-            out.stop[i] = min(stop, out.stop[i])
-        return out
-
-    def __array__(self):
-        """
-        Return corresponding data.
-
-        """
-        slice_ = tuple(slice(*t) for t in zip(
-            self.start, self.stop, self.step))
-        return self.var[slice_]
-
-    @property
-    def flat(self):
-        """
-        A 1-D flat iterator for Arrayterator objects.
-
-        This iterator returns elements of the array to be iterated over in
-        `Arrayterator` one by one. It is similar to `flatiter`.
-
-        See Also
-        --------
-        Arrayterator
-        flatiter
-
-        Examples
-        --------
-        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
-        >>> a_itor = np.lib.Arrayterator(a, 2)
-
-        >>> for subarr in a_itor.flat:
-        ...     if not subarr:
-        ...         print(subarr, type(subarr))
-        ...
-        0 <class 'numpy.int64'>
-
-        """
-        for block in self:
-            for value in block.flat:
-                yield value
-
-    @property
-    def shape(self):
-        """
-        The shape of the array to be iterated over.
-
-        For an example, see `Arrayterator`.
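(Editorial example of the `shape` computation above, assuming the class is reachable as `np.lib.Arrayterator`: slicing rescales `start`/`stop`/`step`, and `shape` reports the resulting number of positions per axis.)

>>> it = np.lib.Arrayterator(np.empty((10, 8)), 2)[::3, ::2]
>>> it.shape
(4, 4)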
- - """ - return tuple(((stop-start-1)//step+1) for start, stop, step in - zip(self.start, self.stop, self.step)) - - def __iter__(self): - # Skip arrays with degenerate dimensions - if [dim for dim in self.shape if dim <= 0]: - return - - start = self.start[:] - stop = self.stop[:] - step = self.step[:] - ndims = self.var.ndim - - while True: - count = self.buf_size or reduce(mul, self.shape) - - # iterate over each dimension, looking for the - # running dimension (ie, the dimension along which - # the blocks will be built from) - rundim = 0 - for i in range(ndims-1, -1, -1): - # if count is zero we ran out of elements to read - # along higher dimensions, so we read only a single position - if count == 0: - stop[i] = start[i]+1 - elif count <= self.shape[i]: - # limit along this dimension - stop[i] = start[i] + count*step[i] - rundim = i - else: - # read everything along this dimension - stop[i] = self.stop[i] - stop[i] = min(self.stop[i], stop[i]) - count = count//self.shape[i] - - # yield a block - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) - yield self.var[slice_] - - # Update start position, taking care of overflow to - # other dimensions - start[rundim] = stop[rundim] # start where we stopped - for i in range(ndims-1, 0, -1): - if start[i] >= self.stop[i]: - start[i] = self.start[i] - start[i-1] += self.step[i-1] - if start[0] >= self.stop[0]: - return diff --git a/venv/lib/python3.7/site-packages/numpy/lib/financial.py b/venv/lib/python3.7/site-packages/numpy/lib/financial.py deleted file mode 100644 index a011e52..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/financial.py +++ /dev/null @@ -1,969 +0,0 @@ -"""Some simple financial calculations - -patterned after spreadsheet computations. - -There is some complexity in each function -so that the functions behave like ufuncs with -broadcasting and being able to be called with scalars -or arrays (or other sequences). - -Functions support the :class:`decimal.Decimal` type unless -otherwise stated. -""" -from __future__ import division, absolute_import, print_function - -import warnings -from decimal import Decimal -import functools - -import numpy as np -from numpy.core import overrides - - -_depmsg = ("numpy.{name} is deprecated and will be removed from NumPy 1.20. " - "Use numpy_financial.{name} instead " - "(https://pypi.org/project/numpy-financial/).") - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', - 'irr', 'npv', 'mirr'] - -_when_to_num = {'end':0, 'begin':1, - 'e':0, 'b':1, - 0:0, 1:1, - 'beginning':1, - 'start':1, - 'finish':0} - -def _convert_when(when): - #Test to see if when has already been converted to ndarray - #This will happen if one function calls another, for example ppmt - if isinstance(when, np.ndarray): - return when - try: - return _when_to_num[when] - except (KeyError, TypeError): - return [_when_to_num[x] for x in when] - - -def _fv_dispatcher(rate, nper, pmt, pv, when=None): - warnings.warn(_depmsg.format(name='fv'), - DeprecationWarning, stacklevel=3) - return (rate, nper, pmt, pv) - - -@array_function_dispatch(_fv_dispatcher) -def fv(rate, nper, pmt, pv, when='end'): - """ - Compute the future value. - - .. deprecated:: 1.18 - - `fv` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. 
- - Given: - * a present value, `pv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value at the end of the `nper` periods - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - nper : scalar or array_like of shape(M, ) - Number of compounding periods - pmt : scalar or array_like of shape(M, ) - Payment - pv : scalar or array_like of shape(M, ) - Present value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Future values. If all input is scalar, returns a scalar float. If - any input is array_like, returns future values for each input element. - If multiple inputs are array_like, they all must have the same shape. - - Notes - ----- - The future value is computed by solving the equation:: - - fv + - pv*(1+rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - - Examples - -------- - What is the future value after 10 years of saving $100 now, with - an additional monthly savings of $100. Assume the interest rate is - 5% (annually) compounded monthly? - - >>> np.fv(0.05/12, 10*12, -100, -100) - 15692.928894335748 - - By convention, the negative sign represents cash flow out (i.e. money not - available today). Thus, saving $100 a month at 5% annual interest leads - to $15,692.93 available to spend in 10 years. - - If any input is array_like, returns an array of equal shape. Let's - compare different interest rates from the example above. - - >>> a = np.array((0.05, 0.06, 0.07))/12 - >>> np.fv(a, 10*12, -100, -100) - array([ 15692.92889434, 16569.87435405, 17509.44688102]) # may vary - - """ - when = _convert_when(when) - (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when]) - temp = (1+rate)**nper - fact = np.where(rate == 0, nper, - (1 + rate*when)*(temp - 1)/rate) - return -(pv*temp + pmt*fact) - - -def _pmt_dispatcher(rate, nper, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='pmt'), - DeprecationWarning, stacklevel=3) - return (rate, nper, pv, fv) - - -@array_function_dispatch(_pmt_dispatcher) -def pmt(rate, nper, pv, fv=0, when='end'): - """ - Compute the payment against loan principal plus interest. - - .. deprecated:: 1.18 - - `pmt` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. 
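(Editorial cross-check, not from the deleted file: plugging the docstring's first example into the closed form quoted in the `fv` notes above, with `when` = 0 so the `(1 + rate*when)` factor drops out.)

>>> rate, nper, pmt_, pv_ = 0.05/12, 10*12, -100, -100
>>> temp = (1 + rate)**nper
>>> round(-(pv_*temp + pmt_*(temp - 1)/rate), 2)
15692.93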
- - Given: - * a present value, `pv` (e.g., an amount borrowed) - * a future value, `fv` (e.g., 0) - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * and (optional) specification of whether payment is made - at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the (fixed) periodic payment. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like, optional - Future value (default = 0) - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray - Payment against loan plus interest. If all input is scalar, returns a - scalar float. If any input is array_like, returns payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - Notes - ----- - The payment is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - for ``pmt``. - - Note that computing a monthly mortgage payment is only - one use for this function. For example, pmt returns the - periodic deposit one must make to achieve a specified - future balance given an initial deposit, a fixed, - periodically compounded interest rate, and the total - number of periods. - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php - ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt - - Examples - -------- - What is the monthly payment needed to pay off a $200,000 loan in 15 - years at an annual interest rate of 7.5%? - - >>> np.pmt(0.075/12, 12*15, 200000) - -1854.0247200054619 - - In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained - today, a monthly payment of $1,854.02 would be required. Note that this - example illustrates usage of `fv` having a default value of 0. - - """ - when = _convert_when(when) - (rate, nper, pv, fv, when) = map(np.array, [rate, nper, pv, fv, when]) - temp = (1 + rate)**nper - mask = (rate == 0) - masked_rate = np.where(mask, 1, rate) - fact = np.where(mask != 0, nper, - (1 + masked_rate*when)*(temp - 1)/masked_rate) - return -(fv + pv*temp) / fact - - -def _nper_dispatcher(rate, pmt, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='nper'), - DeprecationWarning, stacklevel=3) - return (rate, pmt, pv, fv) - - -@array_function_dispatch(_nper_dispatcher) -def nper(rate, pmt, pv, fv=0, when='end'): - """ - Compute the number of periodic payments. - - .. deprecated:: 1.18 - - `nper` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - :class:`decimal.Decimal` type is not supported. 
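(Editorial cross-check of the `pmt` closed form defined above, reproducing its docstring example with plain floats; `fv` = 0 and `when` = 'end'.)

>>> rate, nper_, pv_ = 0.075/12, 12*15, 200000
>>> temp = (1 + rate)**nper_
>>> round(-(0 + pv_*temp) / ((temp - 1)/rate), 2)
-1854.02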
- - Parameters - ---------- - rate : array_like - Rate of interest (per period) - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Notes - ----- - The number of periods ``nper`` is computed by solving the equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0 - - but if ``rate = 0`` then:: - - fv + pv + pmt*nper = 0 - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - - Examples - -------- - If you only had $150/month to pay towards the loan, how long would it take - to pay-off a loan of $8,000 at 7% annual interest? - - >>> print(np.round(np.nper(0.07/12, -150, 8000), 5)) - 64.07335 - - So, over 64 months would be required to pay off the loan. - - The same analysis could be done with several different interest rates - and/or payments and/or total amounts to produce an entire table. - - >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12, - ... -150 : -99 : 50 , - ... 8000 : 9001 : 1000])) - array([[[ 64.07334877, 74.06368256], - [108.07548412, 127.99022654]], - [[ 66.12443902, 76.87897353], - [114.70165583, 137.90124779]]]) - - """ - when = _convert_when(when) - (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when]) - - use_zero_rate = False - with np.errstate(divide="raise"): - try: - z = pmt*(1+rate*when)/rate - except FloatingPointError: - use_zero_rate = True - - if use_zero_rate: - return (-fv + pv) / pmt - else: - A = -(fv + pv)/(pmt+0) - B = np.log((-fv+z) / (pv+z))/np.log(1+rate) - return np.where(rate == 0, A, B) - - -def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='ipmt'), - DeprecationWarning, stacklevel=3) - return (rate, per, nper, pv, fv) - - -@array_function_dispatch(_ipmt_dispatcher) -def ipmt(rate, per, nper, pv, fv=0, when='end'): - """ - Compute the interest portion of a payment. - - .. deprecated:: 1.18 - - `ipmt` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - per : scalar or array_like of shape(M, ) - Interest paid against the loan changes during the life or the loan. - The `per` is the payment period to calculate the interest amount. - nper : scalar or array_like of shape(M, ) - Number of compounding periods - pv : scalar or array_like of shape(M, ) - Present value - fv : scalar or array_like of shape(M, ), optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Interest portion of payment. If all input is scalar, returns a scalar - float. If any input is array_like, returns interest payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - See Also - -------- - ppmt, pmt, pv - - Notes - ----- - The total payment is made up of payment against principal plus interest. - - ``pmt = ppmt + ipmt`` - - References - ---------- - .. 
[1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - - Examples - -------- - What is the amortization schedule for a 1 year loan of $2500 at - 8.24% interest per year compounded monthly? - - >>> principal = 2500.00 - - The 'per' variable represents the periods of the loan. Remember that - financial equations start the period count at 1! - - >>> per = np.arange(1*12) + 1 - >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal) - >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal) - - Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal - 'pmt'. - - >>> pmt = np.pmt(0.0824/12, 1*12, principal) - >>> np.allclose(ipmt + ppmt, pmt) - True - - >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}' - >>> for payment in per: - ... index = payment - 1 - ... principal = principal + ppmt[index] - ... print(fmt.format(payment, ppmt[index], ipmt[index], principal)) - 1 -200.58 -17.17 2299.42 - 2 -201.96 -15.79 2097.46 - 3 -203.35 -14.40 1894.11 - 4 -204.74 -13.01 1689.37 - 5 -206.15 -11.60 1483.22 - 6 -207.56 -10.18 1275.66 - 7 -208.99 -8.76 1066.67 - 8 -210.42 -7.32 856.25 - 9 -211.87 -5.88 644.38 - 10 -213.32 -4.42 431.05 - 11 -214.79 -2.96 216.26 - 12 -216.26 -1.49 -0.00 - - >>> interestpd = np.sum(ipmt) - >>> np.round(interestpd, 2) - -112.98 - - """ - when = _convert_when(when) - rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper, - pv, fv, when) - total_pmt = pmt(rate, nper, pv, fv, when) - ipmt = _rbl(rate, per, total_pmt, pv, when)*rate - try: - ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt) - ipmt = np.where(np.logical_and(when == 1, per == 1), 0, ipmt) - except IndexError: - pass - return ipmt - - -def _rbl(rate, per, pmt, pv, when): - """ - This function is here to simply have a different name for the 'fv' - function to not interfere with the 'fv' keyword argument within the 'ipmt' - function. It is the 'remaining balance on loan' which might be useful as - it's own function, but is easily calculated with the 'fv' function. - """ - return fv(rate, (per - 1), pmt, pv, when) - - -def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None): - warnings.warn(_depmsg.format(name='ppmt'), - DeprecationWarning, stacklevel=3) - return (rate, per, nper, pv, fv) - - -@array_function_dispatch(_ppmt_dispatcher) -def ppmt(rate, per, nper, pv, fv=0, when='end'): - """ - Compute the payment against loan principal. - - .. deprecated:: 1.18 - - `ppmt` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - per : array_like, int - Amount paid against the loan changes. The `per` is the period of - interest. - nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - See Also - -------- - pmt, pv, ipmt - - References - ---------- - .. 
[1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - - """ - total = pmt(rate, nper, pv, fv, when) - return total - ipmt(rate, per, nper, pv, fv, when) - - -def _pv_dispatcher(rate, nper, pmt, fv=None, when=None): - warnings.warn(_depmsg.format(name='pv'), - DeprecationWarning, stacklevel=3) - return (rate, nper, nper, pv, fv) - - -@array_function_dispatch(_pv_dispatcher) -def pv(rate, nper, pmt, fv=0, when='end'): - """ - Compute the present value. - - .. deprecated:: 1.18 - - `pv` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Given: - * a future value, `fv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value now - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pmt : array_like - Payment - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray, float - Present value of a series of payments or investments. - - Notes - ----- - The present value is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0 - - or, when ``rate = 0``:: - - fv + pv + pmt * nper = 0 - - for `pv`, which is then returned. - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - Examples - -------- - What is the present value (e.g., the initial investment) - of an investment that needs to total $15692.93 - after 10 years of saving $100 every month? Assume the - interest rate is 5% (annually) compounded monthly. - - >>> np.pv(0.05/12, 10*12, -100, 15692.93) - -100.00067131625819 - - By convention, the negative sign represents cash flow out - (i.e., money not available today). Thus, to end up with - $15,692.93 in 10 years saving $100 a month at 5% annual - interest, one's initial deposit should also be $100. - - If any input is array_like, ``pv`` returns an array of equal shape. - Let's compare different interest rates in the example above: - - >>> a = np.array((0.05, 0.04, 0.03))/12 - >>> np.pv(a, 10*12, -100, 15692.93) - array([ -100.00067132, -649.26771385, -1273.78633713]) # may vary - - So, to end up with the same $15692.93 under the same $100 per month - "savings plan," for annual interest rates of 4% and 3%, one would - need initial investments of $649.27 and $1273.79, respectively. 
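(Editor's note: `_pv_dispatcher` above returns ``(rate, nper, nper, pv, fv)``, which duplicates `nper`, ignores the `pmt` argument, and resolves `pv` to the module-level function rather than a parameter. A corrected dispatcher, following the pattern of the other ``_*_dispatcher`` helpers, would presumably read:)

def _pv_dispatcher(rate, nper, pmt, fv=None, when=None):
    warnings.warn(_depmsg.format(name='pv'),
                  DeprecationWarning, stacklevel=3)
    return (rate, nper, pmt, fv)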
- - """ - when = _convert_when(when) - (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when]) - temp = (1+rate)**nper - fact = np.where(rate == 0, nper, (1+rate*when)*(temp-1)/rate) - return -(fv + pmt*fact)/temp - -# Computed with Sage -# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - -# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + -# p*((r + 1)^n - 1)*w/r) - -def _g_div_gp(r, n, p, x, y, w): - t1 = (r+1)**n - t2 = (r+1)**(n-1) - return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) / - (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + - p*(t1 - 1)*w/r)) - - -def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None, - maxiter=None): - warnings.warn(_depmsg.format(name='rate'), - DeprecationWarning, stacklevel=3) - return (nper, pmt, pv, fv) - - -# Use Newton's iteration until the change is less than 1e-6 -# for all values or a maximum of 100 iterations is reached. -# Newton's rule is -# r_{n+1} = r_{n} - g(r_n)/g'(r_n) -# where -# g(r) is the formula -# g'(r) is the derivative with respect to r. -@array_function_dispatch(_rate_dispatcher) -def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): - """ - Compute the rate of interest per period. - - .. deprecated:: 1.18 - - `rate` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - nper : array_like - Number of compounding periods - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - guess : Number, optional - Starting guess for solving the rate of interest, default 0.1 - tol : Number, optional - Required tolerance for the solution, default 1e-6 - maxiter : int, optional - Maximum iterations in finding the solution - - Notes - ----- - The rate of interest is computed by iteratively solving the - (non-linear) equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 - - for ``rate``. - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. 
- Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - """ - when = _convert_when(when) - default_type = Decimal if isinstance(pmt, Decimal) else float - - # Handle casting defaults to Decimal if/when pmt is a Decimal and - # guess and/or tol are not given default values - if guess is None: - guess = default_type('0.1') - - if tol is None: - tol = default_type('1e-6') - - (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when]) - - rn = guess - iterator = 0 - close = False - while (iterator < maxiter) and not close: - rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) - diff = abs(rnp1-rn) - close = np.all(diff < tol) - iterator += 1 - rn = rnp1 - if not close: - # Return nan's in array of the same shape as rn - return np.nan + rn - else: - return rn - - -def _irr_dispatcher(values): - warnings.warn(_depmsg.format(name='irr'), - DeprecationWarning, stacklevel=3) - return (values,) - - -@array_function_dispatch(_irr_dispatcher) -def irr(values): - """ - Return the Internal Rate of Return (IRR). - - .. deprecated:: 1.18 - - `irr` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - This is the "average" periodically compounded rate of return - that gives a net present value of 0.0; for a more complete explanation, - see Notes below. - - :class:`decimal.Decimal` type is not supported. - - Parameters - ---------- - values : array_like, shape(N,) - Input cash flows per time period. By convention, net "deposits" - are negative and net "withdrawals" are positive. Thus, for - example, at least the first element of `values`, which represents - the initial investment, will typically be negative. - - Returns - ------- - out : float - Internal Rate of Return for periodic input values. - - Notes - ----- - The IRR is perhaps best understood through an example (illustrated - using np.irr in the Examples section below). Suppose one invests 100 - units and then makes the following withdrawals at regular (fixed) - intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100 - unit investment yields 173 units; however, due to the combination of - compounding and the periodic withdrawals, the "average" rate of return - is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution - (for :math:`r`) of the equation: - - .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2} - + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 - - In general, for `values` :math:`= [v_0, v_1, ... v_M]`, - irr is the solution of the equation: [2]_ - - .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., - Addison-Wesley, 2003, pg. 348. - - Examples - -------- - >>> round(np.irr([-100, 39, 59, 55, 20]), 5) - 0.28095 - >>> round(np.irr([-100, 0, 0, 74]), 5) - -0.0955 - >>> round(np.irr([-100, 100, 0, -7]), 5) - -0.0833 - >>> round(np.irr([-100, 100, 0, 7]), 5) - 0.06206 - >>> round(np.irr([-5, 10.5, 1, -8, 1]), 5) - 0.0886 - - """ - # `np.roots` call is why this function does not support Decimal type. - # - # Ultimately Decimal support needs to be added to np.roots, which has - # greater implications on the entire linear algebra module and how it does - # eigenvalue computations. 
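# A scalar sketch of the Newton iteration described in the comments above,
# using the Sage-derived expressions for g(r) and g'(r). The name
# `solve_rate` and the example loan figures are illustrative, not part of
# this module.
def solve_rate(nper, pmt, pv, fv, when=0, guess=0.1, tol=1e-6, maxiter=100):
    r = guess
    for _ in range(maxiter):
        t1 = (r + 1) ** nper
        t2 = (r + 1) ** (nper - 1)
        g = fv + t1 * pv + pmt * (t1 - 1) * (r * when + 1) / r
        gp = (nper * t2 * pv
              - pmt * (t1 - 1) * (r * when + 1) / r ** 2
              + nper * pmt * t2 * (r * when + 1) / r
              + pmt * (t1 - 1) * when / r)
        r_next = r - g / gp  # Newton step: r_{n+1} = r_n - g(r_n)/g'(r_n)
        if abs(r_next - r) < tol:
            return r_next
        r = r_next
    return float('nan')  # did not converge within maxiter

# e.g. the monthly rate of a 5-year, $10,000 loan repaid at $200/month:
print(solve_rate(nper=60, pmt=-200, pv=10000, fv=0))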
- res = np.roots(values[::-1]) - mask = (res.imag == 0) & (res.real > 0) - if not mask.any(): - return np.nan - res = res[mask].real - # NPV(rate) = 0 can have more than one solution so we return - # only the solution closest to zero. - rate = 1/res - 1 - rate = rate.item(np.argmin(np.abs(rate))) - return rate - - -def _npv_dispatcher(rate, values): - warnings.warn(_depmsg.format(name='npv'), - DeprecationWarning, stacklevel=3) - return (values,) - - -@array_function_dispatch(_npv_dispatcher) -def npv(rate, values): - """ - Returns the NPV (Net Present Value) of a cash flow series. - - .. deprecated:: 1.18 - - `npv` is deprecated; for details, see NEP 32 [1]_. - Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - rate : scalar - The discount rate. - values : array_like, shape(M, ) - The values of the time series of cash flows. The (fixed) time - interval between cash flow "events" must be the same as that for - which `rate` is given (i.e., if `rate` is per year, then precisely - a year is understood to elapse between each cash flow event). By - convention, investments or "deposits" are negative, income or - "withdrawals" are positive; `values` must begin with the initial - investment, thus `values[0]` will typically be negative. - - Returns - ------- - out : float - The NPV of the input cash flow series `values` at the discount - `rate`. - - Warnings - -------- - ``npv`` considers a series of cashflows starting in the present (t = 0). - NPV can also be defined with a series of future cashflows, paid at the - end, rather than the start, of each period. If future cashflows are used, - the first cashflow `values[0]` must be zeroed and added to the net - present value of the future cashflows. This is demonstrated in the - examples. - - Notes - ----- - Returns the result of: [2]_ - - .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}} - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., - Addison-Wesley, 2003, pg. 346. - - Examples - -------- - Consider a potential project with an initial investment of $40 000 and - projected cashflows of $5 000, $8 000, $12 000 and $30 000 at the end of - each period discounted at a rate of 8% per period. To find the project's - net present value: - - >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000] - >>> np.npv(rate, cashflows).round(5) - 3065.22267 - - It may be preferable to split the projected cashflow into an initial - investment and expected future cashflows. In this case, the value of - the initial cashflow is zero and the initial investment is later added - to the future cashflows net present value: - - >>> initial_cashflow = cashflows[0] - >>> cashflows[0] = 0 - >>> np.round(np.npv(rate, cashflows) + initial_cashflow, 5) - 3065.22267 - - """ - values = np.asarray(values) - return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0) - - -def _mirr_dispatcher(values, finance_rate, reinvest_rate): - warnings.warn(_depmsg.format(name='mirr'), - DeprecationWarning, stacklevel=3) - return (values,) - - -@array_function_dispatch(_mirr_dispatcher) -def mirr(values, finance_rate, reinvest_rate): - """ - Modified internal rate of return. - - .. deprecated:: 1.18 - - `mirr` is deprecated; for details, see NEP 32 [1]_. 
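# The roots-based idea used in `irr` above, as a compact standalone sketch:
# substituting x = 1/(1+r) turns NPV(r) = 0 into a polynomial whose real,
# positive roots give candidate rates; the one closest to zero is kept.
# `irr_sketch` is an illustrative name, not part of this module.
import numpy as np

def irr_sketch(values):
    res = np.roots(values[::-1])                  # roots of sum(v_t * x**t)
    res = res[(res.imag == 0) & (res.real > 0)].real
    if res.size == 0:
        return np.nan
    rates = 1 / res - 1
    return rates[np.argmin(np.abs(rates))]        # solution closest to zero

print(round(irr_sketch(np.array([-100, 39, 59, 55, 20])), 5))  # 0.28095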
- Use the corresponding function in the numpy-financial library, - https://pypi.org/project/numpy-financial. - - Parameters - ---------- - values : array_like - Cash flows. Must contain at least one positive and one negative - value, otherwise nan is returned. The first value is considered a - sunk cost at time zero. - finance_rate : scalar - Interest rate paid on the cash flows - reinvest_rate : scalar - Interest rate received on the cash flows upon reinvestment - - Returns - ------- - out : float - Modified internal rate of return - - References - ---------- - .. [1] NumPy Enhancement Proposal (NEP) 32, - https://numpy.org/neps/nep-0032-remove-financial-functions.html - """ - values = np.asarray(values) - n = values.size - - # Without this explicit cast the 1/(n - 1) computation below - # becomes a float, which causes TypeError when using Decimal - # values. - if isinstance(finance_rate, Decimal): - n = Decimal(n) - - pos = values > 0 - neg = values < 0 - if not (pos.any() and neg.any()): - return np.nan - numer = np.abs(npv(reinvest_rate, values*pos)) - denom = np.abs(npv(finance_rate, values*neg)) - return (numer/denom)**(1/(n - 1))*(1 + reinvest_rate) - 1 diff --git a/venv/lib/python3.7/site-packages/numpy/lib/format.py b/venv/lib/python3.7/site-packages/numpy/lib/format.py deleted file mode 100644 index 20e2e9c..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/format.py +++ /dev/null @@ -1,916 +0,0 @@ -""" -Binary serialization - -NPY format -========== - -A simple format for saving numpy arrays to disk with the full -information about them. - -The ``.npy`` format is the standard binary file format in NumPy for -persisting a *single* arbitrary NumPy array on disk. The format stores all -of the shape and dtype information necessary to reconstruct the array -correctly even on another machine with a different architecture. -The format is designed to be as simple as possible while achieving -its limited goals. - -The ``.npz`` format is the standard format for persisting *multiple* NumPy -arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` -files, one for each array. - -Capabilities ------------- - -- Can represent all NumPy arrays including nested record arrays and - object arrays. - -- Represents the data in its native binary form. - -- Supports Fortran-contiguous arrays directly. - -- Stores all of the necessary information to reconstruct the array - including shape and dtype on a machine of a different - architecture. Both little-endian and big-endian arrays are - supported, and a file with little-endian numbers will yield - a little-endian array on any machine reading the file. The - types are described in terms of their actual sizes. For example, - if a machine with a 64-bit C "long int" writes out an array with - "long ints", a reading machine with 32-bit C "long ints" will yield - an array with 64-bit integers. - -- Is straightforward to reverse engineer. Datasets often live longer than - the programs that created them. A competent developer should be - able to create a solution in their preferred programming language to - read most ``.npy`` files that they have been given without much - documentation. - -- Allows memory-mapping of the data. See `open_memmap`. - -- Can be read from a filelike stream object instead of an actual file. - -- Stores object arrays, i.e. arrays containing elements that are arbitrary - Python objects. Files with object arrays cannot be memory-mapped, but - they can be read and written to disk.
- -Limitations ------------ - -- Arbitrary subclasses of numpy.ndarray are not completely preserved. - Subclasses will be accepted for writing, but only the array data will - be written out. A regular numpy.ndarray object will be created - upon reading the file. - -.. warning:: - - Due to limitations in the interpretation of structured dtypes, dtypes - with fields with empty names will have the names replaced by 'f0', 'f1', - etc. Such arrays will not round-trip through the format entirely - accurately. The data is intact; only the field names will differ. We are - working on a fix for this. This fix will not require a change in the - file format. The arrays with such structures can still be saved and - restored, and the correct dtype may be restored by using the - ``loadedarray.view(correct_dtype)`` method. - -File extensions ---------------- - -We recommend using the ``.npy`` and ``.npz`` extensions for files saved -in this format. This is by no means a requirement; applications may wish -to use these file formats but use an extension specific to the -application. In the absence of an obvious alternative, however, -we suggest using ``.npy`` and ``.npz``. - -Version numbering ------------------ - -The version numbering of these formats is independent of NumPy version -numbering. If the format is upgraded, the code in `numpy.io` will still -be able to read and write Version 1.0 files. - -Format Version 1.0 ------------------- - -The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. - -The next 1 byte is an unsigned byte: the major version number of the file -format, e.g. ``\\x01``. - -The next 1 byte is an unsigned byte: the minor version number of the file -format, e.g. ``\\x00``. Note: the version of the file format is not tied -to the version of the numpy package. - -The next 2 bytes form a little-endian unsigned short int: the length of -the header data HEADER_LEN. - -The next HEADER_LEN bytes form the header data describing the array's -format. It is an ASCII string which contains a Python literal expression -of a dictionary. It is terminated by a newline (``\\n``) and padded with -spaces (``\\x20``) to make the total of -``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible -by 64 for alignment purposes. - -The dictionary contains three keys: - - "descr" : dtype.descr - An object that can be passed as an argument to the `numpy.dtype` - constructor to create the array's dtype. - "fortran_order" : bool - Whether the array data is Fortran-contiguous or not. Since - Fortran-contiguous arrays are a common form of non-C-contiguity, - we allow them to be written directly to disk for efficiency. - "shape" : tuple of int - The shape of the array. - -For repeatability and readability, the dictionary keys are sorted in -alphabetic order. This is for convenience only. A writer SHOULD implement -this if possible. A reader MUST NOT depend on this. - -Following the header comes the array data. If the dtype contains Python -objects (i.e. ``dtype.hasobject is True``), then the data is a Python -pickle of the array. Otherwise the data is the contiguous (either C- -or Fortran-, depending on ``fortran_order``) bytes of the array. -Consumers can figure out the number of bytes by multiplying the number -of elements given by the shape (noting that ``shape=()`` means there is -1 element) by ``dtype.itemsize``. - -Format Version 2.0 ------------------- - -The version 1.0 format only allowed the array header to have a total size of -65535 bytes. 
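# As an aside, the version 1.0 layout described above can be parsed by hand
# in a few lines. A minimal sketch (the file name 'demo.npy' is arbitrary):
import ast
import struct
import numpy as np

np.save('demo.npy', np.arange(6, dtype='<i4').reshape(2, 3))
with open('demo.npy', 'rb') as f:
    assert f.read(6) == b'\x93NUMPY'            # magic string
    major, minor = f.read(1)[0], f.read(1)[0]   # format version bytes
    (hlen,) = struct.unpack('<H', f.read(2))    # little-endian header length
    header = ast.literal_eval(f.read(hlen).decode('latin1'))

print(major, minor, header)
# 1 0 {'descr': '<i4', 'fortran_order': False, 'shape': (2, 3)}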
This can be exceeded by structured arrays with a large number of -columns. The version 2.0 format extends the header size to 4 GiB. -`numpy.save` will automatically save in 2.0 format if the data requires it, -else it will always use the more compatible 1.0 format. - -The description of the fourth element of the header therefore has become: -"The next 4 bytes form a little-endian unsigned int: the length of the header -data HEADER_LEN." - -Format Version 3.0 ------------------- - -This version replaces the ASCII string (which in practice was latin1) with -a utf8-encoded string, so supports structured types with any unicode field -names. - -Notes ------ -The ``.npy`` format, including motivation for creating it and a comparison of -alternatives, is described in the `"npy-format" NEP -<https://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have -evolved with time and this document is more current. - -""" -from __future__ import division, absolute_import, print_function - -import numpy -import sys -import io -import warnings -from numpy.lib.utils import safe_eval -from numpy.compat import ( - isfileobj, long, os_fspath, pickle - ) - - -__all__ = [] - - -MAGIC_PREFIX = b'\x93NUMPY' -MAGIC_LEN = len(MAGIC_PREFIX) + 2 -ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 -BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes - -# difference between version 1.0 and 2.0 is a 4 byte (I) header length -# instead of 2 bytes (H) allowing storage of large structured arrays -_header_size_info = { - (1, 0): ('<H', 'latin1'), - (2, 0): ('<I', 'latin1'), - (3, 0): ('<I', 'utf8'), -} - - -def _check_version(version): - if version not in [(1, 0), (2, 0), (3, 0), None]: - msg = "we only support format version (1,0), (2,0), and (3,0), not %s" - raise ValueError(msg % (version,)) - - -def magic(major, minor): - """ Return the magic string for the given file format version. - - Parameters - ---------- - major : int in [0, 255] - minor : int in [0, 255] - - Returns - ------- - magic : str - - Raises - ------ - ValueError if the version cannot be formatted. - """ - if major < 0 or major > 255: - raise ValueError("major version must be 0 <= major < 256") - if minor < 0 or minor > 255: - raise ValueError("minor version must be 0 <= minor < 256") - if sys.version_info[0] < 3: - return MAGIC_PREFIX + chr(major) + chr(minor) - else: - return MAGIC_PREFIX + bytes([major, minor]) - -def read_magic(fp): - """ Read the magic string to get the version of the file format. - - Parameters - ---------- - fp : filelike object - - Returns - ------- - major : int - minor : int - """ - magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") - if magic_str[:-2] != MAGIC_PREFIX: - msg = "the magic string is not correct; expected %r, got %r" - raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) - if sys.version_info[0] < 3: - major, minor = map(ord, magic_str[-2:]) - else: - major, minor = magic_str[-2:] - return major, minor - -def _has_metadata(dt): - if dt.metadata is not None: - return True - elif dt.names is not None: - return any(_has_metadata(dt[k]) for k in dt.names) - elif dt.subdtype is not None: - return _has_metadata(dt.base) - else: - return False - -def dtype_to_descr(dtype): - """ - Get a serializable descriptor from the dtype. - - The .descr attribute of a dtype object cannot be round-tripped through - the dtype() constructor. Simple types, like dtype('float32'), have - a descr which looks like a record array with one field with '' as - a name. The dtype() constructor interprets this as a request to give - a default name. Instead, we construct a descriptor that can be passed to - dtype(). - - Parameters - ---------- - dtype : dtype - The dtype of the array that will be written to disk. - - Returns - ------- - descr : object - An object that can be passed to `numpy.dtype()` in order to - replicate the input dtype. - - """ - if _has_metadata(dtype): - warnings.warn("metadata on a dtype may be saved or ignored, but will " - "raise if saved when read. Use another form of storage.", - UserWarning, stacklevel=2) - if dtype.names is not None: - # This is a record array. The .descr is fine.
XXX: parts of the - # record array with an empty name, like padding bytes, still get - # fiddled with. This needs to be fixed in the C implementation of - # dtype(). - return dtype.descr - else: - return dtype.str - -def descr_to_dtype(descr): - ''' - descr may be stored as dtype.descr, which is a list of - (name, format, [shape]) tuples where format may be a str or a tuple. - Offsets are not explicitly saved, rather empty fields with - name, format == '', '|Vn' are added as padding. - - This function reverses the process, eliminating the empty padding fields. - ''' - if isinstance(descr, str): - # No padding removal needed - return numpy.dtype(descr) - elif isinstance(descr, tuple): - # subtype, will always have a shape descr[1] - dt = descr_to_dtype(descr[0]) - return numpy.dtype((dt, descr[1])) - fields = [] - offset = 0 - for field in descr: - if len(field) == 2: - name, descr_str = field - dt = descr_to_dtype(descr_str) - else: - name, descr_str, shape = field - dt = numpy.dtype((descr_to_dtype(descr_str), shape)) - - # Ignore padding bytes, which will be void bytes with '' as name - # Once support for blank names is removed, only "if name == ''" needed) - is_pad = (name == '' and dt.type is numpy.void and dt.names is None) - if not is_pad: - fields.append((name, dt, offset)) - - offset += dt.itemsize - - names, formats, offsets = zip(*fields) - # names may be (title, names) tuples - nametups = (n if isinstance(n, tuple) else (None, n) for n in names) - titles, names = zip(*nametups) - return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, - 'offsets': offsets, 'itemsize': offset}) - -def header_data_from_array_1_0(array): - """ Get the dictionary of header metadata from a numpy.ndarray. - - Parameters - ---------- - array : numpy.ndarray - - Returns - ------- - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - """ - d = {'shape': array.shape} - if array.flags.c_contiguous: - d['fortran_order'] = False - elif array.flags.f_contiguous: - d['fortran_order'] = True - else: - # Totally non-contiguous data. We will have to make it C-contiguous - # before writing. Note that we need to test for C_CONTIGUOUS first - # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. - d['fortran_order'] = False - - d['descr'] = dtype_to_descr(array.dtype) - return d - - -def _wrap_header(header, version): - """ - Takes a stringified header, and attaches the prefix and padding to it - """ - import struct - assert version is not None - fmt, encoding = _header_size_info[version] - if not isinstance(header, bytes): # always true on python 3 - header = header.encode(encoding) - hlen = len(header) + 1 - padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) - try: - header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) - except struct.error: - msg = "Header length {} too big for version={}".format(hlen, version) - raise ValueError(msg) - - # Pad the header with spaces and a final newline such that the magic - # string, the header-length short and the header are aligned on a - # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes - # aligned up to ARRAY_ALIGN on systems like Linux where mmap() - # offset must be page-aligned (i.e. the beginning of the file). 
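# `dtype_to_descr` and `descr_to_dtype` above are inverses for ordinary
# structured dtypes. A small sketch of the round trip (the specific fields
# are arbitrary examples):
import numpy as np
from numpy.lib.format import descr_to_dtype, dtype_to_descr

dt = np.dtype([('x', '<f8'), ('y', '<i4')])
descr = dtype_to_descr(dt)           # serializable form stored in the header
assert descr_to_dtype(descr) == dt   # reconstructs an equal dtype
print(descr)                         # [('x', '<f8'), ('y', '<i4')]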
- return header_prefix + header + b' '*padlen + b'\n' - - -def _wrap_header_guess_version(header): - """ - Like `_wrap_header`, but chooses an appropriate version given the contents - """ - try: - return _wrap_header(header, (1, 0)) - except ValueError: - pass - - try: - ret = _wrap_header(header, (2, 0)) - except UnicodeEncodeError: - pass - else: - warnings.warn("Stored array in format 2.0. It can only be " - "read by NumPy >= 1.9", UserWarning, stacklevel=2) - return ret - - header = _wrap_header(header, (3, 0)) - warnings.warn("Stored array in format 3.0. It can only be " - "read by NumPy >= 1.17", UserWarning, stacklevel=2) - return header - - -def _write_array_header(fp, d, version=None): - """ Write the header for an array using the given format version. - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - version: tuple or None - None means use oldest that works - explicit version will raise a ValueError if the format does not - allow saving this data. Default: None - """ - header = ["{"] - for key, value in sorted(d.items()): - # Need to use repr here, since we eval these when reading - header.append("'%s': %s, " % (key, repr(value))) - header.append("}") - header = "".join(header) - header = _filter_header(header) - if version is None: - header = _wrap_header_guess_version(header) - else: - header = _wrap_header(header, version) - fp.write(header) - -def write_array_header_1_0(fp, d): - """ Write the header for an array using the 1.0 format. - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string - representation to the header of the file. - """ - _write_array_header(fp, d, (1, 0)) - - -def write_array_header_2_0(fp, d): - """ Write the header for an array using the 2.0 format. - The 2.0 format allows storing very large structured arrays. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string - representation to the header of the file. - """ - _write_array_header(fp, d, (2, 0)) - -def read_array_header_1_0(fp): - """ - Read an array header from a filelike object using the 1.0 file format - version. - - This will leave the file object located just after the header. - - Parameters - ---------- - fp : filelike object - A file object or something with a `.read()` method like a file. - - Returns - ------- - shape : tuple of int - The shape of the array. - fortran_order : bool - The array data will be written out directly if it is either - C-contiguous or Fortran-contiguous. Otherwise, it will be made - contiguous before writing it out. - dtype : dtype - The dtype of the file's data. - - Raises - ------ - ValueError - If the data is invalid. - - """ - return _read_array_header(fp, version=(1, 0)) - -def read_array_header_2_0(fp): - """ - Read an array header from a filelike object using the 2.0 file format - version. - - This will leave the file object located just after the header. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - fp : filelike object - A file object or something with a `.read()` method like a file. - - Returns - ------- - shape : tuple of int - The shape of the array. - fortran_order : bool - The array data will be written out directly if it is either - C-contiguous or Fortran-contiguous. Otherwise, it will be made - contiguous before writing it out. - dtype : dtype - The dtype of the file's data.
- - Raises - ------ - ValueError - If the data is invalid. - - """ - return _read_array_header(fp, version=(2, 0)) - - -def _filter_header(s): - """Clean up 'L' in npz header ints. - - Cleans up the 'L' in strings representing integers. Needed to allow npz - headers produced in Python2 to be read in Python3. - - Parameters - ---------- - s : string - Npy file header. - - Returns - ------- - header : str - Cleaned up header. - - """ - import tokenize - if sys.version_info[0] >= 3: - from io import StringIO - else: - from StringIO import StringIO - - tokens = [] - last_token_was_number = False - # adding newline as python 2.7.5 workaround - string = s + "\n" - for token in tokenize.generate_tokens(StringIO(string).readline): - token_type = token[0] - token_string = token[1] - if (last_token_was_number and - token_type == tokenize.NAME and - token_string == "L"): - continue - else: - tokens.append(token) - last_token_was_number = (token_type == tokenize.NUMBER) - # removing newline (see above) as python 2.7.5 workaround - return tokenize.untokenize(tokens)[:-1] - - -def _read_array_header(fp, version): - """ - see read_array_header_1_0 - """ - # Read an unsigned, little-endian short int which has the length of the - # header. - import struct - hinfo = _header_size_info.get(version) - if hinfo is None: - raise ValueError("Invalid version {!r}".format(version)) - hlength_type, encoding = hinfo - - hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") - header_length = struct.unpack(hlength_type, hlength_str)[0] - header = _read_bytes(fp, header_length, "array header") - header = header.decode(encoding) - - # The header is a pretty-printed string representation of a literal - # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte - # boundary. The keys are strings. - # "shape" : tuple of int - # "fortran_order" : bool - # "descr" : dtype.descr - header = _filter_header(header) - try: - d = safe_eval(header) - except SyntaxError as e: - msg = "Cannot parse header: {!r}\nException: {!r}" - raise ValueError(msg.format(header, e)) - if not isinstance(d, dict): - msg = "Header is not a dictionary: {!r}" - raise ValueError(msg.format(d)) - keys = sorted(d.keys()) - if keys != ['descr', 'fortran_order', 'shape']: - msg = "Header does not contain the correct keys: {!r}" - raise ValueError(msg.format(keys)) - - # Sanity-check the values. - if (not isinstance(d['shape'], tuple) or - not numpy.all([isinstance(x, (int, long)) for x in d['shape']])): - msg = "shape is not valid: {!r}" - raise ValueError(msg.format(d['shape'])) - if not isinstance(d['fortran_order'], bool): - msg = "fortran_order is not a valid bool: {!r}" - raise ValueError(msg.format(d['fortran_order'])) - try: - dtype = descr_to_dtype(d['descr']) - except TypeError as e: - msg = "descr is not a valid dtype descriptor: {!r}" - raise ValueError(msg.format(d['descr'])) - - return d['shape'], d['fortran_order'], dtype - -def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): - """ - Write an array to an NPY file, including a header. - - If the array is neither C-contiguous nor Fortran-contiguous AND the - file_like object is not a real file object, this function will have to - copy data in memory. - - Parameters - ---------- - fp : file_like object - An open, writable file object, or similar object with a - ``.write()`` method. - array : ndarray - The array to write to disk. - version : (int, int) or None, optional - The version number of the format. 
None means use the oldest - supported version that is able to store the data. Default: None - allow_pickle : bool, optional - Whether to allow writing pickled data. Default: True - pickle_kwargs : dict, optional - Additional keyword arguments to pass to pickle.dump, excluding - 'protocol'. These are only useful when pickling objects in object - arrays on Python 3 to Python 2 compatible format. - - Raises - ------ - ValueError - If the array cannot be persisted. This includes the case of - allow_pickle=False and array being an object array. - Various other errors - If the array contains Python objects as part of its dtype, the - process of pickling them may raise various errors if the objects - are not picklable. - - """ - _check_version(version) - _write_array_header(fp, header_data_from_array_1_0(array), version) - - if array.itemsize == 0: - buffersize = 0 - else: - # Set buffer size to 16 MiB to hide the Python loop overhead. - buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) - - if array.dtype.hasobject: - # We contain Python objects so we cannot write out the data - # directly. Instead, we will pickle it out - if not allow_pickle: - raise ValueError("Object arrays cannot be saved when " - "allow_pickle=False") - if pickle_kwargs is None: - pickle_kwargs = {} - pickle.dump(array, fp, protocol=3, **pickle_kwargs) - elif array.flags.f_contiguous and not array.flags.c_contiguous: - if isfileobj(fp): - array.T.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='F'): - fp.write(chunk.tobytes('C')) - else: - if isfileobj(fp): - array.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='C'): - fp.write(chunk.tobytes('C')) - - -def read_array(fp, allow_pickle=False, pickle_kwargs=None): - """ - Read an array from an NPY file. - - Parameters - ---------- - fp : file_like object - If this is not a real file object, then this may take extra memory - and time. - allow_pickle : bool, optional - Whether to allow writing pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - - pickle_kwargs : dict - Additional keyword arguments to pass to pickle.load. These are only - useful when loading object arrays saved on Python 2 when using - Python 3. - - Returns - ------- - array : ndarray - The array from the data on disk. - - Raises - ------ - ValueError - If the data is invalid, or allow_pickle=False and the file contains - an object array. - - """ - version = read_magic(fp) - _check_version(version) - shape, fortran_order, dtype = _read_array_header(fp, version) - if len(shape) == 0: - count = 1 - else: - count = numpy.multiply.reduce(shape, dtype=numpy.int64) - - # Now read the actual data. - if dtype.hasobject: - # The array contained Python objects. We need to unpickle the data. - if not allow_pickle: - raise ValueError("Object arrays cannot be loaded when " - "allow_pickle=False") - if pickle_kwargs is None: - pickle_kwargs = {} - try: - array = pickle.load(fp, **pickle_kwargs) - except UnicodeError as err: - if sys.version_info[0] >= 3: - # Friendlier error message - raise UnicodeError("Unpickling a python object failed: %r\n" - "You may need to pass the encoding= option " - "to numpy.load" % (err,)) - raise - else: - if isfileobj(fp): - # We can use the fast fromfile() function. 
- array = numpy.fromfile(fp, dtype=dtype, count=count) - else: - # This is not a real file. We have to read it the - # memory-intensive way. - # crc32 module fails on reads greater than 2 ** 32 bytes, - # breaking large reads from gzip streams. Chunk reads to - # BUFFER_SIZE bytes to avoid issue and reduce memory overhead - # of the read. In non-chunked case count < max_read_count, so - # only one read is performed. - - # Use np.ndarray instead of np.empty since the latter does - # not correctly instantiate zero-width string dtypes; see - # https://github.com/numpy/numpy/pull/6430 - array = numpy.ndarray(count, dtype=dtype) - - if dtype.itemsize > 0: - # If dtype.itemsize == 0 then there's nothing more to read - max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) - - for i in range(0, count, max_read_count): - read_count = min(max_read_count, count - i) - read_size = int(read_count * dtype.itemsize) - data = _read_bytes(fp, read_size, "array data") - array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, - count=read_count) - - if fortran_order: - array.shape = shape[::-1] - array = array.transpose() - else: - array.shape = shape - - return array - - -def open_memmap(filename, mode='r+', dtype=None, shape=None, - fortran_order=False, version=None): - """ - Open a .npy file as a memory-mapped array. - - This may be used to read an existing file or create a new one. - - Parameters - ---------- - filename : str or path-like - The name of the file on disk. This may *not* be a file-like - object. - mode : str, optional - The mode in which to open the file; the default is 'r+'. In - addition to the standard file modes, 'c' is also accepted to mean - "copy on write." See `memmap` for the available mode strings. - dtype : data-type, optional - The data type of the array if we are creating a new file in "write" - mode, if not, `dtype` is ignored. The default value is None, which - results in a data-type of `float64`. - shape : tuple of int - The shape of the array if we are creating a new file in "write" - mode, in which case this parameter is required. Otherwise, this - parameter is ignored and is thus optional. - fortran_order : bool, optional - Whether the array should be Fortran-contiguous (True) or - C-contiguous (False, the default) if we are creating a new file in - "write" mode. - version : tuple of int (major, minor) or None - If the mode is a "write" mode, then this is the version of the file - format used to create the file. None means use the oldest - supported version that is able to store the data. Default: None - - Returns - ------- - marray : memmap - The memory-mapped array. - - Raises - ------ - ValueError - If the data or the mode is invalid. - IOError - If the file is not found or cannot be opened correctly. - - See Also - -------- - memmap - - """ - if isfileobj(filename): - raise ValueError("Filename must be a string or a path-like object." - " Memmap cannot use existing file handles.") - - if 'w' in mode: - # We are creating the file, not reading it. - # Check if we ought to create the file. - _check_version(version) - # Ensure that the given dtype is an authentic dtype object rather - # than just something that can be interpreted as a dtype object. - dtype = numpy.dtype(dtype) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." - raise ValueError(msg) - d = dict( - descr=dtype_to_descr(dtype), - fortran_order=fortran_order, - shape=shape, - ) - # If we got here, then it should be safe to create the file. 
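# Typical use of `open_memmap`: create an on-disk array and write through
# the mapping. A short sketch (the file name 'big.npy' is arbitrary):
import numpy as np
from numpy.lib.format import open_memmap

m = open_memmap('big.npy', mode='w+', dtype='float64', shape=(1000, 4))
m[0, :] = 1.0                  # assignments go straight through to the file
m.flush()                      # make sure the data hits the disk
print(np.load('big.npy')[0])   # array([1., 1., 1., 1.])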
- with open(os_fspath(filename), mode+'b') as fp: - _write_array_header(fp, d, version) - offset = fp.tell() - else: - # Read the header of the file first. - with open(os_fspath(filename), 'rb') as fp: - version = read_magic(fp) - _check_version(version) - - shape, fortran_order, dtype = _read_array_header(fp, version) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." - raise ValueError(msg) - offset = fp.tell() - - if fortran_order: - order = 'F' - else: - order = 'C' - - # We need to change a write-only mode to a read-write mode since we've - # already written data to the file. - if mode == 'w+': - mode = 'r+' - - marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, - mode=mode, offset=offset) - - return marray - - -def _read_bytes(fp, size, error_template="ran out of data"): - """ - Read from file-like object until size bytes are read. - Raises ValueError if EOF is encountered before size bytes are read. - Non-blocking objects only supported if they derive from io objects. - - Required as e.g. ZipExtFile in python 2.6 can return less data than - requested. - """ - data = bytes() - while True: - # io files (default in python3) return None or raise on - # would-block, python2 file will truncate, probably nothing can be - # done about that. note that regular files can't be non-blocking - try: - r = fp.read(size - len(data)) - data += r - if len(r) == 0 or len(data) == size: - break - except io.BlockingIOError: - pass - if len(data) != size: - msg = "EOF: reading %s, expected %d bytes got %d" - raise ValueError(msg % (error_template, size, len(data))) - else: - return data diff --git a/venv/lib/python3.7/site-packages/numpy/lib/function_base.py b/venv/lib/python3.7/site-packages/numpy/lib/function_base.py deleted file mode 100644 index df06d10..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/function_base.py +++ /dev/null @@ -1,4808 +0,0 @@ -from __future__ import division, absolute_import, print_function - -try: - # Accessing collections abstract classes from collections - # has been deprecated since Python 3.3 - import collections.abc as collections_abc -except ImportError: - import collections as collections_abc -import functools -import re -import sys -import warnings - -import numpy as np -import numpy.core.numeric as _nx -from numpy.core import atleast_1d, transpose -from numpy.core.numeric import ( - ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, - empty_like, ndarray, around, floor, ceil, take, dot, where, intp, - integer, isscalar, absolute - ) -from numpy.core.umath import ( - pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, - mod, exp, not_equal, subtract - ) -from numpy.core.fromnumeric import ( - ravel, nonzero, partition, mean, any, sum - ) -from numpy.core.numerictypes import typecodes -from numpy.core.overrides import set_module -from numpy.core import overrides -from numpy.core.function_base import add_newdoc -from numpy.lib.twodim_base import diag -from numpy.core.multiarray import ( - _insert, add_docstring, bincount, normalize_axis_index, _monotonicity, - interp as compiled_interp, interp_complex as compiled_interp_complex - ) -from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc -from numpy.compat import long - -if sys.version_info[0] < 3: - # Force range to be a generator, for np.delete's usage.
- range = xrange - import __builtin__ as builtins -else: - import builtins - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -# needed in this module for compatibility -from numpy.lib.histograms import histogram, histogramdd - -__all__ = [ - 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', - 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', - 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', - 'bincount', 'digitize', 'cov', 'corrcoef', - 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', - 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', - 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc', - 'quantile' - ] - - -def _rot90_dispatcher(m, k=None, axes=None): - return (m,) - - -@array_function_dispatch(_rot90_dispatcher) -def rot90(m, k=1, axes=(0,1)): - """ - Rotate an array by 90 degrees in the plane specified by axes. - - Rotation direction is from the first towards the second axis. - - Parameters - ---------- - m : array_like - Array of two or more dimensions. - k : integer - Number of times the array is rotated by 90 degrees. - axes: (2,) array_like - The array is rotated in the plane defined by the axes. - Axes must be different. - - .. versionadded:: 1.12.0 - - Returns - ------- - y : ndarray - A rotated view of `m`. - - See Also - -------- - flip : Reverse the order of elements in an array along the given axis. - fliplr : Flip an array horizontally. - flipud : Flip an array vertically. - - Notes - ----- - rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) - rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) - - Examples - -------- - >>> m = np.array([[1,2],[3,4]], int) - >>> m - array([[1, 2], - [3, 4]]) - >>> np.rot90(m) - array([[2, 4], - [1, 3]]) - >>> np.rot90(m, 2) - array([[4, 3], - [2, 1]]) - >>> m = np.arange(8).reshape((2,2,2)) - >>> np.rot90(m, 1, (1,2)) - array([[[1, 3], - [0, 2]], - [[5, 7], - [4, 6]]]) - - """ - axes = tuple(axes) - if len(axes) != 2: - raise ValueError("len(axes) must be 2.") - - m = asanyarray(m) - - if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: - raise ValueError("Axes must be different.") - - if (axes[0] >= m.ndim or axes[0] < -m.ndim - or axes[1] >= m.ndim or axes[1] < -m.ndim): - raise ValueError("Axes={} out of range for array of ndim={}." - .format(axes, m.ndim)) - - k %= 4 - - if k == 0: - return m[:] - if k == 2: - return flip(flip(m, axes[0]), axes[1]) - - axes_list = arange(0, m.ndim) - (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], - axes_list[axes[0]]) - - if k == 1: - return transpose(flip(m,axes[1]), axes_list) - else: - # k == 3 - return flip(transpose(m, axes_list), axes[1]) - - -def _flip_dispatcher(m, axis=None): - return (m,) - - -@array_function_dispatch(_flip_dispatcher) -def flip(m, axis=None): - """ - Reverse the order of elements in an array along the given axis. - - The shape of the array is preserved, but the elements are reordered. - - .. versionadded:: 1.12.0 - - Parameters - ---------- - m : array_like - Input array. - axis : None or int or tuple of ints, optional - Axis or axes along which to flip over. The default, - axis=None, will flip over all of the axes of the input array. - If axis is negative it counts from the last to the first axis. - - If axis is a tuple of ints, flipping is performed on all of the axes - specified in the tuple. - - .. 
versionchanged:: 1.15.0 - None and tuples of axes are supported - - Returns - ------- - out : array_like - A view of `m` with the entries of axis reversed. Since a view is - returned, this operation is done in constant time. - - See Also - -------- - flipud : Flip an array vertically (axis=0). - fliplr : Flip an array horizontally (axis=1). - - Notes - ----- - flip(m, 0) is equivalent to flipud(m). - - flip(m, 1) is equivalent to fliplr(m). - - flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. - - flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all - positions. - - flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at - position 0 and position 1. - - Examples - -------- - >>> A = np.arange(8).reshape((2,2,2)) - >>> A - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> np.flip(A, 0) - array([[[4, 5], - [6, 7]], - [[0, 1], - [2, 3]]]) - >>> np.flip(A, 1) - array([[[2, 3], - [0, 1]], - [[6, 7], - [4, 5]]]) - >>> np.flip(A) - array([[[7, 6], - [5, 4]], - [[3, 2], - [1, 0]]]) - >>> np.flip(A, (0, 2)) - array([[[5, 4], - [7, 6]], - [[1, 0], - [3, 2]]]) - >>> A = np.random.randn(3,4,5) - >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) - True - """ - if not hasattr(m, 'ndim'): - m = asarray(m) - if axis is None: - indexer = (np.s_[::-1],) * m.ndim - else: - axis = _nx.normalize_axis_tuple(axis, m.ndim) - indexer = [np.s_[:]] * m.ndim - for ax in axis: - indexer[ax] = np.s_[::-1] - indexer = tuple(indexer) - return m[indexer] - - -@set_module('numpy') -def iterable(y): - """ - Check whether or not an object can be iterated over. - - Parameters - ---------- - y : object - Input object. - - Returns - ------- - b : bool - Return ``True`` if the object has an iterator method or is a - sequence and ``False`` otherwise. - - - Examples - -------- - >>> np.iterable([1, 2, 3]) - True - >>> np.iterable(2) - False - - """ - try: - iter(y) - except TypeError: - return False - return True - - -def _average_dispatcher(a, axis=None, weights=None, returned=None): - return (a, weights) - - -@array_function_dispatch(_average_dispatcher) -def average(a, axis=None, weights=None, returned=False): - """ - Compute the weighted average along the specified axis. - - Parameters - ---------- - a : array_like - Array containing data to be averaged. If `a` is not an array, a - conversion is attempted. - axis : None or int or tuple of ints, optional - Axis or axes along which to average `a`. The default, - axis=None, will average over all of the elements of the input array. - If axis is negative it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If axis is a tuple of ints, averaging is performed on all of the axes - specified in the tuple instead of a single axis or all the axes as - before. - weights : array_like, optional - An array of weights associated with the values in `a`. Each value in - `a` contributes to the average according to its associated weight. - The weights array can either be 1-D (in which case its length must be - the size of `a` along the given axis) or of the same shape as `a`. - If `weights=None`, then all data in `a` are assumed to have a - weight equal to one. The 1-D calculation is:: - - avg = sum(a * weights) / sum(weights) - - The only constraint on `weights` is that `sum(weights)` must not be 0. - returned : bool, optional - Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) - is returned, otherwise only the average is returned. 
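# The 1-D weighted-average formula quoted above, checked against np.average
# on the same data as the documented example:
import numpy as np

a = np.arange(1, 11)
w = np.arange(10, 0, -1)
manual = (a * w).sum() / w.sum()          # avg = sum(a * weights) / sum(weights)
print(manual, np.average(a, weights=w))   # 4.0 4.0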
- If `weights=None`, `sum_of_weights` is equivalent to the number of - elements over which the average is taken. - - Returns - ------- - retval, [sum_of_weights] : array_type or double - Return the average along the specified axis. When `returned` is `True`, - return a tuple with the average as the first element and the sum - of the weights as the second element. `sum_of_weights` is of the - same type as `retval`. The result dtype follows a general pattern. - If `weights` is None, the result dtype will be that of `a`, or ``float64`` - if `a` is integral. Otherwise, if `weights` is not None and `a` is non- - integral, the result type will be the type of lowest precision capable of - representing values of both `a` and `weights`. If `a` happens to be - integral, the previous rules still apply but the result dtype will - at least be ``float64``. - - Raises - ------ - ZeroDivisionError - When all weights along axis are zero. See `numpy.ma.average` for a - version robust to this type of error. - TypeError - When the length of 1D `weights` is not the same as the shape of `a` - along axis. - - See Also - -------- - mean - - ma.average : average for masked arrays -- useful if your data contains - "missing" values - numpy.result_type : Returns the type that results from applying the - numpy type promotion rules to the arguments. - - Examples - -------- - >>> data = np.arange(1, 5) - >>> data - array([1, 2, 3, 4]) - >>> np.average(data) - 2.5 - >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) - 4.0 - - >>> data = np.arange(6).reshape((3,2)) - >>> data - array([[0, 1], - [2, 3], - [4, 5]]) - >>> np.average(data, axis=1, weights=[1./4, 3./4]) - array([0.75, 2.75, 4.75]) - >>> np.average(data, weights=[1./4, 3./4]) - Traceback (most recent call last): - ... - TypeError: Axis must be specified when shapes of a and weights differ. - - >>> a = np.ones(5, dtype=np.float128) - >>> w = np.ones(5, dtype=np.complex64) - >>> avg = np.average(a, weights=w) - >>> print(avg.dtype) - complex256 - """ - a = np.asanyarray(a) - - if weights is None: - avg = a.mean(axis) - scl = avg.dtype.type(a.size/avg.size) - else: - wgt = np.asanyarray(weights) - - if issubclass(a.dtype.type, (np.integer, np.bool_)): - result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') - else: - result_dtype = np.result_type(a.dtype, wgt.dtype) - - # Sanity checks - if a.shape != wgt.shape: - if axis is None: - raise TypeError( - "Axis must be specified when shapes of a and weights " - "differ.") - if wgt.ndim != 1: - raise TypeError( - "1D weights expected when shapes of a and weights differ.") - if wgt.shape[0] != a.shape[axis]: - raise ValueError( - "Length of weights not compatible with specified axis.") - - # setup wgt to broadcast along axis - wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) - wgt = wgt.swapaxes(-1, axis) - - scl = wgt.sum(axis=axis, dtype=result_dtype) - if np.any(scl == 0.0): - raise ZeroDivisionError( - "Weights sum to zero, can't be normalized") - - avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl - - if returned: - if scl.shape != avg.shape: - scl = np.broadcast_to(scl, avg.shape).copy() - return avg, scl - else: - return avg - - -@set_module('numpy') -def asarray_chkfinite(a, dtype=None, order=None): - """Convert the input to an array, checking for NaNs or Infs. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays.
Success requires no NaNs or Infs. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major (C-style) or - column-major (Fortran-style) memory representation. - Defaults to 'C'. - - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - - Raises - ------ - ValueError - Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). - - See Also - -------- - asarray : Create an array. - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array. If all elements are finite - ``asarray_chkfinite`` is identical to ``asarray``. - - >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) - array([1., 2.]) - - Raises ValueError if array_like contains NaNs or Infs. - - >>> a = [1, 2, np.inf] - >>> try: - ... np.asarray_chkfinite(a) - ... except ValueError: - ... print('ValueError') - ... - ValueError - - """ - a = asarray(a, dtype=dtype, order=order) - if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): - raise ValueError( - "array must not contain infs or NaNs") - return a - - -def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): - yield x - # support the undocumented behavior of allowing scalars - if np.iterable(condlist): - for c in condlist: - yield c - - -@array_function_dispatch(_piecewise_dispatcher) -def piecewise(x, condlist, funclist, *args, **kw): - """ - Evaluate a piecewise-defined function. - - Given a set of conditions and corresponding functions, evaluate each - function on the input data wherever its condition is true. - - Parameters - ---------- - x : ndarray or scalar - The input domain. - condlist : list of bool arrays or bool scalars - Each boolean array corresponds to a function in `funclist`. Wherever - `condlist[i]` is True, `funclist[i](x)` is used as the output value. - - Each boolean array in `condlist` selects a piece of `x`, - and should therefore be of the same shape as `x`. - - The length of `condlist` must correspond to that of `funclist`. - If one extra function is given, i.e. if - ``len(funclist) == len(condlist) + 1``, then that extra function - is the default value, used wherever all conditions are false. - funclist : list of callables, f(x,*args,**kw), or scalars - Each function is evaluated over `x` wherever its corresponding - condition is True. It should take a 1d array as input and give a 1d - array or a scalar value as output. If, instead of a callable, - a scalar is provided then a constant function (``lambda x: scalar``) is - assumed. - args : tuple, optional - Any further arguments given to `piecewise` are passed to the functions - upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then - each function is called as ``f(x, 1, 'a')``. - kw : dict, optional - Keyword arguments used in calling `piecewise` are passed to the - functions upon execution, i.e., if called - ``piecewise(..., ..., alpha=1)``, then each function is called as - ``f(x, alpha=1)``.
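# The *args/**kw pass-through described just above, in a runnable sketch;
# `alpha` is an arbitrary illustrative keyword:
import numpy as np

x = np.linspace(-2, 2, 5)
out = np.piecewise(x, [x < 0, x >= 0],
                   [lambda v, alpha=1: -alpha * v,   # negative branch
                    lambda v, alpha=1: alpha * v],   # non-negative branch
                   alpha=3)
print(out)  # [6. 3. 0. 3. 6.]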
- - Returns - ------- - out : ndarray - The output is the same shape and type as x and is found by - calling the functions in `funclist` on the appropriate portions of `x`, - as defined by the boolean arrays in `condlist`. Portions not covered - by any condition have a default value of 0. - - - See Also - -------- - choose, select, where - - Notes - ----- - This is similar to choose or select, except that functions are - evaluated on elements of `x` that satisfy the corresponding condition from - `condlist`. - - The result is:: - - |-- - |funclist[0](x[condlist[0]]) - out = |funclist[1](x[condlist[1]]) - |... - |funclist[n2](x[condlist[n2]]) - |-- - - Examples - -------- - Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. - - >>> x = np.linspace(-2.5, 2.5, 6) - >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) - array([-1., -1., -1., 1., 1., 1.]) - - Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for - ``x >= 0``. - - >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) - array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) - - Apply the same function to a scalar value. - - >>> y = -2 - >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) - array(2) - - """ - x = asanyarray(x) - n2 = len(funclist) - - # undocumented: single condition is promoted to a list of one condition - if isscalar(condlist) or ( - not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): - condlist = [condlist] - - condlist = array(condlist, dtype=bool) - n = len(condlist) - - if n == n2 - 1: # compute the "otherwise" condition. - condelse = ~np.any(condlist, axis=0, keepdims=True) - condlist = np.concatenate([condlist, condelse], axis=0) - n += 1 - elif n != n2: - raise ValueError( - "with {} condition(s), either {} or {} functions are expected" - .format(n, n, n+1) - ) - - y = zeros(x.shape, x.dtype) - for k in range(n): - item = funclist[k] - if not isinstance(item, collections_abc.Callable): - y[condlist[k]] = item - else: - vals = x[condlist[k]] - if vals.size > 0: - y[condlist[k]] = item(vals, *args, **kw) - - return y - - -def _select_dispatcher(condlist, choicelist, default=None): - for c in condlist: - yield c - for c in choicelist: - yield c - - -@array_function_dispatch(_select_dispatcher) -def select(condlist, choicelist, default=0): - """ - Return an array drawn from elements in choicelist, depending on conditions. - - Parameters - ---------- - condlist : list of bool ndarrays - The list of conditions which determine from which array in `choicelist` - the output elements are taken. When multiple conditions are satisfied, - the first one encountered in `condlist` is used. - choicelist : list of ndarrays - The list of arrays from which the output elements are taken. It has - to be of the same length as `condlist`. - default : scalar, optional - The element inserted in `output` when all conditions evaluate to False. - - Returns - ------- - output : ndarray - The output at position m is the m-th element of the array in - `choicelist` where the m-th element of the corresponding array in - `condlist` is True. - - See Also - -------- - where : Return elements from one of two arrays depending on condition. - take, choose, compress, diag, diagonal - - Examples - -------- - >>> x = np.arange(10) - >>> condlist = [x<3, x>5] - >>> choicelist = [x, x**2] - >>> np.select(condlist, choicelist) - array([ 0, 1, 2, ..., 49, 64, 81]) - - """ - # Check the size of condlist and choicelist are the same, or abort. 
- if len(condlist) != len(choicelist): - raise ValueError( - 'list of cases must be same length as list of conditions') - - # Now that the dtype is known, handle the deprecated select([], []) case - if len(condlist) == 0: - raise ValueError("select with an empty condition list is not possible") - - choicelist = [np.asarray(choice) for choice in choicelist] - choicelist.append(np.asarray(default)) - - # need to get the result type before broadcasting for correct scalar - # behaviour - dtype = np.result_type(*choicelist) - - # Convert conditions to arrays and broadcast conditions and choices - # as the shape is needed for the result. Doing it separately optimizes - # for example when all choices are scalars. - condlist = np.broadcast_arrays(*condlist) - choicelist = np.broadcast_arrays(*choicelist) - - # If cond array is not an ndarray in boolean format or scalar bool, abort. - for i in range(len(condlist)): - cond = condlist[i] - if cond.dtype.type is not np.bool_: - raise TypeError( - 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) - - if choicelist[0].ndim == 0: - # This may be common, so avoid the call. - result_shape = condlist[0].shape - else: - result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape - - result = np.full(result_shape, choicelist[-1], dtype) - - # Use np.copyto to burn each choicelist array onto result, using the - # corresponding condlist as a boolean mask. This is done in reverse - # order since the first choice should take precedence. - choicelist = choicelist[-2::-1] - condlist = condlist[::-1] - for choice, cond in zip(choicelist, condlist): - np.copyto(result, choice, where=cond) - - return result - - -def _copy_dispatcher(a, order=None): - return (a,) - - -@array_function_dispatch(_copy_dispatcher) -def copy(a, order='K'): - """ - Return an array copy of the given object. - - Parameters - ---------- - a : array_like - Input data. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the copy. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :meth:`ndarray.copy` are very - similar, but have different default values for their order= - arguments.) - - Returns - ------- - arr : ndarray - Array interpretation of `a`. - - Notes - ----- - This is equivalent to: - - >>> np.array(a, copy=True) #doctest: +SKIP - - Examples - -------- - Create an array x, with a reference y and a copy z: - - >>> x = np.array([1, 2, 3]) - >>> y = x - >>> z = np.copy(x) - - Note that, when we modify x, y changes, but not z: - - >>> x[0] = 10 - >>> x[0] == y[0] - True - >>> x[0] == z[0] - False - - """ - return array(a, order=order, copy=True) - -# Basic operations - - -def _gradient_dispatcher(f, *varargs, **kwargs): - yield f - for v in varargs: - yield v - - -@array_function_dispatch(_gradient_dispatcher) -def gradient(f, *varargs, **kwargs): - """ - Return the gradient of an N-dimensional array. - - The gradient is computed using second order accurate central differences - in the interior points and either first or second order accurate one-sides - (forward or backwards) differences at the boundaries. - The returned gradient hence has the same shape as the input array. - - Parameters - ---------- - f : array_like - An N-dimensional array containing samples of a scalar function. - varargs : list of scalar or array, optional - Spacing between f values. Default unitary spacing for all dimensions. 
- Spacing can be specified using: - - 1. single scalar to specify a sample distance for all dimensions. - 2. N scalars to specify a constant sample distance for each dimension. - i.e. `dx`, `dy`, `dz`, ... - 3. N arrays to specify the coordinates of the values along each - dimension of F. The length of the array must match the size of - the corresponding dimension - 4. Any combination of N scalars/arrays with the meaning of 2. and 3. - - If `axis` is given, the number of varargs must equal the number of axes. - Default: 1. - - edge_order : {1, 2}, optional - Gradient is calculated using N-th order accurate differences - at the boundaries. Default: 1. - - .. versionadded:: 1.9.1 - - axis : None or int or tuple of ints, optional - Gradient is calculated only along the given axis or axes - The default (axis = None) is to calculate the gradient for all the axes - of the input array. axis may be negative, in which case it counts from - the last to the first axis. - - .. versionadded:: 1.11.0 - - Returns - ------- - gradient : ndarray or list of ndarray - A set of ndarrays (or a single ndarray if there is only one dimension) - corresponding to the derivatives of f with respect to each dimension. - Each derivative has the same shape as f. - - Examples - -------- - >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) - >>> np.gradient(f) - array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - >>> np.gradient(f, 2) - array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) - - Spacing can be also specified with an array that represents the coordinates - of the values F along the dimensions. - For instance a uniform spacing: - - >>> x = np.arange(f.size) - >>> np.gradient(f, x) - array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - - Or a non uniform one: - - >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) - >>> np.gradient(f, x) - array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) - - For two dimensional arrays, the return will be two arrays ordered by - axis. In this example the first array stands for the gradient in - rows and the second one in columns direction: - - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) - [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], - [1. , 1. , 1. ]])] - - In this example the spacing is also specified: - uniform for axis=0 and non uniform for axis=1 - - >>> dx = 2. - >>> y = [1., 1.5, 3.5] - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) - [array([[ 1. , 1. , -0.5], - [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], - [2. , 1.7, 0.5]])] - - It is possible to specify how boundaries are treated using `edge_order` - - >>> x = np.array([0, 1, 2, 3, 4]) - >>> f = x**2 - >>> np.gradient(f, edge_order=1) - array([1., 2., 4., 6., 7.]) - >>> np.gradient(f, edge_order=2) - array([0., 2., 4., 6., 8.]) - - The `axis` keyword can be used to specify a subset of axes of which the - gradient is calculated - - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) - array([[ 2., 2., -1.], - [ 2., 2., -1.]]) - - Notes - ----- - Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous - derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we - minimize the "consistency error" :math:`\\eta_{i}` between the true gradient - and its estimate from a linear combination of the neighboring grid-points: - - .. 
math:: - - \\eta_{i} = f_{i}^{\\left(1\\right)} - - \\left[ \\alpha f\\left(x_{i}\\right) + - \\beta f\\left(x_{i} + h_{d}\\right) + - \\gamma f\\left(x_{i}-h_{s}\\right) - \\right] - - By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` - with their Taylor series expansion, this translates into solving - the following the linear system: - - .. math:: - - \\left\\{ - \\begin{array}{r} - \\alpha+\\beta+\\gamma=0 \\\\ - \\beta h_{d}-\\gamma h_{s}=1 \\\\ - \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 - \\end{array} - \\right. - - The resulting approximation of :math:`f_{i}^{(1)}` is the following: - - .. math:: - - \\hat f_{i}^{(1)} = - \\frac{ - h_{s}^{2}f\\left(x_{i} + h_{d}\\right) - + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) - - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} - { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} - + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} - + h_{s}h_{d}^{2}}{h_{d} - + h_{s}}\\right) - - It is worth noting that if :math:`h_{s}=h_{d}` - (i.e., data are evenly spaced) - we find the standard second order approximation: - - .. math:: - - \\hat f_{i}^{(1)}= - \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} - + \\mathcal{O}\\left(h^{2}\\right) - - With a similar procedure the forward/backward approximations used for - boundaries can be derived. - - References - ---------- - .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics - (Texts in Applied Mathematics). New York: Springer. - .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations - in Geophysical Fluid Dynamics. New York: Springer. - .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on - Arbitrarily Spaced Grids, - Mathematics of Computation 51, no. 184 : 699-706. - `PDF `_. - """ - f = np.asanyarray(f) - N = f.ndim # number of dimensions - - axes = kwargs.pop('axis', None) - if axes is None: - axes = tuple(range(N)) - else: - axes = _nx.normalize_axis_tuple(axes, N) - - len_axes = len(axes) - n = len(varargs) - if n == 0: - # no spacing argument - use 1 in all axes - dx = [1.0] * len_axes - elif n == 1 and np.ndim(varargs[0]) == 0: - # single scalar for all axes - dx = varargs * len_axes - elif n == len_axes: - # scalar or 1d array for each axis - dx = list(varargs) - for i, distances in enumerate(dx): - distances = np.asanyarray(distances) - if distances.ndim == 0: - continue - elif distances.ndim != 1: - raise ValueError("distances must be either scalars or 1d") - if len(distances) != f.shape[axes[i]]: - raise ValueError("when 1d, distances must match " - "the length of the corresponding dimension") - if np.issubdtype(distances.dtype, np.integer): - # Convert numpy integer types to float64 to avoid modular - # arithmetic in np.diff(distances). - distances = distances.astype(np.float64) - diffx = np.diff(distances) - # if distances are constant reduce to the scalar case - # since it brings a consistent speedup - if (diffx == diffx[0]).all(): - diffx = diffx[0] - dx[i] = diffx - else: - raise TypeError("invalid number of arguments") - - edge_order = kwargs.pop('edge_order', 1) - if kwargs: - raise TypeError('"{}" are not valid keyword arguments.'.format( - '", "'.join(kwargs.keys()))) - if edge_order > 2: - raise ValueError("'edge_order' greater than 2 not supported") - - # use central differences on interior and one-sided differences on the - # endpoints. This preserves second order-accuracy over the full domain. 
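# A sketch of the indexing trick used below: the slices are collected in
# tuples so the same code differentiates along any axis of an N-d array;
# for axis=1 of a 2-d array, tuple(slice1) plays the role of [:, 1:-1].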
- - outvals = [] - - # create slice objects --- initially all are [:, :, ..., :] - slice1 = [slice(None)]*N - slice2 = [slice(None)]*N - slice3 = [slice(None)]*N - slice4 = [slice(None)]*N - - otype = f.dtype - if otype.type is np.datetime64: - # the timedelta dtype with the same unit information - otype = np.dtype(otype.name.replace('datetime', 'timedelta')) - # view as timedelta to allow addition - f = f.view(otype) - elif otype.type is np.timedelta64: - pass - elif np.issubdtype(otype, np.inexact): - pass - else: - # All other types convert to floating point. - # First check if f is a numpy integer type; if so, convert f to float64 - # to avoid modular arithmetic when computing the changes in f. - if np.issubdtype(otype, np.integer): - f = f.astype(np.float64) - otype = np.float64 - - for axis, ax_dx in zip(axes, dx): - if f.shape[axis] < edge_order + 1: - raise ValueError( - "Shape of array too small to calculate a numerical gradient, " - "at least (edge_order + 1) elements are required.") - # result allocation - out = np.empty_like(f, dtype=otype) - - # spacing for the current axis - uniform_spacing = np.ndim(ax_dx) == 0 - - # Numerical differentiation: 2nd order interior - slice1[axis] = slice(1, -1) - slice2[axis] = slice(None, -2) - slice3[axis] = slice(1, -1) - slice4[axis] = slice(2, None) - - if uniform_spacing: - out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx) - else: - dx1 = ax_dx[0:-1] - dx2 = ax_dx[1:] - a = -(dx2)/(dx1 * (dx1 + dx2)) - b = (dx2 - dx1) / (dx1 * dx2) - c = dx1 / (dx2 * (dx1 + dx2)) - # fix the shape for broadcasting - shape = np.ones(N, dtype=int) - shape[axis] = -1 - a.shape = b.shape = c.shape = shape - # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] - - # Numerical differentiation: 1st order edges - if edge_order == 1: - slice1[axis] = 0 - slice2[axis] = 1 - slice3[axis] = 0 - dx_0 = ax_dx if uniform_spacing else ax_dx[0] - # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) - out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 - - slice1[axis] = -1 - slice2[axis] = -1 - slice3[axis] = -2 - dx_n = ax_dx if uniform_spacing else ax_dx[-1] - # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) - out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n - - # Numerical differentiation: 2nd order edges - else: - slice1[axis] = 0 - slice2[axis] = 0 - slice3[axis] = 1 - slice4[axis] = 2 - if uniform_spacing: - a = -1.5 / ax_dx - b = 2. / ax_dx - c = -0.5 / ax_dx - else: - dx1 = ax_dx[0] - dx2 = ax_dx[1] - a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) - b = (dx1 + dx2) / (dx1 * dx2) - c = - dx1 / (dx2 * (dx1 + dx2)) - # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] - - slice1[axis] = -1 - slice2[axis] = -3 - slice3[axis] = -2 - slice4[axis] = -1 - if uniform_spacing: - a = 0.5 / ax_dx - b = -2. / ax_dx - c = 1.5 / ax_dx - else: - dx1 = ax_dx[-2] - dx2 = ax_dx[-1] - a = (dx2) / (dx1 * (dx1 + dx2)) - b = - (dx2 + dx1) / (dx1 * dx2) - c = (2. 
* dx2 + dx1) / (dx2 * (dx1 + dx2)) - # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] - - outvals.append(out) - - # reset the slice object in this dimension to ":" - slice1[axis] = slice(None) - slice2[axis] = slice(None) - slice3[axis] = slice(None) - slice4[axis] = slice(None) - - if len_axes == 1: - return outvals[0] - else: - return outvals - - -def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): - return (a, prepend, append) - - -@array_function_dispatch(_diff_dispatcher) -def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): - """ - Calculate the n-th discrete difference along the given axis. - - The first difference is given by ``out[i] = a[i+1] - a[i]`` along - the given axis, higher differences are calculated by using `diff` - recursively. - - Parameters - ---------- - a : array_like - Input array - n : int, optional - The number of times values are differenced. If zero, the input - is returned as-is. - axis : int, optional - The axis along which the difference is taken, default is the - last axis. - prepend, append : array_like, optional - Values to prepend or append to `a` along axis prior to - performing the difference. Scalar values are expanded to - arrays with length 1 in the direction of axis and the shape - of the input array in along all other axes. Otherwise the - dimension and shape must match `a` except along axis. - - .. versionadded:: 1.16.0 - - Returns - ------- - diff : ndarray - The n-th differences. The shape of the output is the same as `a` - except along `axis` where the dimension is smaller by `n`. The - type of the output is the same as the type of the difference - between any two elements of `a`. This is the same as the type of - `a` in most cases. A notable exception is `datetime64`, which - results in a `timedelta64` output array. - - See Also - -------- - gradient, ediff1d, cumsum - - Notes - ----- - Type is preserved for boolean arrays, so the result will contain - `False` when consecutive elements are the same and `True` when they - differ. - - For unsigned integer arrays, the results will also be unsigned. This - should not be surprising, as the result is consistent with - calculating the difference directly: - - >>> u8_arr = np.array([1, 0], dtype=np.uint8) - >>> np.diff(u8_arr) - array([255], dtype=uint8) - >>> u8_arr[1,...] - u8_arr[0,...] 
- 255 - - If this is not desirable, then the array should be cast to a larger - integer type first: - - >>> i16_arr = u8_arr.astype(np.int16) - >>> np.diff(i16_arr) - array([-1], dtype=int16) - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.diff(x) - array([ 1, 2, 3, -7]) - >>> np.diff(x, n=2) - array([ 1, 1, -10]) - - >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) - >>> np.diff(x) - array([[2, 3, 4], - [5, 1, 2]]) - >>> np.diff(x, axis=0) - array([[-1, 2, 0, -2]]) - - >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) - >>> np.diff(x) - array([1, 1], dtype='timedelta64[D]') - - """ - if n == 0: - return a - if n < 0: - raise ValueError( - "order must be non-negative but got " + repr(n)) - - a = asanyarray(a) - nd = a.ndim - if nd == 0: - raise ValueError("diff requires input that is at least one dimensional") - axis = normalize_axis_index(axis, nd) - - combined = [] - if prepend is not np._NoValue: - prepend = np.asanyarray(prepend) - if prepend.ndim == 0: - shape = list(a.shape) - shape[axis] = 1 - prepend = np.broadcast_to(prepend, tuple(shape)) - combined.append(prepend) - - combined.append(a) - - if append is not np._NoValue: - append = np.asanyarray(append) - if append.ndim == 0: - shape = list(a.shape) - shape[axis] = 1 - append = np.broadcast_to(append, tuple(shape)) - combined.append(append) - - if len(combined) > 1: - a = np.concatenate(combined, axis) - - slice1 = [slice(None)] * nd - slice2 = [slice(None)] * nd - slice1[axis] = slice(1, None) - slice2[axis] = slice(None, -1) - slice1 = tuple(slice1) - slice2 = tuple(slice2) - - op = not_equal if a.dtype == np.bool_ else subtract - for _ in range(n): - a = op(a[slice1], a[slice2]) - - return a - - -def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): - return (x, xp, fp) - - -@array_function_dispatch(_interp_dispatcher) -def interp(x, xp, fp, left=None, right=None, period=None): - """ - One-dimensional linear interpolation. - - Returns the one-dimensional piecewise linear interpolant to a function - with given discrete data points (`xp`, `fp`), evaluated at `x`. - - Parameters - ---------- - x : array_like - The x-coordinates at which to evaluate the interpolated values. - - xp : 1-D sequence of floats - The x-coordinates of the data points, must be increasing if argument - `period` is not specified. Otherwise, `xp` is internally sorted after - normalizing the periodic boundaries with ``xp = xp % period``. - - fp : 1-D sequence of float or complex - The y-coordinates of the data points, same length as `xp`. - - left : optional float or complex corresponding to fp - Value to return for `x < xp[0]`, default is `fp[0]`. - - right : optional float or complex corresponding to fp - Value to return for `x > xp[-1]`, default is `fp[-1]`. - - period : None or float, optional - A period for the x-coordinates. This parameter allows the proper - interpolation of angular x-coordinates. Parameters `left` and `right` - are ignored if `period` is specified. - - .. versionadded:: 1.10.0 - - Returns - ------- - y : float or complex (corresponding to fp) or ndarray - The interpolated values, same shape as `x`. - - Raises - ------ - ValueError - If `xp` and `fp` have different length - If `xp` or `fp` are not 1-D sequences - If `period == 0` - - Notes - ----- - The x-coordinate sequence is expected to be increasing, but this is not - explicitly enforced. However, if the sequence `xp` is non-increasing, - interpolation results are meaningless. 
- - Note that, since NaN is unsortable, `xp` also cannot contain NaNs. - - A simple check for `xp` being strictly increasing is:: - - np.all(np.diff(xp) > 0) - - Examples - -------- - >>> xp = [1, 2, 3] - >>> fp = [3, 2, 0] - >>> np.interp(2.5, xp, fp) - 1.0 - >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) - array([3. , 3. , 2.5 , 0.56, 0. ]) - >>> UNDEF = -99.0 - >>> np.interp(3.14, xp, fp, right=UNDEF) - -99.0 - - Plot an interpolant to the sine function: - - >>> x = np.linspace(0, 2*np.pi, 10) - >>> y = np.sin(x) - >>> xvals = np.linspace(0, 2*np.pi, 50) - >>> yinterp = np.interp(xvals, x, y) - >>> import matplotlib.pyplot as plt - >>> plt.plot(x, y, 'o') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.plot(xvals, yinterp, '-x') - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.show() - - Interpolation with periodic x-coordinates: - - >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] - >>> xp = [190, -190, 350, -350] - >>> fp = [5, 10, 3, 4] - >>> np.interp(x, xp, fp, period=360) - array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) - - Complex interpolation: - - >>> x = [1.5, 4.0] - >>> xp = [2,3,5] - >>> fp = [1.0j, 0, 2+3j] - >>> np.interp(x, xp, fp) - array([0.+1.j , 1.+1.5j]) - - """ - - fp = np.asarray(fp) - - if np.iscomplexobj(fp): - interp_func = compiled_interp_complex - input_dtype = np.complex128 - else: - interp_func = compiled_interp - input_dtype = np.float64 - - if period is not None: - if period == 0: - raise ValueError("period must be a non-zero value") - period = abs(period) - left = None - right = None - - x = np.asarray(x, dtype=np.float64) - xp = np.asarray(xp, dtype=np.float64) - fp = np.asarray(fp, dtype=input_dtype) - - if xp.ndim != 1 or fp.ndim != 1: - raise ValueError("Data points must be 1-D sequences") - if xp.shape[0] != fp.shape[0]: - raise ValueError("fp and xp are not of the same length") - # normalizing periodic boundaries - x = x % period - xp = xp % period - asort_xp = np.argsort(xp) - xp = xp[asort_xp] - fp = fp[asort_xp] - xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) - fp = np.concatenate((fp[-1:], fp, fp[0:1])) - - return interp_func(x, xp, fp, left, right) - - -def _angle_dispatcher(z, deg=None): - return (z,) - - -@array_function_dispatch(_angle_dispatcher) -def angle(z, deg=False): - """ - Return the angle of the complex argument. - - Parameters - ---------- - z : array_like - A complex number or sequence of complex numbers. - deg : bool, optional - Return angle in degrees if True, radians if False (default). - - Returns - ------- - angle : ndarray or scalar - The counterclockwise angle from the positive real axis on the complex - plane in the range ``(-pi, pi]``, with dtype as numpy.float64. - - .. versionchanged:: 1.16.0 - This function works on subclasses of ndarray like `ma.array`. - - See Also - -------- - arctan2 - absolute - - Examples - -------- - >>> np.angle([1.0, 1.0j, 1+1j]) # in radians - array([ 0. , 1.57079633, 0.78539816]) # may vary - >>> np.angle(1+1j, deg=True) # in degrees - 45.0 - - """ - z = asanyarray(z) - if issubclass(z.dtype.type, _nx.complexfloating): - zimag = z.imag - zreal = z.real - else: - zimag = 0 - zreal = z - - a = arctan2(zimag, zreal) - if deg: - a *= 180/pi - return a - - -def _unwrap_dispatcher(p, discont=None, axis=None): - return (p,) - - -@array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, discont=pi, axis=-1): - """ - Unwrap by changing deltas between values to 2*pi complement. - - Unwrap radian phase `p` by changing absolute jumps greater than - `discont` to their 2*pi complement along the given axis.
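That rule in one line (a hedged sketch): the jump from 3.0 to -3.0 is
-6 rad, larger in magnitude than ``pi``, so it is replaced by its
2*pi complement:

>>> np.unwrap([0.0, 3.0, -3.0])
array([0.        , 3.        , 3.28318531])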
- - Parameters - ---------- - p : array_like - Input array. - discont : float, optional - Maximum discontinuity between values, default is ``pi``. - axis : int, optional - Axis along which unwrap will operate, default is the last axis. - - Returns - ------- - out : ndarray - Output array. - - See Also - -------- - rad2deg, deg2rad - - Notes - ----- - If the discontinuity in `p` is smaller than ``pi``, but larger than - `discont`, no unwrapping is done because taking the 2*pi complement - would only make the discontinuity larger. - - Examples - -------- - >>> phase = np.linspace(0, np.pi, num=5) - >>> phase[3:] += np.pi - >>> phase - array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary - >>> np.unwrap(phase) - array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary - - """ - p = asarray(p) - nd = p.ndim - dd = diff(p, axis=axis) - slice1 = [slice(None, None)]*nd # full slices - slice1[axis] = slice(1, None) - slice1 = tuple(slice1) - ddmod = mod(dd + pi, 2*pi) - pi - _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) - ph_correct = ddmod - dd - _nx.copyto(ph_correct, 0, where=abs(dd) < discont) - up = array(p, copy=True, dtype='d') - up[slice1] = p[slice1] + ph_correct.cumsum(axis) - return up - - -def _sort_complex(a): - return (a,) - - -@array_function_dispatch(_sort_complex) -def sort_complex(a): - """ - Sort a complex array using the real part first, then the imaginary part. - - Parameters - ---------- - a : array_like - Input array - - Returns - ------- - out : complex ndarray - Always returns a sorted complex array. - - Examples - -------- - >>> np.sort_complex([5, 3, 6, 2, 1]) - array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) - - >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) - array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) - - """ - b = array(a, copy=True) - b.sort() - if not issubclass(b.dtype.type, _nx.complexfloating): - if b.dtype.char in 'bhBH': - return b.astype('F') - elif b.dtype.char == 'g': - return b.astype('G') - else: - return b.astype('D') - else: - return b - - -def _trim_zeros(filt, trim=None): - return (filt,) - - -@array_function_dispatch(_trim_zeros) -def trim_zeros(filt, trim='fb'): - """ - Trim the leading and/or trailing zeros from a 1-D array or sequence. - - Parameters - ---------- - filt : 1-D array or sequence - Input array. - trim : str, optional - A string with 'f' representing trim from front and 'b' to trim from - back. Default is 'fb', trim zeros from both front and back of the - array. - - Returns - ------- - trimmed : 1-D array or sequence - The result of trimming the input. The input data type is preserved. - - Examples - -------- - >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) - >>> np.trim_zeros(a) - array([1, 2, 3, 0, 2, 1]) - - >>> np.trim_zeros(a, 'b') - array([0, 0, 0, ..., 0, 2, 1]) - - The input data type is preserved, list/tuple in means list/tuple out. - - >>> np.trim_zeros([0, 1, 2, 0]) - [1, 2] - - """ - first = 0 - trim = trim.upper() - if 'F' in trim: - for i in filt: - if i != 0.: - break - else: - first = first + 1 - last = len(filt) - if 'B' in trim: - for i in filt[::-1]: - if i != 0.: - break - else: - last = last - 1 - return filt[first:last] - -def _extract_dispatcher(condition, arr): - return (condition, arr) - - -@array_function_dispatch(_extract_dispatcher) -def extract(condition, arr): - """ - Return the elements of an array that satisfy some condition. - - This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. 
If - `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. - - Note that `place` does the exact opposite of `extract`. - - Parameters - ---------- - condition : array_like - An array whose nonzero or True entries indicate the elements of `arr` - to extract. - arr : array_like - Input array of the same size as `condition`. - - Returns - ------- - extract : ndarray - Rank 1 array of values from `arr` where `condition` is True. - - See Also - -------- - take, put, copyto, compress, place - - Examples - -------- - >>> arr = np.arange(12).reshape((3, 4)) - >>> arr - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> condition = np.mod(arr, 3)==0 - >>> condition - array([[ True, False, False, True], - [False, False, True, False], - [False, True, False, False]]) - >>> np.extract(condition, arr) - array([0, 3, 6, 9]) - - - If `condition` is boolean: - - >>> arr[condition] - array([0, 3, 6, 9]) - - """ - return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) - - -def _place_dispatcher(arr, mask, vals): - return (arr, mask, vals) - - -@array_function_dispatch(_place_dispatcher) -def place(arr, mask, vals): - """ - Change elements of an array based on conditional and input values. - - Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that - `place` uses the first N elements of `vals`, where N is the number of - True values in `mask`, while `copyto` uses the elements where `mask` - is True. - - Note that `extract` does the exact opposite of `place`. - - Parameters - ---------- - arr : ndarray - Array to put data into. - mask : array_like - Boolean mask array. Must have the same size as `a`. - vals : 1-D sequence - Values to put into `a`. Only the first N elements are used, where - N is the number of True values in `mask`. If `vals` is smaller - than N, it will be repeated, and if elements of `a` are to be masked, - this sequence must be non-empty. - - See Also - -------- - copyto, put, take, extract - - Examples - -------- - >>> arr = np.arange(6).reshape(2, 3) - >>> np.place(arr, arr>2, [44, 55]) - >>> arr - array([[ 0, 1, 2], - [44, 55, 44]]) - - """ - if not isinstance(arr, np.ndarray): - raise TypeError("argument 1 must be numpy.ndarray, " - "not {name}".format(name=type(arr).__name__)) - - return _insert(arr, mask, vals) - - -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. - - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. 
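For instance, a sketch of that failure mode:

>>> np.disp('message', device=object())
Traceback (most recent call last):
    ...
AttributeError: 'object' object has no attribute 'write'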
- - Examples - -------- - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from io import StringIO - >>> buf = StringIO() - >>> np.disp(u'"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - if device is None: - device = sys.stdout - if linefeed: - device.write('%s\n' % mesg) - else: - device.write('%s' % mesg) - device.flush() - return - - -# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html -_DIMENSION_NAME = r'\w+' -_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) -_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) -_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) -_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) - - -def _parse_gufunc_signature(signature): - """ - Parse string signatures for a generalized universal function. - - Arguments - --------- - signature : string - Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` - for ``np.matmul``. - - Returns - ------- - Tuple of input and output core dimensions parsed from the signature, each - of the form List[Tuple[str, ...]]. - """ - if not re.match(_SIGNATURE, signature): - raise ValueError( - 'not a valid gufunc signature: {}'.format(signature)) - return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) - for arg in re.findall(_ARGUMENT, arg_list)] - for arg_list in signature.split('->')) - - -def _update_dim_sizes(dim_sizes, arg, core_dims): - """ - Incrementally check and update core dimension sizes for a single argument. - - Arguments - --------- - dim_sizes : Dict[str, int] - Sizes of existing core dimensions. Will be updated in-place. - arg : ndarray - Argument to examine. - core_dims : Tuple[str, ...] - Core dimensions for this argument. - """ - if not core_dims: - return - - num_core_dims = len(core_dims) - if arg.ndim < num_core_dims: - raise ValueError( - '%d-dimensional argument does not have enough ' - 'dimensions for all core dimensions %r' - % (arg.ndim, core_dims)) - - core_shape = arg.shape[-num_core_dims:] - for dim, size in zip(core_dims, core_shape): - if dim in dim_sizes: - if size != dim_sizes[dim]: - raise ValueError( - 'inconsistent size for core dimension %r: %r vs %r' - % (dim, size, dim_sizes[dim])) - else: - dim_sizes[dim] = size - - -def _parse_input_dimensions(args, input_core_dims): - """ - Parse broadcast and core dimensions for vectorize with a signature. - - Arguments - --------- - args : Tuple[ndarray, ...] - Tuple of input arguments to examine. - input_core_dims : List[Tuple[str, ...]] - List of core dimensions corresponding to each input. - - Returns - ------- - broadcast_shape : Tuple[int, ...] - Common shape to broadcast all non-core dimensions to. - dim_sizes : Dict[str, int] - Common sizes for named core dimensions. 
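`input_core_dims` is the first element of what `_parse_gufunc_signature`
above produces; a short orienting example:

>>> _parse_gufunc_signature('(m,n),(n,p)->(m,p)')
([('m', 'n'), ('n', 'p')], [('m', 'p')])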
- """ - broadcast_args = [] - dim_sizes = {} - for arg, core_dims in zip(args, input_core_dims): - _update_dim_sizes(dim_sizes, arg, core_dims) - ndim = arg.ndim - len(core_dims) - dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) - broadcast_args.append(dummy_array) - broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) - return broadcast_shape, dim_sizes - - -def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): - """Helper for calculating broadcast shapes with core dimensions.""" - return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) - for core_dims in list_of_core_dims] - - -def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): - """Helper for creating output arrays in vectorize.""" - shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) - arrays = tuple(np.empty(shape, dtype=dtype) - for shape, dtype in zip(shapes, dtypes)) - return arrays - - -@set_module('numpy') -class vectorize(object): - """ - vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, - signature=None) - - Generalized function class. - - Define a vectorized function which takes a nested sequence of objects or - numpy arrays as inputs and returns a single numpy array or a tuple of numpy - arrays. The vectorized function evaluates `pyfunc` over successive tuples - of the input arrays like the python map function, except it uses the - broadcasting rules of numpy. - - The data type of the output of `vectorized` is determined by calling - the function with the first element of the input. This can be avoided - by specifying the `otypes` argument. - - Parameters - ---------- - pyfunc : callable - A python function or method. - otypes : str or list of dtypes, optional - The output data type. It must be specified as either a string of - typecode characters or a list of data type specifiers. There should - be one data type specifier for each output. - doc : str, optional - The docstring for the function. If None, the docstring will be the - ``pyfunc.__doc__``. - excluded : set, optional - Set of strings or integers representing the positional or keyword - arguments for which the function will not be vectorized. These will be - passed directly to `pyfunc` unmodified. - - .. versionadded:: 1.7.0 - - cache : bool, optional - If `True`, then cache the first function call that determines the number - of outputs if `otypes` is not provided. - - .. versionadded:: 1.7.0 - - signature : string, optional - Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for - vectorized matrix-vector multiplication. If provided, ``pyfunc`` will - be called with (and expected to return) arrays with shapes given by the - size of corresponding core dimensions. By default, ``pyfunc`` is - assumed to take scalars as input and output. - - .. versionadded:: 1.12.0 - - Returns - ------- - vectorized : callable - Vectorized function. - - See Also - -------- - frompyfunc : Takes an arbitrary Python function and returns a ufunc - - Notes - ----- - The `vectorize` function is provided primarily for convenience, not for - performance. The implementation is essentially a for loop. - - If `otypes` is not specified, then a call to the function with the - first argument will be used to determine the number of outputs. The - results of this call will be cached if `cache` is `True` to prevent - calling the function twice. 
However, to implement the cache, the - original function must be wrapped which will slow down subsequent - calls, so only do this if your function is expensive. - - The new keyword argument interface and `excluded` argument support - further degrades performance. - - References - ---------- - .. [1] NumPy Reference, section `Generalized Universal Function API - `_. - - Examples - -------- - >>> def myfunc(a, b): - ... "Return a-b if a>b, otherwise return a+b" - ... if a > b: - ... return a - b - ... else: - ... return a + b - - >>> vfunc = np.vectorize(myfunc) - >>> vfunc([1, 2, 3, 4], 2) - array([3, 4, 1, 2]) - - The docstring is taken from the input function to `vectorize` unless it - is specified: - - >>> vfunc.__doc__ - 'Return a-b if a>b, otherwise return a+b' - >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') - >>> vfunc.__doc__ - 'Vectorized `myfunc`' - - The output type is determined by evaluating the first element of the input, - unless it is specified: - - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - >>> vfunc = np.vectorize(myfunc, otypes=[float]) - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - - The `excluded` argument can be used to prevent vectorizing over certain - arguments. This can be useful for array-like arguments of a fixed length - such as the coefficients for a polynomial as in `polyval`: - - >>> def mypolyval(p, x): - ... _p = list(p) - ... res = _p.pop(0) - ... while _p: - ... res = res*x + _p.pop(0) - ... return res - >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) - >>> vpolyval(p=[1, 2, 3], x=[0, 1]) - array([3, 6]) - - Positional arguments may also be excluded by specifying their position: - - >>> vpolyval.excluded.add(0) - >>> vpolyval([1, 2, 3], x=[0, 1]) - array([3, 6]) - - The `signature` argument allows for vectorizing functions that act on - non-scalar arrays of fixed length. For example, you can use it for a - vectorized calculation of Pearson correlation coefficient and its p-value: - - >>> import scipy.stats - >>> pearsonr = np.vectorize(scipy.stats.pearsonr, - ... signature='(n),(n)->(),()') - >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) - (array([ 1., -1.]), array([ 0., 0.])) - - Or for a vectorized convolution: - - >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') - >>> convolve(np.eye(4), [1, 2, 1]) - array([[1., 2., 1., 0., 0., 0.], - [0., 1., 2., 1., 0., 0.], - [0., 0., 1., 2., 1., 0.], - [0., 0., 0., 1., 2., 1.]]) - - """ - def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, - cache=False, signature=None): - self.pyfunc = pyfunc - self.cache = cache - self.signature = signature - self._ufunc = None # Caching to improve default performance - - if doc is None: - self.__doc__ = pyfunc.__doc__ - else: - self.__doc__ = doc - - if isinstance(otypes, str): - for char in otypes: - if char not in typecodes['All']: - raise ValueError("Invalid otype specified: %s" % (char,)) - elif iterable(otypes): - otypes = ''.join([_nx.dtype(x).char for x in otypes]) - elif otypes is not None: - raise ValueError("Invalid otype specification") - self.otypes = otypes - - # Excluded variable support - if excluded is None: - excluded = set() - self.excluded = set(excluded) - - if signature is not None: - self._in_and_out_core_dims = _parse_gufunc_signature(signature) - else: - self._in_and_out_core_dims = None - - def __call__(self, *args, **kwargs): - """ - Return arrays with the results of `pyfunc` broadcast (vectorized) over - `args` and `kwargs` not in `excluded`. 
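A hedged sketch of the `excluded` mechanics, with a made-up scaling
function:

>>> def f(a, scale):
...     return a * scale
>>> vf = np.vectorize(f, excluded={'scale'})
>>> vf([1, 2, 3], scale=10)
array([10, 20, 30])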
- """ - excluded = self.excluded - if not kwargs and not excluded: - func = self.pyfunc - vargs = args - else: - # The wrapper accepts only positional arguments: we use `names` and - # `inds` to mutate `the_args` and `kwargs` to pass to the original - # function. - nargs = len(args) - - names = [_n for _n in kwargs if _n not in excluded] - inds = [_i for _i in range(nargs) if _i not in excluded] - the_args = list(args) - - def func(*vargs): - for _n, _i in enumerate(inds): - the_args[_i] = vargs[_n] - kwargs.update(zip(names, vargs[len(inds):])) - return self.pyfunc(*the_args, **kwargs) - - vargs = [args[_i] for _i in inds] - vargs.extend([kwargs[_n] for _n in names]) - - return self._vectorize_call(func=func, args=vargs) - - def _get_ufunc_and_otypes(self, func, args): - """Return (ufunc, otypes).""" - # frompyfunc will fail if args is empty - if not args: - raise ValueError('args can not be empty') - - if self.otypes is not None: - otypes = self.otypes - nout = len(otypes) - - # Note logic here: We only *use* self._ufunc if func is self.pyfunc - # even though we set self._ufunc regardless. - if func is self.pyfunc and self._ufunc is not None: - ufunc = self._ufunc - else: - ufunc = self._ufunc = frompyfunc(func, len(args), nout) - else: - # Get number of outputs and output types by calling the function on - # the first entries of args. We also cache the result to prevent - # the subsequent call when the ufunc is evaluated. - # Assumes that ufunc first evaluates the 0th elements in the input - # arrays (the input values are not checked to ensure this) - args = [asarray(arg) for arg in args] - if builtins.any(arg.size == 0 for arg in args): - raise ValueError('cannot call `vectorize` on size 0 inputs ' - 'unless `otypes` is set') - - inputs = [arg.flat[0] for arg in args] - outputs = func(*inputs) - - # Performance note: profiling indicates that -- for simple - # functions at least -- this wrapping can almost double the - # execution time. - # Hence we make it optional. - if self.cache: - _cache = [outputs] - - def _func(*vargs): - if _cache: - return _cache.pop() - else: - return func(*vargs) - else: - _func = func - - if isinstance(outputs, tuple): - nout = len(outputs) - else: - nout = 1 - outputs = (outputs,) - - otypes = ''.join([asarray(outputs[_k]).dtype.char - for _k in range(nout)]) - - # Performance note: profiling indicates that creating the ufunc is - # not a significant cost compared with wrapping so it seems not - # worth trying to cache this. 
- ufunc = frompyfunc(_func, len(args), nout) - - return ufunc, otypes - - def _vectorize_call(self, func, args): - """Vectorized call to `func` over positional `args`.""" - if self.signature is not None: - res = self._vectorize_call_with_signature(func, args) - elif not args: - res = func() - else: - ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) - - # Convert args to object arrays first - inputs = [array(a, copy=False, subok=True, dtype=object) - for a in args] - - outputs = ufunc(*inputs) - - if ufunc.nout == 1: - res = array(outputs, copy=False, subok=True, dtype=otypes[0]) - else: - res = tuple([array(x, copy=False, subok=True, dtype=t) - for x, t in zip(outputs, otypes)]) - return res - - def _vectorize_call_with_signature(self, func, args): - """Vectorized call over positional arguments with a signature.""" - input_core_dims, output_core_dims = self._in_and_out_core_dims - - if len(args) != len(input_core_dims): - raise TypeError('wrong number of positional arguments: ' - 'expected %r, got %r' - % (len(input_core_dims), len(args))) - args = tuple(asanyarray(arg) for arg in args) - - broadcast_shape, dim_sizes = _parse_input_dimensions( - args, input_core_dims) - input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, - input_core_dims) - args = [np.broadcast_to(arg, shape, subok=True) - for arg, shape in zip(args, input_shapes)] - - outputs = None - otypes = self.otypes - nout = len(output_core_dims) - - for index in np.ndindex(*broadcast_shape): - results = func(*(arg[index] for arg in args)) - - n_results = len(results) if isinstance(results, tuple) else 1 - - if nout != n_results: - raise ValueError( - 'wrong number of outputs from pyfunc: expected %r, got %r' - % (nout, n_results)) - - if nout == 1: - results = (results,) - - if outputs is None: - for result, core_dims in zip(results, output_core_dims): - _update_dim_sizes(dim_sizes, result, core_dims) - - if otypes is None: - otypes = [asarray(result).dtype for result in results] - - outputs = _create_arrays(broadcast_shape, dim_sizes, - output_core_dims, otypes) - - for output, result in zip(outputs, results): - output[index] = result - - if outputs is None: - # did not call the function even once - if otypes is None: - raise ValueError('cannot call `vectorize` on size 0 inputs ' - 'unless `otypes` is set') - if builtins.any(dim not in dim_sizes - for dims in output_core_dims - for dim in dims): - raise ValueError('cannot call `vectorize` with a signature ' - 'including new output dimensions on size 0 ' - 'inputs') - outputs = _create_arrays(broadcast_shape, dim_sizes, - output_core_dims, otypes) - - return outputs[0] if nout == 1 else outputs - - -def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, - fweights=None, aweights=None): - return (m, y, fweights, aweights) - - -@array_function_dispatch(_cov_dispatcher) -def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, - aweights=None): - """ - Estimate a covariance matrix, given data and weights. - - Covariance indicates the level to which two variables vary together. - If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, - then the covariance matrix element :math:`C_{ij}` is the covariance of - :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance - of :math:`x_i`. - - See the notes for an outline of the algorithm. - - Parameters - ---------- - m : array_like - A 1-D or 2-D array containing multiple variables and observations. 
- Each row of `m` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same form - as that of `m`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : bool, optional - Default normalization (False) is by ``(N - 1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is True, - then normalization is by ``N``. These values can be overridden by using - the keyword ``ddof`` in numpy versions >= 1.5. - ddof : int, optional - If not ``None`` the default value implied by `bias` is overridden. - Note that ``ddof=1`` will return the unbiased estimate, even if both - `fweights` and `aweights` are specified, and ``ddof=0`` will return - the simple average. See the notes for the details. The default value - is ``None``. - - .. versionadded:: 1.5 - fweights : array_like, int, optional - 1-D array of integer frequency weights; the number of times each - observation vector should be repeated. - - .. versionadded:: 1.10 - aweights : array_like, optional - 1-D array of observation vector weights. These relative weights are - typically large for observations considered "important" and smaller for - observations considered less "important". If ``ddof=0`` the array of - weights can be used to assign probabilities to observation vectors. - - .. versionadded:: 1.10 - - Returns - ------- - out : ndarray - The covariance matrix of the variables. - - See Also - -------- - corrcoef : Normalized covariance matrix - - Notes - ----- - Assume that the observations are in the columns of the observation - array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The - steps to compute the weighted covariance are as follows:: - - >>> m = np.arange(10, dtype=np.float64) - >>> f = np.arange(10) * 2 - >>> a = np.arange(10) ** 2. - >>> ddof = 1 - >>> w = f * a - >>> v1 = np.sum(w) - >>> v2 = np.sum(w * a) - >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 - >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) - - Note that when ``a == 1``, the normalization factor - ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` - as it should. - - Examples - -------- - Consider two variables, :math:`x_0` and :math:`x_1`, which - correlate perfectly, but in opposite directions: - - >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T - >>> x - array([[0, 1, 2], - [2, 1, 0]]) - - Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance - matrix shows this clearly: - - >>> np.cov(x) - array([[ 1., -1.], - [-1., 1.]]) - - Note that element :math:`C_{0,1}`, which shows the correlation between - :math:`x_0` and :math:`x_1`, is negative. 
- - Further, note how `x` and `y` are combined: - - >>> x = [-2.1, -1, 4.3] - >>> y = [3, 1.1, 0.12] - >>> X = np.stack((x, y), axis=0) - >>> np.cov(X) - array([[11.71 , -4.286 ], # may vary - [-4.286 , 2.144133]]) - >>> np.cov(x, y) - array([[11.71 , -4.286 ], # may vary - [-4.286 , 2.144133]]) - >>> np.cov(x) - array(11.71) - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError( - "ddof must be integer") - - # Handles complex arrays too - m = np.asarray(m) - if m.ndim > 2: - raise ValueError("m has more than 2 dimensions") - - if y is None: - dtype = np.result_type(m, np.float64) - else: - y = np.asarray(y) - if y.ndim > 2: - raise ValueError("y has more than 2 dimensions") - dtype = np.result_type(m, y, np.float64) - - X = array(m, ndmin=2, dtype=dtype) - if not rowvar and X.shape[0] != 1: - X = X.T - if X.shape[0] == 0: - return np.array([]).reshape(0, 0) - if y is not None: - y = array(y, copy=False, ndmin=2, dtype=dtype) - if not rowvar and y.shape[0] != 1: - y = y.T - X = np.concatenate((X, y), axis=0) - - if ddof is None: - if bias == 0: - ddof = 1 - else: - ddof = 0 - - # Get the product of frequencies and weights - w = None - if fweights is not None: - fweights = np.asarray(fweights, dtype=float) - if not np.all(fweights == np.around(fweights)): - raise TypeError( - "fweights must be integer") - if fweights.ndim > 1: - raise RuntimeError( - "cannot handle multidimensional fweights") - if fweights.shape[0] != X.shape[1]: - raise RuntimeError( - "incompatible numbers of samples and fweights") - if any(fweights < 0): - raise ValueError( - "fweights cannot be negative") - w = fweights - if aweights is not None: - aweights = np.asarray(aweights, dtype=float) - if aweights.ndim > 1: - raise RuntimeError( - "cannot handle multidimensional aweights") - if aweights.shape[0] != X.shape[1]: - raise RuntimeError( - "incompatible numbers of samples and aweights") - if any(aweights < 0): - raise ValueError( - "aweights cannot be negative") - if w is None: - w = aweights - else: - w *= aweights - - avg, w_sum = average(X, axis=1, weights=w, returned=True) - w_sum = w_sum[0] - - # Determine the normalization - if w is None: - fact = X.shape[1] - ddof - elif ddof == 0: - fact = w_sum - elif aweights is None: - fact = w_sum - ddof - else: - fact = w_sum - ddof*sum(w*aweights)/w_sum - - if fact <= 0: - warnings.warn("Degrees of freedom <= 0 for slice", - RuntimeWarning, stacklevel=3) - fact = 0.0 - - X -= avg[:, None] - if w is None: - X_T = X.T - else: - X_T = (X*w).T - c = dot(X, X_T.conj()) - c *= np.true_divide(1, fact) - return c.squeeze() - - -def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None): - return (x, y) - - -@array_function_dispatch(_corrcoef_dispatcher) -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue): - """ - Return Pearson product-moment correlation coefficients. - - Please refer to the documentation for `cov` for more detail. The - relationship between the correlation coefficient matrix, `R`, and the - covariance matrix, `C`, is - - .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } - - The values of `R` are between -1 and 1, inclusive. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - shape as `x`. 
- rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - ddof : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - - Returns - ------- - R : ndarray - The correlation coefficient matrix of the variables. - - See Also - -------- - cov : Covariance matrix - - Notes - ----- - Due to floating point rounding the resulting array may not be Hermitian, - the diagonal elements may not be 1, and the elements may not satisfy the - inequality abs(a) <= 1. The real and imaginary parts are clipped to the - interval [-1, 1] in an attempt to improve on that situation but is not - much help in the complex case. - - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - - """ - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn('bias and ddof have no effect and are deprecated', - DeprecationWarning, stacklevel=3) - c = cov(x, y, rowvar) - try: - d = diag(c) - except ValueError: - # scalar covariance - # nan if incorrect value (nan, inf, 0), 1 otherwise - return c / c - stddev = sqrt(d.real) - c /= stddev[:, None] - c /= stddev[None, :] - - # Clip real and imaginary parts to [-1, 1]. This does not guarantee - # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without - # excessive work. - np.clip(c.real, -1, 1, out=c.real) - if np.iscomplexobj(c): - np.clip(c.imag, -1, 1, out=c.imag) - - return c - - -@set_module('numpy') -def blackman(M): - """ - Return the Blackman window. - - The Blackman window is a taper formed by using the first three - terms of a summation of cosines. It was designed to have close to the - minimal leakage possible. It is close to optimal, only slightly worse - than a Kaiser window. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an empty - array is returned. - - Returns - ------- - out : ndarray - The window, with the maximum value normalized to one (the value one - appears only if the number of samples is odd). - - See Also - -------- - bartlett, hamming, hanning, kaiser - - Notes - ----- - The Blackman window is defined as - - .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) - - Most references to the Blackman window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. It is known as a - "near optimal" tapering function, almost as good (by some measures) - as the kaiser window. - - References - ---------- - Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, - Dover Publications, New York. - - Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. - Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
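As a quick check of the definition above (a sketch): the window end
points are 0.42 - 0.5 + 0.08 = 0 up to rounding, and the midpoint of an
odd-length window is exactly 0.42 + 0.5 + 0.08 = 1:

>>> np.allclose(np.blackman(5), [0, 0.34, 1, 0.34, 0])
True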
- - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> np.blackman(12) - array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary - 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, - 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, - 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) - - Plot the window and the frequency response: - - >>> from numpy.fft import fft, fftshift - >>> window = np.blackman(51) - >>> plt.plot(window) - [<matplotlib.lines.Line2D object at 0x...>] - >>> plt.title("Blackman window") - Text(0.5, 1.0, 'Blackman window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() - <Figure size 640x480 with 0 Axes>
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> with np.errstate(divide='ignore', invalid='ignore'): - ... response = 20 * np.log10(mag) - ... - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Blackman window") - Text(0.5, 1.0, 'Frequency response of Blackman window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> _ = plt.axis('tight') - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) - - -@set_module('numpy') -def bartlett(M): - """ - Return the Bartlett window. - - The Bartlett window is very similar to a triangular window, except - that the end points are at zero. It is often used in signal - processing for tapering a signal, without generating too much - ripple in the frequency domain. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : array - The triangular window, with the maximum value normalized to one - (the value one appears only if the number of samples is odd), with - the first and last samples equal to zero. - - See Also - -------- - blackman, hamming, hanning, kaiser - - Notes - ----- - The Bartlett window is defined as - - .. math:: w(n) = \\frac{2}{M-1} \\left( - \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| - \\right) - - Most references to the Bartlett window come from the signal - processing literature, where it is used as one of many windowing - functions for smoothing values. Note that convolution with this - window produces linear interpolation. It is also known as an - apodization (which means"removing the foot", i.e. smoothing - discontinuities at the beginning and end of the sampled signal) or - tapering function. The fourier transform of the Bartlett is the product - of two sinc functions. - Note the excellent discussion in Kanasewich. - - References - ---------- - .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", - Biometrika 37, 1-16, 1950. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", - The University of Alberta Press, 1975, pp. 109-110. - .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal - Processing", Prentice-Hall, 1999, pp. 468-471. - .. [4] Wikipedia, "Window function", - https://en.wikipedia.org/wiki/Window_function - .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 429. - - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> np.bartlett(12) - array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary - 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, - 0.18181818, 0. ]) - - Plot the window and its frequency response (requires SciPy and matplotlib): - - >>> from numpy.fft import fft, fftshift - >>> window = np.bartlett(51) - >>> plt.plot(window) - [] - >>> plt.title("Bartlett window") - Text(0.5, 1.0, 'Bartlett window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> with np.errstate(divide='ignore', invalid='ignore'): - ... response = 20 * np.log10(mag) - ... - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Bartlett window") - Text(0.5, 1.0, 'Frequency response of Bartlett window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> _ = plt.axis('tight') - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) - - -@set_module('numpy') -def hanning(M): - """ - Return the Hanning window. - - The Hanning window is a taper formed by using a weighted cosine. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : ndarray, shape(M,) - The window, with the maximum value normalized to one (the value - one appears only if `M` is odd). - - See Also - -------- - bartlett, blackman, hamming, kaiser - - Notes - ----- - The Hanning window is defined as - - .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) - \\qquad 0 \\leq n \\leq M-1 - - The Hanning was named for Julius von Hann, an Austrian meteorologist. - It is also known as the Cosine Bell. Some authors prefer that it be - called a Hann window, to help avoid confusion with the very similar - Hamming window. - - Most references to the Hanning window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power - spectra, Dover Publications, New York. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", - The University of Alberta Press, 1975, pp. 106-108. - .. [3] Wikipedia, "Window function", - https://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 425. - - Examples - -------- - >>> np.hanning(12) - array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, - 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, - 0.07937323, 0. ]) - - Plot the window and its frequency response: - - >>> import matplotlib.pyplot as plt - >>> from numpy.fft import fft, fftshift - >>> window = np.hanning(51) - >>> plt.plot(window) - [] - >>> plt.title("Hann window") - Text(0.5, 1.0, 'Hann window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> with np.errstate(divide='ignore', invalid='ignore'): - ... response = 20 * np.log10(mag) - ... - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of the Hann window") - Text(0.5, 1.0, 'Frequency response of the Hann window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> plt.axis('tight') - ... - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) - - -@set_module('numpy') -def hamming(M): - """ - Return the Hamming window. - - The Hamming window is a taper formed by using a weighted cosine. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : ndarray - The window, with the maximum value normalized to one (the value - one appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hanning, kaiser - - Notes - ----- - The Hamming window is defined as - - .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) - \\qquad 0 \\leq n \\leq M-1 - - The Hamming was named for R. W. Hamming, an associate of J. W. Tukey - and is described in Blackman and Tukey. It was recommended for - smoothing the truncated autocovariance function in the time domain. - Most references to the Hamming window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power - spectra, Dover Publications, New York. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The - University of Alberta Press, 1975, pp. 109-110. - .. [3] Wikipedia, "Window function", - https://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 425. - - Examples - -------- - >>> np.hamming(12) - array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary - 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, - 0.15302337, 0.08 ]) - - Plot the window and the frequency response: - - >>> import matplotlib.pyplot as plt - >>> from numpy.fft import fft, fftshift - >>> window = np.hamming(51) - >>> plt.plot(window) - [] - >>> plt.title("Hamming window") - Text(0.5, 1.0, 'Hamming window') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("Sample") - Text(0.5, 0, 'Sample') - >>> plt.show() - - >>> plt.figure() -
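Hanning and Hamming are both two-term generalized cosine windows, w(n) = a - (1 - a)cos(2*pi*n/(M-1)), with a = 0.5 and a = 0.54 respectively; a small sketch (the helper gencos is illustrative, not from the file):

    import numpy as np

    def gencos(M, a):
        # illustrative two-term generalized cosine window
        n = np.arange(M)
        return a - (1.0 - a) * np.cos(2.0 * np.pi * n / (M - 1))

    assert np.allclose(gencos(12, 0.50), np.hanning(12))
    assert np.allclose(gencos(12, 0.54), np.hamming(12))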
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Hamming window") - Text(0.5, 1.0, 'Frequency response of Hamming window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> plt.axis('tight') - ... - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) - -## Code from cephes for i0 - -_i0A = [ - -4.41534164647933937950E-18, - 3.33079451882223809783E-17, - -2.43127984654795469359E-16, - 1.71539128555513303061E-15, - -1.16853328779934516808E-14, - 7.67618549860493561688E-14, - -4.85644678311192946090E-13, - 2.95505266312963983461E-12, - -1.72682629144155570723E-11, - 9.67580903537323691224E-11, - -5.18979560163526290666E-10, - 2.65982372468238665035E-9, - -1.30002500998624804212E-8, - 6.04699502254191894932E-8, - -2.67079385394061173391E-7, - 1.11738753912010371815E-6, - -4.41673835845875056359E-6, - 1.64484480707288970893E-5, - -5.75419501008210370398E-5, - 1.88502885095841655729E-4, - -5.76375574538582365885E-4, - 1.63947561694133579842E-3, - -4.32430999505057594430E-3, - 1.05464603945949983183E-2, - -2.37374148058994688156E-2, - 4.93052842396707084878E-2, - -9.49010970480476444210E-2, - 1.71620901522208775349E-1, - -3.04682672343198398683E-1, - 6.76795274409476084995E-1 - ] - -_i0B = [ - -7.23318048787475395456E-18, - -4.83050448594418207126E-18, - 4.46562142029675999901E-17, - 3.46122286769746109310E-17, - -2.82762398051658348494E-16, - -3.42548561967721913462E-16, - 1.77256013305652638360E-15, - 3.81168066935262242075E-15, - -9.55484669882830764870E-15, - -4.15056934728722208663E-14, - 1.54008621752140982691E-14, - 3.85277838274214270114E-13, - 7.18012445138366623367E-13, - -1.79417853150680611778E-12, - -1.32158118404477131188E-11, - -3.14991652796324136454E-11, - 1.18891471078464383424E-11, - 4.94060238822496958910E-10, - 3.39623202570838634515E-9, - 2.26666899049817806459E-8, - 2.04891858946906374183E-7, - 2.89137052083475648297E-6, - 6.88975834691682398426E-5, - 3.36911647825569408990E-3, - 8.04490411014108831608E-1 - ] - - -def _chbevl(x, vals): - b0 = vals[0] - b1 = 0.0 - - for i in range(1, len(vals)): - b2 = b1 - b1 = b0 - b0 = x*b1 - b2 + vals[i] - - return 0.5*(b0 - b2) - - -def _i0_1(x): - return exp(x) * _chbevl(x/2.0-2, _i0A) - - -def _i0_2(x): - return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) - - -def _i0_dispatcher(x): - return (x,) - - -@array_function_dispatch(_i0_dispatcher) -def i0(x): - """ - Modified Bessel function of the first kind, order 0. - - Usually denoted :math:`I_0`. This function does broadcast, but will *not* - "up-cast" int dtype arguments unless accompanied by at least one float or - complex dtype argument (see Raises below). - - Parameters - ---------- - x : array_like, dtype float or complex - Argument of the Bessel function. - - Returns - ------- - out : ndarray, shape = x.shape, dtype = x.dtype - The modified Bessel function evaluated at each of the elements of `x`. - - Raises - ------ - TypeError: array cannot be safely cast to required type - If argument consists exclusively of int dtypes. 
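The _chbevl helper above is the cephes Clenshaw recurrence: it evaluates a Chebyshev series in T_k(x/2), coefficients ordered from highest degree down, with the constant term counting half. A sketch checking that reading against numpy.polynomial (the standalone chbevl copy is for illustration):

    import numpy as np
    from numpy.polynomial import chebyshev as Ch

    def chbevl(x, vals):
        # Clenshaw recurrence, mirroring the deleted source above
        b0, b1, b2 = vals[0], 0.0, 0.0
        for v in vals[1:]:
            b2, b1 = b1, b0
            b0 = x * b1 - b2 + v
        return 0.5 * (b0 - b2)

    vals = [3.0, -2.0, 5.0, 1.0]      # arbitrary series, highest order first
    c = np.array(vals[::-1])          # lowest order first for chebval
    c[0] *= 0.5                       # the T_0 coefficient counts half
    assert np.allclose(chbevl(1.7, vals), Ch.chebval(1.7 / 2.0, c))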
- - See Also - -------- - scipy.special.i0, scipy.special.iv, scipy.special.ive - - Notes - ----- - The scipy implementation is recommended over this function: it is a - proper ufunc written in C, and more than an order of magnitude faster. - - We use the algorithm published by Clenshaw [1]_ and referenced by - Abramowitz and Stegun [2]_, for which the function domain is - partitioned into the two intervals [0,8] and (8,inf), and Chebyshev - polynomial expansions are employed in each interval. Relative error on - the domain [0,30] using IEEE arithmetic is documented [3]_ as having a - peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). - - References - ---------- - .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in - *National Physical Laboratory Mathematical Tables*, vol. 5, London: - Her Majesty's Stationery Office, 1962. - .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical - Functions*, 10th printing, New York: Dover, 1964, pp. 379. - http://www.math.sfu.ca/~cbm/aands/page_379.htm - .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html - - Examples - -------- - >>> np.i0(0.) - array(1.0) # may vary - >>> np.i0([0., 1. + 2j]) - array([ 1.00000000+0.j , 0.18785373+0.64616944j]) # may vary - - """ - x = np.asanyarray(x) - x = np.abs(x) - return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) - -## End of cephes code for i0 - - -@set_module('numpy') -def kaiser(M, beta): - """ - Return the Kaiser window. - - The Kaiser window is a taper formed by using a Bessel function. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - beta : float - Shape parameter for window. - - Returns - ------- - out : array - The window, with the maximum value normalized to one (the value - one appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hamming, hanning - - Notes - ----- - The Kaiser window is defined as - - .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} - \\right)/I_0(\\beta) - - with - - .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, - - where :math:`I_0` is the modified zeroth-order Bessel function. - - The Kaiser was named for Jim Kaiser, who discovered a simple - approximation to the DPSS window based on Bessel functions. The Kaiser - window is a very good approximation to the Digital Prolate Spheroidal - Sequence, or Slepian window, which is the transform which maximizes the - energy in the main lobe of the window relative to total energy. - - The Kaiser can approximate many other windows by varying the beta - parameter. - - ==== ======================= - beta Window shape - ==== ======================= - 0 Rectangular - 5 Similar to a Hamming - 6 Similar to a Hanning - 8.6 Similar to a Blackman - ==== ======================= - - A beta value of 14 is probably a good starting point. Note that as beta - gets large, the window narrows, and so the number of samples needs to be - large enough to sample the increasingly narrow spike, otherwise NaNs will - get returned. - - Most references to the Kaiser window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] J. F. 
Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
-           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
-           John Wiley and Sons, New York, (1966).
-    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
-           University of Alberta Press, 1975, pp. 177-178.
-    .. [3] Wikipedia, "Window function",
-           https://en.wikipedia.org/wiki/Window_function
-
-    Examples
-    --------
-    >>> import matplotlib.pyplot as plt
-    >>> np.kaiser(12, 14)
-    array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
-           2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
-           9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
-           4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
-
-
-    Plot the window and the frequency response:
-
-    >>> from numpy.fft import fft, fftshift
-    >>> window = np.kaiser(51, 14)
-    >>> plt.plot(window)
-    [<matplotlib.lines.Line2D object at 0x...>]
-    >>> plt.title("Kaiser window")
-    Text(0.5, 1.0, 'Kaiser window')
-    >>> plt.ylabel("Amplitude")
-    Text(0, 0.5, 'Amplitude')
-    >>> plt.xlabel("Sample")
-    Text(0.5, 0, 'Sample')
-    >>> plt.show()
-
-    >>> plt.figure()
-    <Figure size 640x480 with 0 Axes>
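The beta table above can be spot-checked: beta = 0 reproduces the rectangular window exactly, and larger beta pushes the endpoints toward zero. A minimal sketch:

    import numpy as np

    assert np.allclose(np.kaiser(51, 0.0), np.ones(51))   # beta = 0: rectangular
    # larger beta -> smaller endpoint values (a narrower effective window):
    assert np.kaiser(51, 14.0)[0] < np.kaiser(51, 5.0)[0]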
- >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Kaiser window") - Text(0.5, 1.0, 'Frequency response of Kaiser window') - >>> plt.ylabel("Magnitude [dB]") - Text(0, 0.5, 'Magnitude [dB]') - >>> plt.xlabel("Normalized frequency [cycles per sample]") - Text(0.5, 0, 'Normalized frequency [cycles per sample]') - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) # may vary - >>> plt.show() - - """ - from numpy.dual import i0 - if M == 1: - return np.array([1.]) - n = arange(0, M) - alpha = (M-1)/2.0 - return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) - - -def _sinc_dispatcher(x): - return (x,) - - -@array_function_dispatch(_sinc_dispatcher) -def sinc(x): - """ - Return the sinc function. - - The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. - - Parameters - ---------- - x : ndarray - Array (possibly multi-dimensional) of values for which to to - calculate ``sinc(x)``. - - Returns - ------- - out : ndarray - ``sinc(x)``, which has the same shape as the input. - - Notes - ----- - ``sinc(0)`` is the limit value 1. - - The name sinc is short for "sine cardinal" or "sinus cardinalis". - - The sinc function is used in various signal processing applications, - including in anti-aliasing, in the construction of a Lanczos resampling - filter, and in interpolation. - - For bandlimited interpolation of discrete-time signals, the ideal - interpolation kernel is proportional to the sinc function. - - References - ---------- - .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web - Resource. http://mathworld.wolfram.com/SincFunction.html - .. [2] Wikipedia, "Sinc function", - https://en.wikipedia.org/wiki/Sinc_function - - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-4, 4, 41) - >>> np.sinc(x) - array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary - -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, - 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, - 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, - -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, - 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, - 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, - 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, - 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, - -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, - -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, - 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, - -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, - -4.92362781e-02, -3.89804309e-17]) - - >>> plt.plot(x, np.sinc(x)) - [] - >>> plt.title("Sinc Function") - Text(0.5, 1.0, 'Sinc Function') - >>> plt.ylabel("Amplitude") - Text(0, 0.5, 'Amplitude') - >>> plt.xlabel("X") - Text(0.5, 0, 'X') - >>> plt.show() - - """ - x = np.asanyarray(x) - y = pi * where(x == 0, 1.0e-20, x) - return sin(y)/y - - -def _msort_dispatcher(a): - return (a,) - - -@array_function_dispatch(_msort_dispatcher) -def msort(a): - """ - Return a copy of an array sorted along the first axis. - - Parameters - ---------- - a : array_like - Array to be sorted. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - sort - - Notes - ----- - ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. 
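A one-line check of that equivalence (illustrative):

    import numpy as np

    a = np.array([[3, 1],
                  [2, 4]])
    assert np.array_equal(np.msort(a), np.sort(a, axis=0))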
- - """ - b = array(a, subok=True, copy=True) - b.sort(0) - return b - - -def _ureduce(a, func, **kwargs): - """ - Internal Function. - Call `func` with `a` as first argument swapping the axes to use extended - axis on functions that don't support it natively. - - Returns result and a.shape with axis dims set to 1. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - func : callable - Reduction function capable of receiving a single axis argument. - It is called with `a` as first argument followed by `kwargs`. - kwargs : keyword arguments - additional keyword arguments to pass to `func`. - - Returns - ------- - result : tuple - Result of func(a, **kwargs) and a.shape with axis dims set to 1 - which can be used to reshape the result to the same shape a ufunc with - keepdims=True would produce. - - """ - a = np.asanyarray(a) - axis = kwargs.get('axis', None) - if axis is not None: - keepdim = list(a.shape) - nd = a.ndim - axis = _nx.normalize_axis_tuple(axis, nd) - - for ax in axis: - keepdim[ax] = 1 - - if len(axis) == 1: - kwargs['axis'] = axis[0] - else: - keep = set(range(nd)) - set(axis) - nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) - kwargs['axis'] = -1 - keepdim = tuple(keepdim) - else: - keepdim = (1,) * a.ndim - - r = func(a, **kwargs) - return r, keepdim - - -def _median_dispatcher( - a, axis=None, out=None, overwrite_input=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_median_dispatcher) -def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): - """ - Compute the median along the specified axis. - - Returns the median of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : {int, sequence of int, None}, optional - Axis or axes along which the medians are computed. The default - is to compute the median along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for - calculations. The input array will be modified by the call to - `median`. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. If `overwrite_input` is ``True`` and `a` is not already an - `ndarray`, an error will be raised. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - .. versionadded:: 1.9.0 - - Returns - ------- - median : ndarray - A new array holding the result. If the input contains integers - or floats smaller than ``float64``, then the output data-type is - ``np.float64``. Otherwise, the data-type of the output is the - same as that of the input. If `out` is specified, that array is - returned instead. 
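The `_median` helper that follows avoids a full sort by partitioning around the middle order statistics; the same idea using only the public API (a sketch, not the library code):

    import numpy as np

    a = np.array([7., 1., 5., 3., 9., 2.])
    sz = a.size
    part = np.partition(a, [sz // 2 - 1, sz // 2])  # place the two middle order statistics
    med = 0.5 * (part[sz // 2 - 1] + part[sz // 2])
    assert med == np.median(a)                      # 4.0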
- - See Also - -------- - mean, percentile - - Notes - ----- - Given a vector ``V`` of length ``N``, the median of ``V`` is the - middle value of a sorted copy of ``V``, ``V_sorted`` - i - e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the - two middle values of ``V_sorted`` when ``N`` is even. - - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.median(a) - 3.5 - >>> np.median(a, axis=0) - array([6.5, 4.5, 2.5]) - >>> np.median(a, axis=1) - array([7., 2.]) - >>> m = np.median(a, axis=0) - >>> out = np.zeros_like(m) - >>> np.median(a, axis=0, out=m) - array([6.5, 4.5, 2.5]) - >>> m - array([6.5, 4.5, 2.5]) - >>> b = a.copy() - >>> np.median(b, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.median(b, axis=None, overwrite_input=True) - 3.5 - >>> assert not np.all(a==b) - - """ - r, k = _ureduce(a, func=_median, axis=axis, out=out, - overwrite_input=overwrite_input) - if keepdims: - return r.reshape(k) - else: - return r - -def _median(a, axis=None, out=None, overwrite_input=False): - # can't be reasonably be implemented in terms of percentile as we have to - # call mean to not break astropy - a = np.asanyarray(a) - - # Set the partition indexes - if axis is None: - sz = a.size - else: - sz = a.shape[axis] - if sz % 2 == 0: - szh = sz // 2 - kth = [szh - 1, szh] - else: - kth = [(sz - 1) // 2] - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - kth.append(-1) - - if overwrite_input: - if axis is None: - part = a.ravel() - part.partition(kth) - else: - a.partition(kth, axis=axis) - part = a - else: - part = partition(a, kth, axis=axis) - - if part.shape == (): - # make 0-D arrays work - return part.item() - if axis is None: - axis = 0 - - indexer = [slice(None)] * part.ndim - index = part.shape[axis] // 2 - if part.shape[axis] % 2 == 1: - # index with slice to allow mean (below) to work - indexer[axis] = slice(index, index+1) - else: - indexer[axis] = slice(index-1, index+1) - indexer = tuple(indexer) - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact) and sz > 0: - # warn and return nans like mean would - rout = mean(part[indexer], axis=axis, out=out) - return np.lib.utils._median_nancheck(part, rout, axis, out) - else: - # if there are no nans - # Use mean in odd and even case to coerce data type - # and check, use out array. - return mean(part[indexer], axis=axis, out=out) - - -def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_percentile_dispatcher) -def percentile(a, q, axis=None, out=None, - overwrite_input=False, interpolation='linear', keepdims=False): - """ - Compute the q-th percentile of the data along the specified axis. - - Returns the q-th percentile(s) of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - q : array_like of float - Percentile or sequence of percentiles to compute, which must be between - 0 and 100 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the percentiles are computed. The - default is to compute the percentile(s) along a flattened - version of the array. - - .. versionchanged:: 1.9.0 - A tuple of axes is supported - out : ndarray, optional - Alternative output array in which to place the result. 
It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. In this case, the contents of the input - `a` after this function completes is undefined. - - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired percentile lies between two data points - ``i < j``: - - * 'linear': ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * 'lower': ``i``. - * 'higher': ``j``. - * 'nearest': ``i`` or ``j``, whichever is nearest. - * 'midpoint': ``(i + j) / 2``. - - .. versionadded:: 1.9.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - .. versionadded:: 1.9.0 - - Returns - ------- - percentile : scalar or ndarray - If `q` is a single percentile and `axis=None`, then the result - is a scalar. If multiple percentiles are given, first axis of - the result corresponds to the percentiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - mean - median : equivalent to ``percentile(..., 50)`` - nanpercentile - quantile : equivalent to percentile, except with q in the range [0, 1]. - - Notes - ----- - Given a vector ``V`` of length ``N``, the q-th percentile of - ``V`` is the value ``q/100`` of the way from the minimum to the - maximum in a sorted copy of ``V``. The values and distances of - the two nearest neighbors as well as the `interpolation` parameter - will determine the percentile if the normalized ranking does not - match the location of ``q`` exactly. This function is the same as - the median if ``q=50``, the same as the minimum if ``q=0`` and the - same as the maximum if ``q=100``. - - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.percentile(a, 50) - 3.5 - >>> np.percentile(a, 50, axis=0) - array([6.5, 4.5, 2.5]) - >>> np.percentile(a, 50, axis=1) - array([7., 2.]) - >>> np.percentile(a, 50, axis=1, keepdims=True) - array([[7.], - [2.]]) - - >>> m = np.percentile(a, 50, axis=0) - >>> out = np.zeros_like(m) - >>> np.percentile(a, 50, axis=0, out=out) - array([6.5, 4.5, 2.5]) - >>> m - array([6.5, 4.5, 2.5]) - - >>> b = a.copy() - >>> np.percentile(b, 50, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a == b) - - The different types of interpolation can be visualized graphically: - - .. 
plot:: - - import matplotlib.pyplot as plt - - a = np.arange(4) - p = np.linspace(0, 100, 6001) - ax = plt.gca() - lines = [ - ('linear', None), - ('higher', '--'), - ('lower', '--'), - ('nearest', '-.'), - ('midpoint', '-.'), - ] - for interpolation, style in lines: - ax.plot( - p, np.percentile(a, p, interpolation=interpolation), - label=interpolation, linestyle=style) - ax.set( - title='Interpolation methods for list: ' + str(a), - xlabel='Percentile', - ylabel='List item returned', - yticks=a) - ax.legend() - plt.show() - - """ - q = np.true_divide(q, 100) - q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105) - if not _quantile_is_valid(q): - raise ValueError("Percentiles must be in the range [0, 100]") - return _quantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_quantile_dispatcher) -def quantile(a, q, axis=None, out=None, - overwrite_input=False, interpolation='linear', keepdims=False): - """ - Compute the q-th quantile of the data along the specified axis. - - .. versionadded:: 1.15.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - q : array_like of float - Quantile or sequence of quantiles to compute, which must be between - 0 and 1 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the quantiles are computed. The - default is to compute the quantile(s) along a flattened - version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. In this case, the contents of the input - `a` after this function completes is undefined. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired quantile lies between two data points - ``i < j``: - - * linear: ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * lower: ``i``. - * higher: ``j``. - * nearest: ``i`` or ``j``, whichever is nearest. - * midpoint: ``(i + j) / 2``. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - Returns - ------- - quantile : scalar or ndarray - If `q` is a single quantile and `axis=None`, then the result - is a scalar. If multiple quantiles are given, first axis of - the result corresponds to the quantiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - mean - percentile : equivalent to quantile, but with q in the range [0, 100]. 
- median : equivalent to ``quantile(..., 0.5)`` - nanquantile - - Notes - ----- - Given a vector ``V`` of length ``N``, the q-th quantile of - ``V`` is the value ``q`` of the way from the minimum to the - maximum in a sorted copy of ``V``. The values and distances of - the two nearest neighbors as well as the `interpolation` parameter - will determine the quantile if the normalized ranking does not - match the location of ``q`` exactly. This function is the same as - the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the - same as the maximum if ``q=1.0``. - - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.quantile(a, 0.5) - 3.5 - >>> np.quantile(a, 0.5, axis=0) - array([6.5, 4.5, 2.5]) - >>> np.quantile(a, 0.5, axis=1) - array([7., 2.]) - >>> np.quantile(a, 0.5, axis=1, keepdims=True) - array([[7.], - [2.]]) - >>> m = np.quantile(a, 0.5, axis=0) - >>> out = np.zeros_like(m) - >>> np.quantile(a, 0.5, axis=0, out=out) - array([6.5, 4.5, 2.5]) - >>> m - array([6.5, 4.5, 2.5]) - >>> b = a.copy() - >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a == b) - """ - q = np.asanyarray(q) - if not _quantile_is_valid(q): - raise ValueError("Quantiles must be in the range [0, 1]") - return _quantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): - """Assumes that q is in [0, 1], and is an ndarray""" - r, k = _ureduce(a, func=_quantile_ureduce_func, q=q, axis=axis, out=out, - overwrite_input=overwrite_input, - interpolation=interpolation) - if keepdims: - return r.reshape(q.shape + k) - else: - return r - - -def _quantile_is_valid(q): - # avoid expensive reductions, relevant for arrays with < O(1000) elements - if q.ndim == 1 and q.size < 10: - for i in range(q.size): - if q[i] < 0.0 or q[i] > 1.0: - return False - else: - # faster than any() - if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0): - return False - return True - - -def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): - a = asarray(a) - if q.ndim == 0: - # Do not allow 0-d arrays because following code fails for scalar - zerod = True - q = q[None] - else: - zerod = False - - # prepare a for partitioning - if overwrite_input: - if axis is None: - ap = a.ravel() - else: - ap = a - else: - if axis is None: - ap = a.flatten() - else: - ap = a.copy() - - if axis is None: - axis = 0 - - Nx = ap.shape[axis] - indices = q * (Nx - 1) - - # round fractional indices according to interpolation method - if interpolation == 'lower': - indices = floor(indices).astype(intp) - elif interpolation == 'higher': - indices = ceil(indices).astype(intp) - elif interpolation == 'midpoint': - indices = 0.5 * (floor(indices) + ceil(indices)) - elif interpolation == 'nearest': - indices = around(indices).astype(intp) - elif interpolation == 'linear': - pass # keep index as fraction and interpolate - else: - raise ValueError( - "interpolation can only be 'linear', 'lower' 'higher', " - "'midpoint', or 'nearest'") - - n = np.array(False, dtype=bool) # check for nan's flag - if indices.dtype == intp: # take the points along axis - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices = concatenate((indices, [-1])) - - ap.partition(indices, axis=axis) - # ensure axis with q-th is first - 
ap = np.moveaxis(ap, axis, 0) - axis = 0 - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices = indices[:-1] - n = np.isnan(ap[-1:, ...]) - - if zerod: - indices = indices[0] - r = take(ap, indices, axis=axis, out=out) - - - else: # weight the points above and below the indices - indices_below = floor(indices).astype(intp) - indices_above = indices_below + 1 - indices_above[indices_above > Nx - 1] = Nx - 1 - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices_above = concatenate((indices_above, [-1])) - - weights_above = indices - indices_below - weights_below = 1 - weights_above - - weights_shape = [1, ] * ap.ndim - weights_shape[axis] = len(indices) - weights_below.shape = weights_shape - weights_above.shape = weights_shape - - ap.partition(concatenate((indices_below, indices_above)), axis=axis) - - # ensure axis with q-th is first - ap = np.moveaxis(ap, axis, 0) - weights_below = np.moveaxis(weights_below, axis, 0) - weights_above = np.moveaxis(weights_above, axis, 0) - axis = 0 - - # Check if the array contains any nan's - if np.issubdtype(a.dtype, np.inexact): - indices_above = indices_above[:-1] - n = np.isnan(ap[-1:, ...]) - - x1 = take(ap, indices_below, axis=axis) * weights_below - x2 = take(ap, indices_above, axis=axis) * weights_above - - # ensure axis with q-th is first - x1 = np.moveaxis(x1, axis, 0) - x2 = np.moveaxis(x2, axis, 0) - - if zerod: - x1 = x1.squeeze(0) - x2 = x2.squeeze(0) - - if out is not None: - r = add(x1, x2, out=out) - else: - r = add(x1, x2) - - if np.any(n): - if zerod: - if ap.ndim == 1: - if out is not None: - out[...] = a.dtype.type(np.nan) - r = out - else: - r = a.dtype.type(np.nan) - else: - r[..., n.squeeze(0)] = a.dtype.type(np.nan) - else: - if r.ndim == 1: - r[:] = a.dtype.type(np.nan) - else: - r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) - - return r - - -def _trapz_dispatcher(y, x=None, dx=None, axis=None): - return (y, x) - - -@array_function_dispatch(_trapz_dispatcher) -def trapz(y, x=None, dx=1.0, axis=-1): - """ - Integrate along the given axis using the composite trapezoidal rule. - - Integrate `y` (`x`) along given axis. - - Parameters - ---------- - y : array_like - Input array to integrate. - x : array_like, optional - The sample points corresponding to the `y` values. If `x` is None, - the sample points are assumed to be evenly spaced `dx` apart. The - default is None. - dx : scalar, optional - The spacing between sample points when `x` is None. The default is 1. - axis : int, optional - The axis along which to integrate. - - Returns - ------- - trapz : float - Definite integral as approximated by trapezoidal rule. - - See Also - -------- - sum, cumsum - - Notes - ----- - Image [2]_ illustrates trapezoidal rule -- y-axis locations of points - will be taken from `y` array, by default x-axis distances between - points will be 1.0, alternatively they can be provided with `x` array - or with `dx` scalar. Return value will be equal to combined area under - the red lines. - - - References - ---------- - .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule - - .. 
[2] Illustration image: - https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png - - Examples - -------- - >>> np.trapz([1,2,3]) - 4.0 - >>> np.trapz([1,2,3], x=[4,6,8]) - 8.0 - >>> np.trapz([1,2,3], dx=2) - 8.0 - >>> a = np.arange(6).reshape(2, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.trapz(a, axis=0) - array([1.5, 2.5, 3.5]) - >>> np.trapz(a, axis=1) - array([2., 8.]) - - """ - y = asanyarray(y) - if x is None: - d = dx - else: - x = asanyarray(x) - if x.ndim == 1: - d = diff(x) - # reshape to correct shape - shape = [1]*y.ndim - shape[axis] = d.shape[0] - d = d.reshape(shape) - else: - d = diff(x, axis=axis) - nd = y.ndim - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd - slice1[axis] = slice(1, None) - slice2[axis] = slice(None, -1) - try: - ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) - except ValueError: - # Operations didn't work, cast to ndarray - d = np.asarray(d) - y = np.asarray(y) - ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) - return ret - - -def _meshgrid_dispatcher(*xi, **kwargs): - return xi - - -# Based on scitools meshgrid -@array_function_dispatch(_meshgrid_dispatcher) -def meshgrid(*xi, **kwargs): - """ - Return coordinate matrices from coordinate vectors. - - Make N-D coordinate arrays for vectorized evaluations of - N-D scalar/vector fields over N-D grids, given - one-dimensional coordinate arrays x1, x2,..., xn. - - .. versionchanged:: 1.9 - 1-D and 0-D cases are allowed. - - Parameters - ---------- - x1, x2,..., xn : array_like - 1-D arrays representing the coordinates of a grid. - indexing : {'xy', 'ij'}, optional - Cartesian ('xy', default) or matrix ('ij') indexing of output. - See Notes for more details. - - .. versionadded:: 1.7.0 - sparse : bool, optional - If True a sparse grid is returned in order to conserve memory. - Default is False. - - .. versionadded:: 1.7.0 - copy : bool, optional - If False, a view into the original arrays are returned in order to - conserve memory. Default is True. Please note that - ``sparse=False, copy=False`` will likely return non-contiguous - arrays. Furthermore, more than one element of a broadcast array - may refer to a single memory location. If you need to write to the - arrays, make copies first. - - .. versionadded:: 1.7.0 - - Returns - ------- - X1, X2,..., XN : ndarray - For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , - return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' - or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' - with the elements of `xi` repeated to fill the matrix along - the first dimension for `x1`, the second for `x2` and so on. - - Notes - ----- - This function supports both indexing conventions through the indexing - keyword argument. Giving the string 'ij' returns a meshgrid with - matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. - In the 2-D case with inputs of length M and N, the outputs are of shape - (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case - with inputs of length M, N and P, outputs are of shape (N, M, P) for - 'xy' indexing and (M, N, P) for 'ij' indexing. 
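A quick shape check of those conventions (the docstring snippet below illustrates the indexing difference further):

    import numpy as np

    x = np.linspace(0, 1, 4)   # M = 4
    y = np.linspace(0, 1, 3)   # N = 3
    assert np.meshgrid(x, y, indexing='xy')[0].shape == (3, 4)   # (N, M)
    assert np.meshgrid(x, y, indexing='ij')[0].shape == (4, 3)   # (M, N)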
The difference is - illustrated by the following code snippet:: - - xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') - for i in range(nx): - for j in range(ny): - # treat xv[i,j], yv[i,j] - - xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') - for i in range(nx): - for j in range(ny): - # treat xv[j,i], yv[j,i] - - In the 1-D and 0-D case, the indexing and sparse keywords have no effect. - - See Also - -------- - index_tricks.mgrid : Construct a multi-dimensional "meshgrid" - using indexing notation. - index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" - using indexing notation. - - Examples - -------- - >>> nx, ny = (3, 2) - >>> x = np.linspace(0, 1, nx) - >>> y = np.linspace(0, 1, ny) - >>> xv, yv = np.meshgrid(x, y) - >>> xv - array([[0. , 0.5, 1. ], - [0. , 0.5, 1. ]]) - >>> yv - array([[0., 0., 0.], - [1., 1., 1.]]) - >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays - >>> xv - array([[0. , 0.5, 1. ]]) - >>> yv - array([[0.], - [1.]]) - - `meshgrid` is very useful to evaluate functions on a grid. - - >>> import matplotlib.pyplot as plt - >>> x = np.arange(-5, 5, 0.1) - >>> y = np.arange(-5, 5, 0.1) - >>> xx, yy = np.meshgrid(x, y, sparse=True) - >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) - >>> h = plt.contourf(x,y,z) - >>> plt.show() - - """ - ndim = len(xi) - - copy_ = kwargs.pop('copy', True) - sparse = kwargs.pop('sparse', False) - indexing = kwargs.pop('indexing', 'xy') - - if kwargs: - raise TypeError("meshgrid() got an unexpected keyword argument '%s'" - % (list(kwargs)[0],)) - - if indexing not in ['xy', 'ij']: - raise ValueError( - "Valid values for `indexing` are 'xy' and 'ij'.") - - s0 = (1,) * ndim - output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) - for i, x in enumerate(xi)] - - if indexing == 'xy' and ndim > 1: - # switch first and second axis - output[0].shape = (1, -1) + s0[2:] - output[1].shape = (-1, 1) + s0[2:] - - if not sparse: - # Return the full N-D matrix (not only the 1-D vector) - output = np.broadcast_arrays(*output, subok=True) - - if copy_: - output = [x.copy() for x in output] - - return output - - -def _delete_dispatcher(arr, obj, axis=None): - return (arr, obj) - - -@array_function_dispatch(_delete_dispatcher) -def delete(arr, obj, axis=None): - """ - Return a new array with sub-arrays along an axis deleted. For a one - dimensional array, this returns those entries not returned by - `arr[obj]`. - - Parameters - ---------- - arr : array_like - Input array. - obj : slice, int or array of ints - Indicate indices of sub-arrays to remove along the specified axis. - axis : int, optional - The axis along which to delete the subarray defined by `obj`. - If `axis` is None, `obj` is applied to the flattened array. - - Returns - ------- - out : ndarray - A copy of `arr` with the elements specified by `obj` removed. Note - that `delete` does not occur in-place. If `axis` is None, `out` is - a flattened array. - - See Also - -------- - insert : Insert elements into an array. - append : Append elements at the end of an array. - - Notes - ----- - Often it is preferable to use a boolean mask. For example: - - >>> arr = np.arange(12) + 1 - >>> mask = np.ones(len(arr), dtype=bool) - >>> mask[[0,2,4]] = False - >>> result = arr[mask,...] - - Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further - use of `mask`. 
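That equivalence is easy to verify (sketch):

    import numpy as np

    arr = np.arange(12) + 1
    mask = np.ones(len(arr), dtype=bool)
    mask[[0, 2, 4]] = False
    assert np.array_equal(arr[mask], np.delete(arr, [0, 2, 4]))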
- - Examples - -------- - >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) - >>> arr - array([[ 1, 2, 3, 4], - [ 5, 6, 7, 8], - [ 9, 10, 11, 12]]) - >>> np.delete(arr, 1, 0) - array([[ 1, 2, 3, 4], - [ 9, 10, 11, 12]]) - - >>> np.delete(arr, np.s_[::2], 1) - array([[ 2, 4], - [ 6, 8], - [10, 12]]) - >>> np.delete(arr, [1,3,5], None) - array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - arr = asarray(arr) - ndim = arr.ndim - arrorder = 'F' if arr.flags.fnc else 'C' - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim - axis = -1 - - if ndim == 0: - # 2013-09-24, 1.9 - warnings.warn( - "in the future the special handling of scalars will be removed " - "from delete and raise an error", DeprecationWarning, stacklevel=3) - if wrap: - return wrap(arr) - else: - return arr.copy(order=arrorder) - - axis = normalize_axis_index(axis, ndim) - - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - - if isinstance(obj, slice): - start, stop, step = obj.indices(N) - xr = range(start, stop, step) - numtodel = len(xr) - - if numtodel <= 0: - if wrap: - return wrap(arr.copy(order=arrorder)) - else: - return arr.copy(order=arrorder) - - # Invert if step is negative: - if step < 0: - step = -step - start = xr[-1] - stop = xr[0] + 1 - - newshape[axis] -= numtodel - new = empty(newshape, arr.dtype, arrorder) - # copy initial chunk - if start == 0: - pass - else: - slobj[axis] = slice(None, start) - new[tuple(slobj)] = arr[tuple(slobj)] - # copy end chunk - if stop == N: - pass - else: - slobj[axis] = slice(stop-numtodel, None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(stop, None) - new[tuple(slobj)] = arr[tuple(slobj2)] - # copy middle pieces - if step == 1: - pass - else: # use array indexing. - keep = ones(stop-start, dtype=bool) - keep[:stop-start:step] = False - slobj[axis] = slice(start, stop-numtodel) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(start, stop) - arr = arr[tuple(slobj2)] - slobj2[axis] = keep - new[tuple(slobj)] = arr[tuple(slobj2)] - if wrap: - return wrap(new) - else: - return new - - _obj = obj - obj = np.asarray(obj) - # After removing the special handling of booleans and out of - # bounds values, the conversion to the array can be removed. - if obj.dtype == bool: - warnings.warn("in the future insert will treat boolean arrays and " - "array-likes as boolean index instead of casting it " - "to integer", FutureWarning, stacklevel=3) - obj = obj.astype(intp) - if isinstance(_obj, (int, long, integer)): - # optimization for a single value - obj = obj.item() - if (obj < -N or obj >= N): - raise IndexError( - "index %i is out of bounds for axis %i with " - "size %i" % (obj, axis, N)) - if (obj < 0): - obj += N - newshape[axis] -= 1 - new = empty(newshape, arr.dtype, arrorder) - slobj[axis] = slice(None, obj) - new[tuple(slobj)] = arr[tuple(slobj)] - slobj[axis] = slice(obj, None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(obj+1, None) - new[tuple(slobj)] = arr[tuple(slobj2)] - else: - if obj.size == 0 and not isinstance(_obj, np.ndarray): - obj = obj.astype(intp) - if not np.can_cast(obj, intp, 'same_kind'): - # obj.size = 1 special case always failed and would just - # give superfluous warnings. 
- # 2013-09-24, 1.9 - warnings.warn( - "using a non-integer array as obj in delete will result in an " - "error in the future", DeprecationWarning, stacklevel=3) - obj = obj.astype(intp) - keep = ones(N, dtype=bool) - - # Test if there are out of bound indices, this is deprecated - inside_bounds = (obj < N) & (obj >= -N) - if not inside_bounds.all(): - # 2013-09-24, 1.9 - warnings.warn( - "in the future out of bounds indices will raise an error " - "instead of being ignored by `numpy.delete`.", - DeprecationWarning, stacklevel=3) - obj = obj[inside_bounds] - positive_indices = obj >= 0 - if not positive_indices.all(): - warnings.warn( - "in the future negative indices will not be ignored by " - "`numpy.delete`.", FutureWarning, stacklevel=3) - obj = obj[positive_indices] - - keep[obj, ] = False - slobj[axis] = keep - new = arr[tuple(slobj)] - - if wrap: - return wrap(new) - else: - return new - - -def _insert_dispatcher(arr, obj, values, axis=None): - return (arr, obj, values) - - -@array_function_dispatch(_insert_dispatcher) -def insert(arr, obj, values, axis=None): - """ - Insert values along the given axis before the given indices. - - Parameters - ---------- - arr : array_like - Input array. - obj : int, slice or sequence of ints - Object that defines the index or indices before which `values` is - inserted. - - .. versionadded:: 1.8.0 - - Support for multiple insertions when `obj` is a single scalar or a - sequence with one element (similar to calling insert multiple - times). - values : array_like - Values to insert into `arr`. If the type of `values` is different - from that of `arr`, `values` is converted to the type of `arr`. - `values` should be shaped so that ``arr[...,obj,...] = values`` - is legal. - axis : int, optional - Axis along which to insert `values`. If `axis` is None then `arr` - is flattened first. - - Returns - ------- - out : ndarray - A copy of `arr` with `values` inserted. Note that `insert` - does not occur in-place: a new array is returned. If - `axis` is None, `out` is a flattened array. - - See Also - -------- - append : Append elements at the end of an array. - concatenate : Join a sequence of arrays along an existing axis. - delete : Delete elements from an array. - - Notes - ----- - Note that for higher dimensional inserts `obj=0` behaves very different - from `obj=[0]` just like `arr[:,0,:] = values` is different from - `arr[:,[0],:] = values`. - - Examples - -------- - >>> a = np.array([[1, 1], [2, 2], [3, 3]]) - >>> a - array([[1, 1], - [2, 2], - [3, 3]]) - >>> np.insert(a, 1, 5) - array([1, 5, 1, ..., 2, 3, 3]) - >>> np.insert(a, 1, 5, axis=1) - array([[1, 5, 1], - [2, 5, 2], - [3, 5, 3]]) - - Difference between sequence and scalars: - - >>> np.insert(a, [1], [[1],[2],[3]], axis=1) - array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]]) - >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), - ... 
np.insert(a, [1], [[1],[2],[3]], axis=1)) - True - - >>> b = a.flatten() - >>> b - array([1, 1, 2, 2, 3, 3]) - >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) - - >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) - - >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, ..., 2, 3, 3]) - - >>> x = np.arange(8).reshape(2, 4) - >>> idx = (1, 3) - >>> np.insert(x, idx, 999, axis=1) - array([[ 0, 999, 1, 2, 999, 3], - [ 4, 999, 5, 6, 999, 7]]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - arr = asarray(arr) - ndim = arr.ndim - arrorder = 'F' if arr.flags.fnc else 'C' - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim - axis = ndim - 1 - elif ndim == 0: - # 2013-09-24, 1.9 - warnings.warn( - "in the future the special handling of scalars will be removed " - "from insert and raise an error", DeprecationWarning, stacklevel=3) - arr = arr.copy(order=arrorder) - arr[...] = values - if wrap: - return wrap(arr) - else: - return arr - else: - axis = normalize_axis_index(axis, ndim) - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - - if isinstance(obj, slice): - # turn it into a range object - indices = arange(*obj.indices(N), **{'dtype': intp}) - else: - # need to copy obj, because indices will be changed in-place - indices = np.array(obj) - if indices.dtype == bool: - # See also delete - warnings.warn( - "in the future insert will treat boolean arrays and " - "array-likes as a boolean index instead of casting it to " - "integer", FutureWarning, stacklevel=3) - indices = indices.astype(intp) - # Code after warning period: - #if obj.ndim != 1: - # raise ValueError('boolean array argument obj to insert ' - # 'must be one dimensional') - #indices = np.flatnonzero(obj) - elif indices.ndim > 1: - raise ValueError( - "index array argument obj to insert must be one dimensional " - "or scalar") - if indices.size == 1: - index = indices.item() - if index < -N or index > N: - raise IndexError( - "index %i is out of bounds for axis %i with " - "size %i" % (obj, axis, N)) - if (index < 0): - index += N - - # There are some object array corner cases here, but we cannot avoid - # that: - values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) - if indices.ndim == 0: - # broadcasting is very different here, since a[:,0,:] = ... behaves - # very different from a[:,[0],:] = ...! This changes values so that - # it works likes the second case. 
(here a[:,0:1,:]) - values = np.moveaxis(values, 0, axis) - numnew = values.shape[axis] - newshape[axis] += numnew - new = empty(newshape, arr.dtype, arrorder) - slobj[axis] = slice(None, index) - new[tuple(slobj)] = arr[tuple(slobj)] - slobj[axis] = slice(index, index+numnew) - new[tuple(slobj)] = values - slobj[axis] = slice(index+numnew, None) - slobj2 = [slice(None)] * ndim - slobj2[axis] = slice(index, None) - new[tuple(slobj)] = arr[tuple(slobj2)] - if wrap: - return wrap(new) - return new - elif indices.size == 0 and not isinstance(obj, np.ndarray): - # Can safely cast the empty list to intp - indices = indices.astype(intp) - - if not np.can_cast(indices, intp, 'same_kind'): - # 2013-09-24, 1.9 - warnings.warn( - "using a non-integer array as obj in insert will result in an " - "error in the future", DeprecationWarning, stacklevel=3) - indices = indices.astype(intp) - - indices[indices < 0] += N - - numnew = len(indices) - order = indices.argsort(kind='mergesort') # stable sort - indices[order] += np.arange(numnew) - - newshape[axis] += numnew - old_mask = ones(newshape[axis], dtype=bool) - old_mask[indices] = False - - new = empty(newshape, arr.dtype, arrorder) - slobj2 = [slice(None)]*ndim - slobj[axis] = indices - slobj2[axis] = old_mask - new[tuple(slobj)] = values - new[tuple(slobj2)] = arr - - if wrap: - return wrap(new) - return new - - -def _append_dispatcher(arr, values, axis=None): - return (arr, values) - - -@array_function_dispatch(_append_dispatcher) -def append(arr, values, axis=None): - """ - Append values to the end of an array. - - Parameters - ---------- - arr : array_like - Values are appended to a copy of this array. - values : array_like - These values are appended to a copy of `arr`. It must be of the - correct shape (the same shape as `arr`, excluding `axis`). If - `axis` is not specified, `values` can be any shape and will be - flattened before use. - axis : int, optional - The axis along which `values` are appended. If `axis` is not - given, both `arr` and `values` are flattened before use. - - Returns - ------- - append : ndarray - A copy of `arr` with `values` appended to `axis`. Note that - `append` does not occur in-place: a new array is allocated and - filled. If `axis` is None, `out` is a flattened array. - - See Also - -------- - insert : Insert elements into an array. - delete : Delete elements from an array. - - Examples - -------- - >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) - array([1, 2, 3, ..., 7, 8, 9]) - - When `axis` is specified, `values` must have the correct shape. - - >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) - array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) - Traceback (most recent call last): - ... - ValueError: all the input arrays must have same number of dimensions - - """ - arr = asanyarray(arr) - if axis is None: - if arr.ndim != 1: - arr = arr.ravel() - values = ravel(values) - axis = arr.ndim-1 - return concatenate((arr, values), axis=axis) - - -def _digitize_dispatcher(x, bins, right=None): - return (x, bins) - - -@array_function_dispatch(_digitize_dispatcher) -def digitize(x, bins, right=False): - """ - Return the indices of the bins to which each value in input array belongs. 
- - ========= ============= ============================ - `right` order of bins returned index `i` satisfies - ========= ============= ============================ - ``False`` increasing ``bins[i-1] <= x < bins[i]`` - ``True`` increasing ``bins[i-1] < x <= bins[i]`` - ``False`` decreasing ``bins[i-1] > x >= bins[i]`` - ``True`` decreasing ``bins[i-1] >= x > bins[i]`` - ========= ============= ============================ - - If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is - returned as appropriate. - - Parameters - ---------- - x : array_like - Input array to be binned. Prior to NumPy 1.10.0, this array had to - be 1-dimensional, but can now have any shape. - bins : array_like - Array of bins. It has to be 1-dimensional and monotonic. - right : bool, optional - Indicating whether the intervals include the right or the left bin - edge. Default behavior is (right==False) indicating that the interval - does not include the right edge. The left bin end is open in this - case, i.e., bins[i-1] <= x < bins[i] is the default behavior for - monotonically increasing bins. - - Returns - ------- - indices : ndarray of ints - Output array of indices, of same shape as `x`. - - Raises - ------ - ValueError - If `bins` is not monotonic. - TypeError - If the type of the input is complex. - - See Also - -------- - bincount, histogram, unique, searchsorted - - Notes - ----- - If values in `x` are such that they fall outside the bin range, - attempting to index `bins` with the indices that `digitize` returns - will result in an IndexError. - - .. versionadded:: 1.10.0 - - `np.digitize` is implemented in terms of `np.searchsorted`. This means - that a binary search is used to bin the values, which scales much better - for larger number of bins than the previous linear search. It also removes - the requirement for the input array to be 1-dimensional. - - For monotonically _increasing_ `bins`, the following are equivalent:: - - np.digitize(x, bins, right=True) - np.searchsorted(bins, x, side='left') - - Note that as the order of the arguments are reversed, the side must be too. - The `searchsorted` call is marginally faster, as it does not do any - monotonicity checks. Perhaps more importantly, it supports all dtypes. - - Examples - -------- - >>> x = np.array([0.2, 6.4, 3.0, 1.6]) - >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) - >>> inds = np.digitize(x, bins) - >>> inds - array([1, 4, 3, 2]) - >>> for n in range(x.size): - ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) - ... 
- 0.0 <= 0.2 < 1.0 - 4.0 <= 6.4 < 10.0 - 2.5 <= 3.0 < 4.0 - 1.0 <= 1.6 < 2.5 - - >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) - >>> bins = np.array([0, 5, 10, 15, 20]) - >>> np.digitize(x,bins,right=True) - array([1, 2, 3, 4, 4]) - >>> np.digitize(x,bins,right=False) - array([1, 3, 3, 4, 5]) - """ - x = _nx.asarray(x) - bins = _nx.asarray(bins) - - # here for compatibility, searchsorted below is happy to take this - if np.issubdtype(x.dtype, _nx.complexfloating): - raise TypeError("x may not be complex") - - mono = _monotonicity(bins) - if mono == 0: - raise ValueError("bins must be monotonically increasing or decreasing") - - # this is backwards because the arguments below are swapped - side = 'left' if right else 'right' - if mono == -1: - # reverse the bins, and invert the results - return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) - else: - return _nx.searchsorted(bins, x, side=side) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/histograms.py b/venv/lib/python3.7/site-packages/numpy/lib/histograms.py deleted file mode 100644 index 03c365a..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/histograms.py +++ /dev/null @@ -1,1123 +0,0 @@ -""" -Histogram-related functions -""" -from __future__ import division, absolute_import, print_function - -import contextlib -import functools -import operator -import warnings - -import numpy as np -from numpy.compat.py3k import basestring -from numpy.core import overrides - -__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - -# range is a keyword argument to many functions, so save the builtin so they can -# use it. -_range = range - - -def _ptp(x): - """Peak-to-peak value of x. - - This implementation avoids the problem of signed integer arrays having a - peak-to-peak value that cannot be represented with the array's data type. - This function returns an unsigned value for signed integer arrays. - """ - return _unsigned_subtract(x.max(), x.min()) - - -def _hist_bin_sqrt(x, range): - """ - Square root histogram bin estimator. - - Bin width is inversely proportional to the data size. Used by many - programs for its simplicity. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return _ptp(x) / np.sqrt(x.size) - - -def _hist_bin_sturges(x, range): - """ - Sturges histogram bin estimator. - - A very simplistic estimator based on the assumption of normality of - the data. This estimator has poor performance for non-normal data, - which becomes especially obvious for large data sets. The estimate - depends only on size of the data. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return _ptp(x) / (np.log2(x.size) + 1.0) - - -def _hist_bin_rice(x, range): - """ - Rice histogram bin estimator. - - Another simple estimator with no normality assumption. It has better - performance for large data than Sturges, but tends to overestimate - the number of bins. The number of bins is proportional to the cube - root of data size (asymptotically optimal). The estimate depends - only on size of the data. 
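[Editor's aside -- the `searchsorted` equivalence noted in the `digitize` docstring above, made concrete; the data values are arbitrary:]

    import numpy as np

    x = np.array([0.2, 6.4, 3.0, 1.6])
    bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
    # For increasing bins, digitize is searchsorted with the arguments
    # swapped and the side flipped:
    assert np.array_equal(np.digitize(x, bins, right=True),
                          np.searchsorted(bins, x, side='left'))
    assert np.array_equal(np.digitize(x, bins, right=False),
                          np.searchsorted(bins, x, side='right'))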
- - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) - - -def _hist_bin_scott(x, range): - """ - Scott histogram bin estimator. - - The binwidth is proportional to the standard deviation of the data - and inversely proportional to the cube root of data size - (asymptotically optimal). - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) - - -def _hist_bin_stone(x, range): - """ - Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). - - The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. - The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. - https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule - - This paper by Stone appears to be the origination of this rule. - http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - range : (float, float) - The lower and upper range of the bins. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - - n = x.size - ptp_x = _ptp(x) - if n <= 1 or ptp_x == 0: - return 0 - - def jhat(nbins): - hh = ptp_x / nbins - p_k = np.histogram(x, bins=nbins, range=range)[0] / n - return (2 - (n + 1) * p_k.dot(p_k)) / hh - - nbins_upper_bound = max(100, int(np.sqrt(n))) - nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) - if nbins == nbins_upper_bound: - warnings.warn("The number of bins estimated may be suboptimal.", - RuntimeWarning, stacklevel=3) - return ptp_x / nbins - - -def _hist_bin_doane(x, range): - """ - Doane's histogram bin estimator. - - Improved version of Sturges' formula which works better for - non-normal data. See - stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - if x.size > 2: - sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) - sigma = np.std(x) - if sigma > 0.0: - # These three operations add up to - # g1 = np.mean(((x - np.mean(x)) / sigma)**3) - # but use only one temp array instead of three - temp = x - np.mean(x) - np.true_divide(temp, sigma, temp) - np.power(temp, 3, temp) - g1 = np.mean(temp) - return _ptp(x) / (1.0 + np.log2(x.size) + - np.log2(1.0 + np.absolute(g1) / sg1)) - return 0.0 - - -def _hist_bin_fd(x, range): - """ - The Freedman-Diaconis histogram bin estimator. - - The Freedman-Diaconis rule uses interquartile range (IQR) to - estimate binwidth. It is considered a variation of the Scott rule - with more robustness as the IQR is less affected by outliers than - the standard deviation. However, the IQR depends on fewer points - than the standard deviation, so it is less accurate, especially for - long tailed distributions. 
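[Editor's aside -- a hedged comparison of the Sturges and Doane rules described above, through the public ``bins=`` strings; the seed and sample size are arbitrary:]

    import numpy as np

    x = np.random.RandomState(0).lognormal(size=5000)   # strongly skewed sample
    for rule in ('sturges', 'doane'):
        nbins = len(np.histogram_bin_edges(x, bins=rule)) - 1
        print(rule, nbins)   # Doane's skewness term typically adds bins here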
- - If the IQR is 0, this function returns 1 for the number of bins. - Binwidth is inversely proportional to the cube root of data size - (asymptotically optimal). - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - del range # unused - iqr = np.subtract(*np.percentile(x, [75, 25])) - return 2.0 * iqr * x.size ** (-1.0 / 3.0) - - -def _hist_bin_auto(x, range): - """ - Histogram bin estimator that uses the minimum width of the - Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero - and the Sturges estimator if the FD bandwidth is 0. - - The FD estimator is usually the most robust method, but its width - estimate tends to be too large for small `x` and bad for data with limited - variance. The Sturges estimator is quite good for small (<1000) datasets - and is the default in the R language. This method gives good off the shelf - behaviour. - - .. versionchanged:: 1.15.0 - If there is limited variance the IQR can be 0, which results in the - FD bin width being 0 too. This is not a valid bin width, so - ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal. - If the IQR is 0, it's unlikely any variance based estimators will be of - use, so we revert to the sturges estimator, which only uses the size of the - dataset in its calculation. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - - See Also - -------- - _hist_bin_fd, _hist_bin_sturges - """ - fd_bw = _hist_bin_fd(x, range) - sturges_bw = _hist_bin_sturges(x, range) - del range # unused - if fd_bw: - return min(fd_bw, sturges_bw) - else: - # limited variance, so we return a len dependent bw estimator - return sturges_bw - -# Private dict initialized at module load time -_hist_bin_selectors = {'stone': _hist_bin_stone, - 'auto': _hist_bin_auto, - 'doane': _hist_bin_doane, - 'fd': _hist_bin_fd, - 'rice': _hist_bin_rice, - 'scott': _hist_bin_scott, - 'sqrt': _hist_bin_sqrt, - 'sturges': _hist_bin_sturges} - - -def _ravel_and_check_weights(a, weights): - """ Check a and weights have matching shapes, and ravel both """ - a = np.asarray(a) - - # Ensure that the array is a "subtractable" dtype - if a.dtype == np.bool_: - warnings.warn("Converting input from {} to {} for compatibility." - .format(a.dtype, np.uint8), - RuntimeWarning, stacklevel=3) - a = a.astype(np.uint8) - - if weights is not None: - weights = np.asarray(weights) - if weights.shape != a.shape: - raise ValueError( - 'weights should have the same shape as a.') - weights = weights.ravel() - a = a.ravel() - return a, weights - - -def _get_outer_edges(a, range): - """ - Determine the outer bin edges to use, from either the data or the range - argument - """ - if range is not None: - first_edge, last_edge = range - if first_edge > last_edge: - raise ValueError( - 'max must be larger than min in range parameter.') - if not (np.isfinite(first_edge) and np.isfinite(last_edge)): - raise ValueError( - "supplied range of [{}, {}] is not finite".format(first_edge, last_edge)) - elif a.size == 0: - # handle empty arrays. Can't determine range, so use 0-1. 
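# [Editor's aside, hedged -- illustrating the 'auto' selector defined above:
#  it takes the narrower of the FD and Sturges widths, i.e. it never
#  produces fewer bins than either rule alone. Doctest-style sketch:
#      >>> x = np.random.RandomState(1).normal(size=200)
#      >>> e = lambda rule: np.histogram_bin_edges(x, bins=rule)
#      >>> len(e('auto')) == max(len(e('fd')), len(e('sturges')))
#      True
#  ]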
- first_edge, last_edge = 0, 1 - else: - first_edge, last_edge = a.min(), a.max() - if not (np.isfinite(first_edge) and np.isfinite(last_edge)): - raise ValueError( - "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge)) - - # expand empty range to avoid divide by zero - if first_edge == last_edge: - first_edge = first_edge - 0.5 - last_edge = last_edge + 0.5 - - return first_edge, last_edge - - -def _unsigned_subtract(a, b): - """ - Subtract two values where a >= b, and produce an unsigned result - - This is needed when finding the difference between the upper and lower - bound of an int16 histogram - """ - # coerce to a single type - signed_to_unsigned = { - np.byte: np.ubyte, - np.short: np.ushort, - np.intc: np.uintc, - np.int_: np.uint, - np.longlong: np.ulonglong - } - dt = np.result_type(a, b) - try: - dt = signed_to_unsigned[dt.type] - except KeyError: - return np.subtract(a, b, dtype=dt) - else: - # we know the inputs are integers, and we are deliberately casting - # signed to unsigned - return np.subtract(a, b, casting='unsafe', dtype=dt) - - -def _get_bin_edges(a, bins, range, weights): - """ - Computes the bins used internally by `histogram`. - - Parameters - ========== - a : ndarray - Ravelled data array - bins, range - Forwarded arguments from `histogram`. - weights : ndarray, optional - Ravelled weights array, or None - - Returns - ======= - bin_edges : ndarray - Array of bin edges - uniform_bins : (Number, Number, int): - The upper bound, lowerbound, and number of bins, used in the optimized - implementation of `histogram` that works on uniform bins. - """ - # parse the overloaded bins argument - n_equal_bins = None - bin_edges = None - - if isinstance(bins, basestring): - bin_name = bins - # if `bins` is a string for an automatic method, - # this will replace it with the number of bins calculated - if bin_name not in _hist_bin_selectors: - raise ValueError( - "{!r} is not a valid estimator for `bins`".format(bin_name)) - if weights is not None: - raise TypeError("Automated estimation of the number of " - "bins is not supported for weighted data") - - first_edge, last_edge = _get_outer_edges(a, range) - - # truncate the range if needed - if range is not None: - keep = (a >= first_edge) - keep &= (a <= last_edge) - if not np.logical_and.reduce(keep): - a = a[keep] - - if a.size == 0: - n_equal_bins = 1 - else: - # Do not call selectors on empty arrays - width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) - if width: - n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) - else: - # Width can be zero for some estimators, e.g. FD when - # the IQR of the data is zero. - n_equal_bins = 1 - - elif np.ndim(bins) == 0: - try: - n_equal_bins = operator.index(bins) - except TypeError: - raise TypeError( - '`bins` must be an integer, a string, or an array') - if n_equal_bins < 1: - raise ValueError('`bins` must be positive, when an integer') - - first_edge, last_edge = _get_outer_edges(a, range) - - elif np.ndim(bins) == 1: - bin_edges = np.asarray(bins) - if np.any(bin_edges[:-1] > bin_edges[1:]): - raise ValueError( - '`bins` must increase monotonically, when an array') - - else: - raise ValueError('`bins` must be 1d, when an array') - - if n_equal_bins is not None: - # gh-10322 means that type resolution rules are dependent on array - # shapes. To avoid this causing problems, we pick a type now and stick - # with it throughout. 
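# [Editor's note, added for clarity]: the promotion just below widens
# integer bin types to float so that np.linspace can place fractional
# edge positions between integer data bounds.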
bin_type = np.result_type(first_edge, last_edge, a) - if np.issubdtype(bin_type, np.integer): - bin_type = np.result_type(bin_type, float) - - # bin edges must be computed - bin_edges = np.linspace( - first_edge, last_edge, n_equal_bins + 1, - endpoint=True, dtype=bin_type) - return bin_edges, (first_edge, last_edge, n_equal_bins) - else: - return bin_edges, None - - -def _search_sorted_inclusive(a, v): - """ - Like `searchsorted`, but where the last item in `v` is placed on the right. - - In the context of a histogram, this makes the last bin edge inclusive - """ - return np.concatenate(( - a.searchsorted(v[:-1], 'left'), - a.searchsorted(v[-1:], 'right') - )) - - -def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None): - return (a, bins, weights) - - -@array_function_dispatch(_histogram_bin_edges_dispatcher) -def histogram_bin_edges(a, bins=10, range=None, weights=None): - r""" - Function to calculate only the edges of the bins used by the `histogram` - function. - - Parameters - ---------- - a : array_like - Input data. The histogram is computed over the flattened array. - bins : int or sequence of scalars or str, optional - If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a - sequence, it defines the bin edges, including the rightmost - edge, allowing for non-uniform bin widths. - - If `bins` is a string from the list below, `histogram_bin_edges` will use - the method chosen to calculate the optimal bin width and - consequently the number of bins (see `Notes` for more detail on - the estimators) from the data that falls within the requested - range. While the bin width will be optimal for the actual data - in the range, the number of bins will be computed to fill the - entire range, including the empty portions. For visualisation, - using the 'auto' option is suggested. Weighted data is not - supported for automated bin size selection. - - 'auto' - Maximum of the 'sturges' and 'fd' estimators. Provides good - all around performance. - - 'fd' (Freedman Diaconis Estimator) - Robust (resilient to outliers) estimator that takes into - account data variability and data size. - - 'doane' - An improved version of Sturges' estimator that works better - with non-normal datasets. - - 'scott' - Less robust estimator that takes into account data - variability and data size. - - 'stone' - Estimator based on leave-one-out cross-validation estimate of - the integrated squared error. Can be regarded as a generalization - of Scott's rule. - - 'rice' - Estimator does not take variability into account, only data - size. Commonly overestimates number of bins required. - - 'sturges' - R's default method, only accounts for data size. Only - optimal for gaussian data and underestimates number of bins - for large non-gaussian datasets. - - 'sqrt' - Square root (of data size) estimator, used by Excel and - other programs for its speed and simplicity. - - range : (float, float), optional - The lower and upper range of the bins. If not provided, range - is simply ``(a.min(), a.max())``. Values outside the range are - ignored. The first element of the range must be less than or - equal to the second. `range` affects the automatic bin - computation as well. While bin width is computed to be optimal - based on the actual data within `range`, the bin count will fill - the entire range including portions containing no data. - - weights : array_like, optional - An array of weights, of the same shape as `a`.
Each value in - `a` only contributes its associated weight towards the bin count - (instead of 1). This is currently not used by any of the bin estimators, - but may be in the future. - - Returns - ------- - bin_edges : array of dtype float - The edges to pass into `histogram` - - See Also - -------- - histogram - - Notes - ----- - The methods to estimate the optimal number of bins are well founded - in literature, and are inspired by the choices R provides for - histogram visualisation. Note that having the number of bins - proportional to :math:`n^{1/3}` is asymptotically optimal, which is - why it appears in most estimators. These are simply plug-in methods - that give good starting points for number of bins. In the equations - below, :math:`h` is the binwidth and :math:`n_h` is the number of - bins. All estimators that compute bin counts are recast to bin width - using the `ptp` of the data. The final bin count is obtained from - ``np.round(np.ceil(range / h))``. - - 'auto' (maximum of the 'sturges' and 'fd' estimators) - A compromise to get a good value. For small datasets the Sturges - value will usually be chosen, while larger datasets will usually - default to FD. Avoids the overly conservative behaviour of FD - and Sturges for small and large datasets respectively. - Switchover point is usually :math:`a.size \approx 1000`. - - 'fd' (Freedman Diaconis Estimator) - .. math:: h = 2 \frac{IQR}{n^{1/3}} - - The binwidth is proportional to the interquartile range (IQR) - and inversely proportional to cube root of a.size. Can be too - conservative for small datasets, but is quite good for large - datasets. The IQR is very robust to outliers. - - 'scott' - .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}} - - The binwidth is proportional to the standard deviation of the - data and inversely proportional to cube root of ``x.size``. Can - be too conservative for small datasets, but is quite good for - large datasets. The standard deviation is not very robust to - outliers. Values are very similar to the Freedman-Diaconis - estimator in the absence of outliers. - - 'rice' - .. math:: n_h = 2n^{1/3} - - The number of bins is only proportional to cube root of - ``a.size``. It tends to overestimate the number of bins and it - does not take into account data variability. - - 'sturges' - .. math:: n_h = \log _{2}n+1 - - The number of bins is the base 2 log of ``a.size``. This - estimator assumes normality of data and is too conservative for - larger, non-normal datasets. This is the default method in R's - ``hist`` method. - - 'doane' - .. math:: n_h = 1 + \log_{2}(n) + - \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}}) - - g_1 = mean[(\frac{x - \mu}{\sigma})^3] - - \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} - - An improved version of Sturges' formula that produces better - estimates for non-normal datasets. This estimator attempts to - account for the skew of the data. - - 'sqrt' - .. math:: n_h = \sqrt n - - The simplest and fastest estimator. Only takes into account the - data size. - - Examples - -------- - >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) - >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) - array([0. , 0.25, 0.5 , 0.75, 1. ]) - >>> np.histogram_bin_edges(arr, bins=2) - array([0. , 2.5, 5. 
]) - - For consistency with histogram, an array of pre-computed bins is - passed through unmodified: - - >>> np.histogram_bin_edges(arr, [1, 2]) - array([1, 2]) - - This function allows one set of bins to be computed, and reused across - multiple histograms: - - >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') - >>> shared_bins - array([0., 1., 2., 3., 4., 5.]) - - >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) - >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) - >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) - - >>> hist_0; hist_1 - array([1, 1, 0, 1, 0]) - array([2, 0, 1, 1, 2]) - - Which gives more easily comparable results than using separate bins for - each histogram: - - >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') - >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') - >>> hist_0; hist_1 - array([1, 1, 1]) - array([2, 1, 1, 2]) - >>> bins_0; bins_1 - array([0., 1., 2., 3.]) - array([0. , 1.25, 2.5 , 3.75, 5. ]) - - """ - a, weights = _ravel_and_check_weights(a, weights) - bin_edges, _ = _get_bin_edges(a, bins, range, weights) - return bin_edges - - -def _histogram_dispatcher( - a, bins=None, range=None, normed=None, weights=None, density=None): - return (a, bins, weights) - - -@array_function_dispatch(_histogram_dispatcher) -def histogram(a, bins=10, range=None, normed=None, weights=None, - density=None): - r""" - Compute the histogram of a set of data. - - Parameters - ---------- - a : array_like - Input data. The histogram is computed over the flattened array. - bins : int or sequence of scalars or str, optional - If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a - sequence, it defines a monotonically increasing array of bin edges, - including the rightmost edge, allowing for non-uniform bin widths. - - .. versionadded:: 1.11.0 - - If `bins` is a string, it defines the method used to calculate the - optimal bin width, as defined by `histogram_bin_edges`. - - range : (float, float), optional - The lower and upper range of the bins. If not provided, range - is simply ``(a.min(), a.max())``. Values outside the range are - ignored. The first element of the range must be less than or - equal to the second. `range` affects the automatic bin - computation as well. While bin width is computed to be optimal - based on the actual data within `range`, the bin count will fill - the entire range including portions containing no data. - normed : bool, optional - - .. deprecated:: 1.6.0 - - This is equivalent to the `density` argument, but produces incorrect - results for unequal bin widths. It should not be used. - - .. versionchanged:: 1.15.0 - DeprecationWarnings are actually emitted. - - weights : array_like, optional - An array of weights, of the same shape as `a`. Each value in - `a` only contributes its associated weight towards the bin count - (instead of 1). If `density` is True, the weights are - normalized, so that the integral of the density over the range - remains 1. - density : bool, optional - If ``False``, the result will contain the number of samples in - each bin. If ``True``, the result is the value of the - probability *density* function at the bin, normalized such that - the *integral* over the range is 1. Note that the sum of the - histogram values will not be equal to 1 unless bins of unity - width are chosen; it is not a probability *mass* function. - - Overrides the ``normed`` keyword if given. 
- - Returns - ------- - hist : array - The values of the histogram. See `density` and `weights` for a - description of the possible semantics. - bin_edges : array of dtype float - Return the bin edges ``(length(hist)+1)``. - - - See Also - -------- - histogramdd, bincount, searchsorted, digitize, histogram_bin_edges - - Notes - ----- - All but the last (righthand-most) bin is half-open. In other words, - if `bins` is:: - - [1, 2, 3, 4] - - then the first bin is ``[1, 2)`` (including 1, but excluding 2) and - the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which - *includes* 4. - - - Examples - -------- - >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) - (array([0, 2, 1]), array([0, 1, 2, 3])) - >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) - (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) - >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) - (array([1, 4, 1]), array([0, 1, 2, 3])) - - >>> a = np.arange(5) - >>> hist, bin_edges = np.histogram(a, density=True) - >>> hist - array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) - >>> hist.sum() - 2.4999999999999996 - >>> np.sum(hist * np.diff(bin_edges)) - 1.0 - - .. versionadded:: 1.11.0 - - Automated Bin Selection Methods example, using 2 peak random data - with 2000 points: - - >>> import matplotlib.pyplot as plt - >>> rng = np.random.RandomState(10) # deterministic random data - >>> a = np.hstack((rng.normal(size=1000), - ... rng.normal(loc=5, scale=2, size=1000))) - >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram - >>> plt.title("Histogram with 'auto' bins") - Text(0.5, 1.0, "Histogram with 'auto' bins") - >>> plt.show() - - """ - a, weights = _ravel_and_check_weights(a, weights) - - bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights) - - # Histogram is an integer or a float array depending on the weights. - if weights is None: - ntype = np.dtype(np.intp) - else: - ntype = weights.dtype - - # We set a block size, as this allows us to iterate over chunks when - # computing histograms, to minimize memory usage. - BLOCK = 65536 - - # The fast path uses bincount, but that only works for certain types - # of weight - simple_weights = ( - weights is None or - np.can_cast(weights.dtype, np.double) or - np.can_cast(weights.dtype, complex) - ) - - if uniform_bins is not None and simple_weights: - # Fast algorithm for equal bins - # We now convert values of a to bin indices, under the assumption of - # equal bin widths (which is valid here). - first_edge, last_edge, n_equal_bins = uniform_bins - - # Initialize empty histogram - n = np.zeros(n_equal_bins, ntype) - - # Pre-compute histogram scaling factor - norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge) - - # We iterate over blocks here for two reasons: the first is that for - # large arrays, it is actually faster (for example for a 10^8 array it - # is 2x as fast) and it results in a memory footprint 3x lower in the - # limit of large arrays. - for i in _range(0, len(a), BLOCK): - tmp_a = a[i:i+BLOCK] - if weights is None: - tmp_w = None - else: - tmp_w = weights[i:i + BLOCK] - - # Only include values in the right range - keep = (tmp_a >= first_edge) - keep &= (tmp_a <= last_edge) - if not np.logical_and.reduce(keep): - tmp_a = tmp_a[keep] - if tmp_w is not None: - tmp_w = tmp_w[keep] - - # This cast ensures no type promotions occur below, which gh-10322 - # make unpredictable. Getting it wrong leads to precision errors - # like gh-8123. 
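# [Editor's note, added for clarity]: the scaling below maps each value v
# to a provisional bin index floor((v - first_edge) * norm), with norm =
# n_equal_bins / (last_edge - first_edge); e.g. with 4 equal bins on
# [0, 1), v = 0.75 gives int(0.75 * 4) == 3, the last bin. The fix-up
# passes that follow repair values landing within ~1 ULP of an edge, and
# the last bin is closed so values equal to last_edge are still counted.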
- tmp_a = tmp_a.astype(bin_edges.dtype, copy=False) - - # Compute the bin indices, and for values that lie exactly on - # last_edge we need to subtract one - f_indices = _unsigned_subtract(tmp_a, first_edge) * norm - indices = f_indices.astype(np.intp) - indices[indices == n_equal_bins] -= 1 - - # The index computation is not guaranteed to give exactly - # consistent results within ~1 ULP of the bin edges. - decrement = tmp_a < bin_edges[indices] - indices[decrement] -= 1 - # The last bin includes the right edge. The other bins do not. - increment = ((tmp_a >= bin_edges[indices + 1]) - & (indices != n_equal_bins - 1)) - indices[increment] += 1 - - # We now compute the histogram using bincount - if ntype.kind == 'c': - n.real += np.bincount(indices, weights=tmp_w.real, - minlength=n_equal_bins) - n.imag += np.bincount(indices, weights=tmp_w.imag, - minlength=n_equal_bins) - else: - n += np.bincount(indices, weights=tmp_w, - minlength=n_equal_bins).astype(ntype) - else: - # Compute via cumulative histogram - cum_n = np.zeros(bin_edges.shape, ntype) - if weights is None: - for i in _range(0, len(a), BLOCK): - sa = np.sort(a[i:i+BLOCK]) - cum_n += _search_sorted_inclusive(sa, bin_edges) - else: - zero = np.zeros(1, dtype=ntype) - for i in _range(0, len(a), BLOCK): - tmp_a = a[i:i+BLOCK] - tmp_w = weights[i:i+BLOCK] - sorting_index = np.argsort(tmp_a) - sa = tmp_a[sorting_index] - sw = tmp_w[sorting_index] - cw = np.concatenate((zero, sw.cumsum())) - bin_index = _search_sorted_inclusive(sa, bin_edges) - cum_n += cw[bin_index] - - n = np.diff(cum_n) - - # density overrides the normed keyword - if density is not None: - if normed is not None: - # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) - warnings.warn( - "The normed argument is ignored when density is provided. " - "In future passing both will result in an error.", - DeprecationWarning, stacklevel=3) - normed = None - - if density: - db = np.array(np.diff(bin_edges), float) - return n/db/n.sum(), bin_edges - elif normed: - # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) - warnings.warn( - "Passing `normed=True` on non-uniform bins has always been " - "broken, and computes neither the probability density " - "function nor the probability mass function. " - "The result is only correct if the bins are uniform, when " - "density=True will produce the same result anyway. " - "The argument will be removed in a future version of " - "numpy.", - np.VisibleDeprecationWarning, stacklevel=3) - - # this normalization is incorrect, but - db = np.array(np.diff(bin_edges), float) - return n/(n*db).sum(), bin_edges - else: - if normed is not None: - # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) - warnings.warn( - "Passing normed=False is deprecated, and has no effect. " - "Consider passing the density argument instead.", - DeprecationWarning, stacklevel=3) - return n, bin_edges - - -def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None, - weights=None, density=None): - if hasattr(sample, 'shape'): # same condition as used in histogramdd - yield sample - else: - yield from sample - with contextlib.suppress(TypeError): - yield from bins - yield weights - - -@array_function_dispatch(_histogramdd_dispatcher) -def histogramdd(sample, bins=10, range=None, normed=None, weights=None, - density=None): - """ - Compute the multidimensional histogram of some data. - - Parameters - ---------- - sample : (N, D) array, or (D, N) array_like - The data to be histogrammed. 
- - Note the unusual interpretation of sample when an array_like: - - * When an array, each row is a coordinate in a D-dimensional space - - such as ``histogramdd(np.array([p1, p2, p3]))``. - * When an array_like, each element is the list of values for a single - coordinate - such as ``histogramdd((X, Y, Z))``. - - The first form should be preferred. - - bins : sequence or int, optional - The bin specification: - - * A sequence of arrays describing the monotonically increasing bin - edges along each dimension. - * The number of bins for each dimension (nx, ny, ... =bins) - * The number of bins for all dimensions (nx=ny=...=bins). - - range : sequence, optional - A sequence of length D, each an optional (lower, upper) tuple giving - the outer bin edges to be used if the edges are not given explicitly in - `bins`. - An entry of None in the sequence results in the minimum and maximum - values being used for the corresponding dimension. - The default, None, is equivalent to passing a tuple of D None values. - density : bool, optional - If False, the default, returns the number of samples in each bin. - If True, returns the probability *density* function at the bin, - ``bin_count / sample_count / bin_volume``. - normed : bool, optional - An alias for the density argument that behaves identically. To avoid - confusion with the broken normed argument to `histogram`, `density` - should be preferred. - weights : (N,) array_like, optional - An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. - Weights are normalized to 1 if normed is True. If normed is False, - the values of the returned histogram are equal to the sum of the - weights belonging to the samples falling into each bin. - - Returns - ------- - H : ndarray - The multidimensional histogram of sample x. See normed and weights - for the different possible semantics. - edges : list - A list of D arrays describing the bin edges for each dimension. - - See Also - -------- - histogram: 1-D histogram - histogram2d: 2-D histogram - - Examples - -------- - >>> r = np.random.randn(100,3) - >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) - >>> H.shape, edges[0].size, edges[1].size, edges[2].size - ((5, 8, 4), 6, 9, 5) - - """ - - try: - # Sample is an ND-array. - N, D = sample.shape - except (AttributeError, ValueError): - # Sample is a sequence of 1D arrays.
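# [Editor's aside, hedged -- the two sample layouts documented above yield
#  identical histograms; doctest-style sketch with arbitrary data:
#      >>> r = np.random.RandomState(0).randn(100, 3)
#      >>> H1, _ = np.histogramdd(r, bins=(5, 8, 4))               # (N, D) array
#      >>> H2, _ = np.histogramdd((r[:, 0], r[:, 1], r[:, 2]),
#      ...                        bins=(5, 8, 4))                  # D sequences
#      >>> np.array_equal(H1, H2)
#      True
#  ]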
- sample = np.atleast_2d(sample).T - N, D = sample.shape - - nbin = np.empty(D, int) - edges = D*[None] - dedges = D*[None] - if weights is not None: - weights = np.asarray(weights) - - try: - M = len(bins) - if M != D: - raise ValueError( - 'The dimension of bins must be equal to the dimension of the ' - ' sample x.') - except TypeError: - # bins is an integer - bins = D*[bins] - - # normalize the range argument - if range is None: - range = (None,) * D - elif len(range) != D: - raise ValueError('range argument must have one entry per dimension') - - # Create edge arrays - for i in _range(D): - if np.ndim(bins[i]) == 0: - if bins[i] < 1: - raise ValueError( - '`bins[{}]` must be positive, when an integer'.format(i)) - smin, smax = _get_outer_edges(sample[:,i], range[i]) - edges[i] = np.linspace(smin, smax, bins[i] + 1) - elif np.ndim(bins[i]) == 1: - edges[i] = np.asarray(bins[i]) - if np.any(edges[i][:-1] > edges[i][1:]): - raise ValueError( - '`bins[{}]` must be monotonically increasing, when an array' - .format(i)) - else: - raise ValueError( - '`bins[{}]` must be a scalar or 1d array'.format(i)) - - nbin[i] = len(edges[i]) + 1 # includes an outlier on each end - dedges[i] = np.diff(edges[i]) - - # Compute the bin number each sample falls into. - Ncount = tuple( - # avoid np.digitize to work around gh-11022 - np.searchsorted(edges[i], sample[:, i], side='right') - for i in _range(D) - ) - - # Using digitize, values that fall on an edge are put in the right bin. - # For the rightmost bin, we want values equal to the right edge to be - # counted in the last bin, and not as an outlier. - for i in _range(D): - # Find which points are on the rightmost edge. - on_edge = (sample[:, i] == edges[i][-1]) - # Shift these points one bin to the left. - Ncount[i][on_edge] -= 1 - - # Compute the sample indices in the flattened histogram matrix. - # This raises an error if the array is too large. - xy = np.ravel_multi_index(Ncount, nbin) - - # Compute the number of repetitions in xy and assign it to the - # flattened histmat. - hist = np.bincount(xy, weights, minlength=nbin.prod()) - - # Shape into a proper matrix - hist = hist.reshape(nbin) - - # This preserves the (bad) behavior observed in gh-7845, for now. - hist = hist.astype(float, casting='safe') - - # Remove outliers (indices 0 and -1 for each dimension). 
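# [Editor's note, added for clarity]: np.ravel_multi_index above folds the
# D per-dimension bin ids into one linear id so a single bincount counts
# every cell (e.g. ids (1, 0) and (2, 3) on a (4, 5) grid flatten to
# 1*5 + 0 == 5 and 2*5 + 3 == 13). Each dimension also carries two extra
# "outlier" bins -- nbin[i] is len(edges[i]) + 1 above -- so the 1:-1 core
# slice below keeps exactly the in-range cells.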
- core = D*(slice(1, -1),) - hist = hist[core] - - # handle the aliasing normed argument - if normed is None: - if density is None: - density = False - elif density is None: - # an explicit normed argument was passed, alias it to the new name - density = normed - else: - raise TypeError("Cannot specify both 'normed' and 'density'") - - if density: - # calculate the probability density function - s = hist.sum() - for i in _range(D): - shape = np.ones(D, int) - shape[i] = nbin[i] - 2 - hist = hist / dedges[i].reshape(shape) - hist /= s - - if (hist.shape != nbin - 2).any(): - raise RuntimeError( - "Internal Shape Error") - return hist, edges diff --git a/venv/lib/python3.7/site-packages/numpy/lib/index_tricks.py b/venv/lib/python3.7/site-packages/numpy/lib/index_tricks.py deleted file mode 100644 index 0438485..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/index_tricks.py +++ /dev/null @@ -1,984 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import functools -import sys -import math - -import numpy.core.numeric as _nx -from numpy.core.numeric import ( - asarray, ScalarType, array, alltrue, cumprod, arange, ndim - ) -from numpy.core.numerictypes import find_common_type, issubdtype - -import numpy.matrixlib as matrixlib -from .function_base import diff -from numpy.core.multiarray import ravel_multi_index, unravel_index -from numpy.core.overrides import set_module -from numpy.core import overrides, linspace -from numpy.lib.stride_tricks import as_strided - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', - 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', - 'diag_indices', 'diag_indices_from' - ] - - -def _ix__dispatcher(*args): - return args - - -@array_function_dispatch(_ix__dispatcher) -def ix_(*args): - """ - Construct an open mesh from multiple sequences. - - This function takes N 1-D sequences and returns N outputs with N - dimensions each, such that the shape is 1 in all but one dimension - and the dimension with the non-unit shape value cycles through all - N dimensions. - - Using `ix_` one can quickly construct index arrays that will index - the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array - ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. - - Parameters - ---------- - args : 1-D sequences - Each sequence should be of integer or boolean type. - Boolean sequences will be interpreted as boolean masks for the - corresponding dimension (equivalent to passing in - ``np.nonzero(boolean_sequence)``). - - Returns - ------- - out : tuple of ndarrays - N arrays with N dimensions each, with N the number of input - sequences. Together these arrays form an open mesh. 
- - See Also - -------- - ogrid, mgrid, meshgrid - - Examples - -------- - >>> a = np.arange(10).reshape(2, 5) - >>> a - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> ixgrid = np.ix_([0, 1], [2, 4]) - >>> ixgrid - (array([[0], - [1]]), array([[2, 4]])) - >>> ixgrid[0].shape, ixgrid[1].shape - ((2, 1), (1, 2)) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - - >>> ixgrid = np.ix_([True, True], [2, 4]) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - - """ - out = [] - nd = len(args) - for k, new in enumerate(args): - if not isinstance(new, _nx.ndarray): - new = asarray(new) - if new.size == 0: - # Explicitly type empty arrays to avoid float default - new = new.astype(_nx.intp) - if new.ndim != 1: - raise ValueError("Cross index must be 1 dimensional") - if issubdtype(new.dtype, _nx.bool_): - new, = new.nonzero() - new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) - out.append(new) - return tuple(out) - -class nd_grid(object): - """ - Construct a multi-dimensional "meshgrid". - - ``grid = nd_grid()`` creates an instance which will return a mesh-grid - when indexed. The dimension and number of the output arrays are equal - to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then the - integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - If instantiated with an argument of ``sparse=True``, the mesh-grid is - open (or not fleshed out) so that only one-dimension of each returned - argument is greater than 1. - - Parameters - ---------- - sparse : bool, optional - Whether the grid is sparse or not. Default is False. - - Notes - ----- - Two instances of `nd_grid` are made available in the NumPy namespace, - `mgrid` and `ogrid`, approximately defined as:: - - mgrid = nd_grid(sparse=False) - ogrid = nd_grid(sparse=True) - - Users should use these pre-defined instances instead of using `nd_grid` - directly. 
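[Editor's aside -- the step rule described above, sketched with the public `mgrid` instance:]

    import numpy as np

    print(np.mgrid[0:1:5j])    # complex step: 5 points, stop *inclusive*
    # -> [0.   0.25 0.5  0.75 1.  ]
    print(np.mgrid[0:1:0.25])  # real step: stop exclusive, as with arange
    # -> [0.   0.25 0.5  0.75]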
- """ - - def __init__(self, sparse=False): - self.sparse = sparse - - def __getitem__(self, key): - try: - size = [] - typ = int - for k in range(len(key)): - step = key[k].step - start = key[k].start - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size.append(int(abs(step))) - typ = float - else: - size.append( - int(math.ceil((key[k].stop - start)/(step*1.0)))) - if (isinstance(step, float) or - isinstance(start, float) or - isinstance(key[k].stop, float)): - typ = float - if self.sparse: - nn = [_nx.arange(_x, dtype=_t) - for _x, _t in zip(size, (typ,)*len(size))] - else: - nn = _nx.indices(size, typ) - for k in range(len(size)): - step = key[k].step - start = key[k].start - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - step = int(abs(step)) - if step != 1: - step = (key[k].stop - start)/float(step-1) - nn[k] = (nn[k]*step+start) - if self.sparse: - slobj = [_nx.newaxis]*len(size) - for k in range(len(size)): - slobj[k] = slice(None, None) - nn[k] = nn[k][tuple(slobj)] - slobj[k] = _nx.newaxis - return nn - except (IndexError, TypeError): - step = key.step - stop = key.stop - start = key.start - if start is None: - start = 0 - if isinstance(step, complex): - step = abs(step) - length = int(step) - if step != 1: - step = (key.stop-start)/float(step-1) - stop = key.stop + step - return _nx.arange(0, length, 1, float)*step + start - else: - return _nx.arange(start, stop, step) - - -class MGridClass(nd_grid): - """ - `nd_grid` instance which returns a dense multi-dimensional "meshgrid". - - An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense - (or fleshed out) mesh-grid when indexed, so that each returned argument - has the same shape. The dimensions and number of the output arrays are - equal to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then - the integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - Returns - ---------- - mesh-grid `ndarrays` all of the same dimensions - - See Also - -------- - numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects - ogrid : like mgrid but returns open (not fleshed out) mesh grids - r_ : array concatenator - - Examples - -------- - >>> np.mgrid[0:5,0:5] - array([[[0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [2, 2, 2, 2, 2], - [3, 3, 3, 3, 3], - [4, 4, 4, 4, 4]], - [[0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4]]]) - >>> np.mgrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. ]) - - """ - def __init__(self): - super(MGridClass, self).__init__(sparse=False) - -mgrid = MGridClass() - -class OGridClass(nd_grid): - """ - `nd_grid` instance which returns an open multi-dimensional "meshgrid". - - An instance of `numpy.lib.index_tricks.nd_grid` which returns an open - (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension - of each returned array is greater than 1. The dimension and number of the - output arrays are equal to the number of indexing dimensions. If the step - length is not a complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 
5j), then - the integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - Returns - ------- - mesh-grid - `ndarrays` with only one dimension not equal to 1 - - See Also - -------- - np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects - mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids - r_ : array concatenator - - Examples - -------- - >>> from numpy import ogrid - >>> ogrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. ]) - >>> ogrid[0:5,0:5] - [array([[0], - [1], - [2], - [3], - [4]]), array([[0, 1, 2, 3, 4]])] - - """ - def __init__(self): - super(OGridClass, self).__init__(sparse=True) - -ogrid = OGridClass() - - -class AxisConcatenator(object): - """ - Translates slice objects to concatenation along an axis. - - For detailed documentation on usage, see `r_`. - """ - # allow ma.mr_ to override this - concatenate = staticmethod(_nx.concatenate) - makemat = staticmethod(matrixlib.matrix) - - def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): - self.axis = axis - self.matrix = matrix - self.trans1d = trans1d - self.ndmin = ndmin - - def __getitem__(self, key): - # handle matrix builder syntax - if isinstance(key, str): - frame = sys._getframe().f_back - mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) - return mymat - - if not isinstance(key, tuple): - key = (key,) - - # copy attributes, since they can be overridden in the first argument - trans1d = self.trans1d - ndmin = self.ndmin - matrix = self.matrix - axis = self.axis - - objs = [] - scalars = [] - arraytypes = [] - scalartypes = [] - - for k, item in enumerate(key): - scalar = False - if isinstance(item, slice): - step = item.step - start = item.start - stop = item.stop - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size = int(abs(step)) - newobj = linspace(start, stop, num=size) - else: - newobj = _nx.arange(start, stop, step) - if ndmin > 1: - newobj = array(newobj, copy=False, ndmin=ndmin) - if trans1d != -1: - newobj = newobj.swapaxes(-1, trans1d) - elif isinstance(item, str): - if k != 0: - raise ValueError("special directives must be the " - "first entry.") - if item in ('r', 'c'): - matrix = True - col = (item == 'c') - continue - if ',' in item: - vec = item.split(',') - try: - axis, ndmin = [int(x) for x in vec[:2]] - if len(vec) == 3: - trans1d = int(vec[2]) - continue - except Exception: - raise ValueError("unknown special directive") - try: - axis = int(item) - continue - except (ValueError, TypeError): - raise ValueError("unknown special directive") - elif type(item) in ScalarType: - newobj = array(item, ndmin=ndmin) - scalars.append(len(objs)) - scalar = True - scalartypes.append(newobj.dtype) - else: - item_ndim = ndim(item) - newobj = array(item, copy=False, subok=True, ndmin=ndmin) - if trans1d != -1 and item_ndim < ndmin: - k2 = ndmin - item_ndim - k1 = trans1d - if k1 < 0: - k1 += k2 + 1 - defaxes = list(range(ndmin)) - axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] - newobj = newobj.transpose(axes) - objs.append(newobj) - if not scalar and isinstance(newobj, _nx.ndarray): - arraytypes.append(newobj.dtype) - - # Ensure that scalars won't up-cast unless warranted - final_dtype = find_common_type(arraytypes, scalartypes) - if final_dtype is not None: - for k in scalars: - objs[k] = objs[k].astype(final_dtype) - - res = self.concatenate(tuple(objs), axis=axis) - - if matrix: - oldndim = res.ndim - 
res = self.makemat(res) - if oldndim == 1 and col: - res = res.T - return res - - def __len__(self): - return 0 - -# separate classes are used here instead of just making r_ = concatenator(0), -# etc. because otherwise we couldn't get the doc string to come out right -# in help(r_) - -class RClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the first axis. - - This is a simple way to build up arrays quickly. There are two use cases. - - 1. If the index expression contains comma separated arrays, then stack - them along their first axis. - 2. If the index expression contains slice notation or scalars then create - a 1-D array with a range indicated by the slice notation. - - If slice notation is used, the syntax ``start:stop:step`` is equivalent - to ``np.arange(start, stop, step)`` inside of the brackets. However, if - ``step`` is an imaginary number (e.g. 100j) then its integer portion is - interpreted as a number-of-points desired and the start and stop are - inclusive. In other words ``start:stop:stepj`` is interpreted as - ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. - After expansion of slice notation, all comma separated sequences are - concatenated together. - - Optional character strings placed as the first element of the index - expression can be used to change the output. The strings 'r' or 'c' result - in matrix output. If the result is 1-D and 'r' is specified, a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then an N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. - - A string integer specifies which axis to stack multiple comma separated - arrays along. A string of two comma-separated integers allows indication - of the minimum number of dimensions to force each entry into as the - second integer (the axis to concatenate along is still the first integer). - - A string with three comma-separated integers allows specification of the - axis to concatenate along, the minimum number of dimensions to force the - entries to, and which axis should contain the start of the arrays which - are less than the specified number of dimensions. In other words the third - integer allows you to specify where the 1's should be placed in the shape - of the arrays that have their shapes upgraded. By default, they are placed - in the front of the shape tuple. The third argument allows you to specify - where the start of the array should be instead. Thus, a third argument of - '0' would place the 1's at the end of the array shape. Negative integers - specify where in the new shape tuple the last dimension of upgraded arrays - should be placed, so the default is '-1'. - - Parameters - ---------- - Not a function, so takes no parameters - - - Returns - ------- - A concatenated ndarray or matrix. - - See Also - -------- - concatenate : Join a sequence of arrays along an existing axis. - c_ : Translates slice objects to concatenation along the second axis. - - Examples - -------- - >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] - array([1, 2, 3, ..., 4, 5, 6]) - >>> np.r_[-1:1:6j, [0]*3, 5, 6] - array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) - - String integers specify the axis to concatenate along or the minimum - number of dimensions to force entries into.
- - >>> a = np.array([[0, 1, 2], [3, 4, 5]]) - >>> np.r_['-1', a, a] # concatenate along last axis - array([[0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5]]) - >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 - array([[1, 2, 3], - [4, 5, 6]]) - - >>> np.r_['0,2,0', [1,2,3], [4,5,6]] - array([[1], - [2], - [3], - [4], - [5], - [6]]) - >>> np.r_['1,2,0', [1,2,3], [4,5,6]] - array([[1, 4], - [2, 5], - [3, 6]]) - - Using 'r' or 'c' as a first string argument creates a matrix. - - >>> np.r_['r',[1,2,3], [4,5,6]] - matrix([[1, 2, 3, 4, 5, 6]]) - - """ - - def __init__(self): - AxisConcatenator.__init__(self, 0) - -r_ = RClass() - -class CClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the second axis. - - This is short-hand for ``np.r_['-1,2,0', index expression]``, which is - useful because of its common occurrence. In particular, arrays will be - stacked along their last axis after being upgraded to at least 2-D with - 1's post-pended to the shape (column vectors made out of 1-D arrays). - - See Also - -------- - column_stack : Stack 1-D arrays as columns into a 2-D array. - r_ : For more detailed documentation. - - Examples - -------- - >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] - array([[1, 4], - [2, 5], - [3, 6]]) - >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] - array([[1, 2, 3, ..., 4, 5, 6]]) - - """ - - def __init__(self): - AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) - - -c_ = CClass() - - -@set_module('numpy') -class ndenumerate(object): - """ - Multidimensional index iterator. - - Return an iterator yielding pairs of array coordinates and values. - - Parameters - ---------- - arr : ndarray - Input array. - - See Also - -------- - ndindex, flatiter - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> for index, x in np.ndenumerate(a): - ... print(index, x) - (0, 0) 1 - (0, 1) 2 - (1, 0) 3 - (1, 1) 4 - - """ - - def __init__(self, arr): - self.iter = asarray(arr).flat - - def __next__(self): - """ - Standard iterator method, returns the index tuple and array value. - - Returns - ------- - coords : tuple of ints - The indices of the current iteration. - val : scalar - The array element of the current iteration. - - """ - return self.iter.coords, next(self.iter) - - def __iter__(self): - return self - - next = __next__ - - -@set_module('numpy') -class ndindex(object): - """ - An N-dimensional iterator object to index arrays. - - Given the shape of an array, an `ndindex` instance iterates over - the N-dimensional index of the array. At each iteration a tuple - of indices is returned, the last dimension is iterated over first. - - Parameters - ---------- - `*args` : ints - The size of each dimension of the array. - - See Also - -------- - ndenumerate, flatiter - - Examples - -------- - >>> for index in np.ndindex(3, 2, 1): - ... print(index) - (0, 0, 0) - (0, 1, 0) - (1, 0, 0) - (1, 1, 0) - (2, 0, 0) - (2, 1, 0) - - """ - - def __init__(self, *shape): - if len(shape) == 1 and isinstance(shape[0], tuple): - shape = shape[0] - x = as_strided(_nx.zeros(1), shape=shape, - strides=_nx.zeros_like(shape)) - self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], - order='C') - - def __iter__(self): - return self - - def ndincr(self): - """ - Increment the multi-dimensional index by one. - - This method is for backward compatibility only: do not use. - """ - next(self) - - def __next__(self): - """ - Standard iterator method, updates the index and returns the index - tuple. 
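[Editor's aside -- checking the shorthand stated in the `CClass` docstring above; the arrays are arbitrary:]

    import numpy as np

    a = np.array([1, 2, 3])
    b = np.array([4, 5, 6])
    # c_ is documented as np.r_['-1,2,0', ...]; the two agree:
    assert np.array_equal(np.c_[a, b], np.r_['-1,2,0', a, b])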
- - Returns - ------- - val : tuple of ints - Returns a tuple containing the indices of the current - iteration. - - """ - next(self._it) - return self._it.multi_index - - next = __next__ - - -# You can do all this with slice() plus a few special objects, -# but there's a lot to remember. This version is simpler because -# it uses the standard array indexing syntax. -# -# Written by Konrad Hinsen -# last revision: 1999-7-23 -# -# Cosmetic changes by T. Oliphant 2001 -# -# - -class IndexExpression(object): - """ - A nicer way to build up index tuples for arrays. - - .. note:: - Use one of the two predefined instances `index_exp` or `s_` - rather than directly using `IndexExpression`. - - For any index combination, including slicing and axis insertion, - ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any - array `a`. However, ``np.index_exp[indices]`` can be used anywhere - in Python code and returns a tuple of slice objects that can be - used in the construction of complex index expressions. - - Parameters - ---------- - maketuple : bool - If True, always returns a tuple. - - See Also - -------- - index_exp : Predefined instance that always returns a tuple: - `index_exp = IndexExpression(maketuple=True)`. - s_ : Predefined instance without tuple conversion: - `s_ = IndexExpression(maketuple=False)`. - - Notes - ----- - You can do all this with `slice()` plus a few special objects, - but there's a lot to remember and this version is simpler because - it uses the standard array indexing syntax. - - Examples - -------- - >>> np.s_[2::2] - slice(2, None, 2) - >>> np.index_exp[2::2] - (slice(2, None, 2),) - - >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] - array([2, 4]) - - """ - - def __init__(self, maketuple): - self.maketuple = maketuple - - def __getitem__(self, item): - if self.maketuple and not isinstance(item, tuple): - return (item,) - else: - return item - -index_exp = IndexExpression(maketuple=True) -s_ = IndexExpression(maketuple=False) - -# End contribution from Konrad. - - -# The following functions complement those in twodim_base, but are -# applicable to N-dimensions. - - -def _fill_diagonal_dispatcher(a, val, wrap=None): - return (a,) - - -@array_function_dispatch(_fill_diagonal_dispatcher) -def fill_diagonal(a, val, wrap=False): - """Fill the main diagonal of the given array of any dimensionality. - - For an array `a` with ``a.ndim >= 2``, the diagonal is the list of - locations with indices ``a[i, ..., i]`` all identical. This function - modifies the input array in-place, it does not return a value. - - Parameters - ---------- - a : array, at least 2-D. - Array whose diagonal is to be filled, it gets modified in-place. - - val : scalar - Value to be written on the diagonal, its type must be compatible with - that of the array a. - - wrap : bool - For tall matrices in NumPy version up to 1.6.2, the - diagonal "wrapped" after N columns. You can have this behavior - with this option. This affects only tall matrices. - - See also - -------- - diag_indices, diag_indices_from - - Notes - ----- - .. versionadded:: 1.4.0 - - This functionality can be obtained via `diag_indices`, but internally - this version uses a much faster implementation that never constructs the - indices and uses simple slicing. 
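[Editor's aside -- the slicing trick mentioned in the Notes above, as a standalone sketch:]

    import numpy as np

    a = np.zeros((3, 3), int)
    # for an (n, n) array the main diagonal is every (n+1)-th flat element
    a.flat[::a.shape[1] + 1] = 5
    print(a)   # same result as np.fill_diagonal(a, 5)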
- - Examples - -------- - >>> a = np.zeros((3, 3), int) - >>> np.fill_diagonal(a, 5) - >>> a - array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5]]) - - The same function can operate on a 4-D array: - - >>> a = np.zeros((3, 3, 3, 3), int) - >>> np.fill_diagonal(a, 4) - - We only show a few blocks for clarity: - - >>> a[0, 0] - array([[4, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - >>> a[1, 1] - array([[0, 0, 0], - [0, 4, 0], - [0, 0, 0]]) - >>> a[2, 2] - array([[0, 0, 0], - [0, 0, 0], - [0, 0, 4]]) - - The wrap option affects only tall matrices: - - >>> # tall matrices no wrap - >>> a = np.zeros((5, 3), int) - >>> np.fill_diagonal(a, 4) - >>> a - array([[4, 0, 0], - [0, 4, 0], - [0, 0, 4], - [0, 0, 0], - [0, 0, 0]]) - - >>> # tall matrices wrap - >>> a = np.zeros((5, 3), int) - >>> np.fill_diagonal(a, 4, wrap=True) - >>> a - array([[4, 0, 0], - [0, 4, 0], - [0, 0, 4], - [0, 0, 0], - [4, 0, 0]]) - - >>> # wide matrices - >>> a = np.zeros((3, 5), int) - >>> np.fill_diagonal(a, 4, wrap=True) - >>> a - array([[4, 0, 0, 0, 0], - [0, 4, 0, 0, 0], - [0, 0, 4, 0, 0]]) - - The anti-diagonal can be filled by reversing the order of elements - using either `numpy.flipud` or `numpy.fliplr`. - - >>> a = np.zeros((3, 3), int); - >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip - >>> a - array([[0, 0, 1], - [0, 2, 0], - [3, 0, 0]]) - >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip - >>> a - array([[0, 0, 3], - [0, 2, 0], - [1, 0, 0]]) - - Note that the order in which the diagonal is filled varies depending - on the flip function. - """ - if a.ndim < 2: - raise ValueError("array must be at least 2-d") - end = None - if a.ndim == 2: - # Explicit, fast formula for the common case. For 2-d arrays, we - # accept rectangular ones. - step = a.shape[1] + 1 - #This is needed to don't have tall matrix have the diagonal wrap. - if not wrap: - end = a.shape[1] * a.shape[1] - else: - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - if not alltrue(diff(a.shape) == 0): - raise ValueError("All dimensions of input must be of equal length") - step = 1 + (cumprod(a.shape[:-1])).sum() - - # Write the value out into the diagonal. - a.flat[:end:step] = val - - -@set_module('numpy') -def diag_indices(n, ndim=2): - """ - Return the indices to access the main diagonal of an array. - - This returns a tuple of indices that can be used to access the main - diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape - (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for - ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` - for ``i = [0..n-1]``. - - Parameters - ---------- - n : int - The size, along each dimension, of the arrays for which the returned - indices can be used. - - ndim : int, optional - The number of dimensions. - - See also - -------- - diag_indices_from - - Notes - ----- - .. 
versionadded:: 1.4.0 - - Examples - -------- - Create a set of indices to access the diagonal of a (4, 4) array: - - >>> di = np.diag_indices(4) - >>> di - (array([0, 1, 2, 3]), array([0, 1, 2, 3])) - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - >>> a[di] = 100 - >>> a - array([[100, 1, 2, 3], - [ 4, 100, 6, 7], - [ 8, 9, 100, 11], - [ 12, 13, 14, 100]]) - - Now, we create indices to manipulate a 3-D array: - - >>> d3 = np.diag_indices(2, 3) - >>> d3 - (array([0, 1]), array([0, 1]), array([0, 1])) - - And use it to set the diagonal of an array of zeros to 1: - - >>> a = np.zeros((2, 2, 2), dtype=int) - >>> a[d3] = 1 - >>> a - array([[[1, 0], - [0, 0]], - [[0, 0], - [0, 1]]]) - - """ - idx = arange(n) - return (idx,) * ndim - - -def _diag_indices_from(arr): - return (arr,) - - -@array_function_dispatch(_diag_indices_from) -def diag_indices_from(arr): - """ - Return the indices to access the main diagonal of an n-dimensional array. - - See `diag_indices` for full details. - - Parameters - ---------- - arr : array, at least 2-D - - See Also - -------- - diag_indices - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - - if not arr.ndim >= 2: - raise ValueError("input array must be at least 2-d") - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - if not alltrue(diff(arr.shape) == 0): - raise ValueError("All dimensions of input must be of equal length") - - return diag_indices(arr.shape[0], arr.ndim) diff --git a/venv/lib/python3.7/site-packages/numpy/lib/mixins.py b/venv/lib/python3.7/site-packages/numpy/lib/mixins.py deleted file mode 100644 index f974a77..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/mixins.py +++ /dev/null @@ -1,182 +0,0 @@ -"""Mixin classes for custom array types that don't inherit from ndarray.""" -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.core import umath as um - - -__all__ = ['NDArrayOperatorsMixin'] - - -def _disables_array_ufunc(obj): - """True when __array_ufunc__ is set to None.""" - try: - return obj.__array_ufunc__ is None - except AttributeError: - return False - - -def _binary_method(ufunc, name): - """Implement a forward binary method with a ufunc, e.g., __add__.""" - def func(self, other): - if _disables_array_ufunc(other): - return NotImplemented - return ufunc(self, other) - func.__name__ = '__{}__'.format(name) - return func - - -def _reflected_binary_method(ufunc, name): - """Implement a reflected binary method with a ufunc, e.g., __radd__.""" - def func(self, other): - if _disables_array_ufunc(other): - return NotImplemented - return ufunc(other, self) - func.__name__ = '__r{}__'.format(name) - return func - - -def _inplace_binary_method(ufunc, name): - """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" - def func(self, other): - return ufunc(self, other, out=(self,)) - func.__name__ = '__i{}__'.format(name) - return func - - -def _numeric_methods(ufunc, name): - """Implement forward, reflected and inplace binary methods with a ufunc.""" - return (_binary_method(ufunc, name), - _reflected_binary_method(ufunc, name), - _inplace_binary_method(ufunc, name)) - - -def _unary_method(ufunc, name): - """Implement a unary special method with a ufunc.""" - def func(self): - return ufunc(self) - func.__name__ = '__{}__'.format(name) - return func - - -class NDArrayOperatorsMixin(object): - """Mixin defining all operator special 
methods using __array_ufunc__. - - This class implements the special methods for almost all of Python's - builtin operators defined in the `operator` module, including comparisons - (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by - deferring to the ``__array_ufunc__`` method, which subclasses must - implement. - - It is useful for writing classes that do not inherit from `numpy.ndarray`, - but that should support arithmetic and numpy universal functions like - arrays as described in `A Mechanism for Overriding Ufuncs - <../../neps/nep-0013-ufunc-overrides.html>`_. - - As an trivial example, consider this implementation of an ``ArrayLike`` - class that simply wraps a NumPy array and ensures that the result of any - arithmetic operation is also an ``ArrayLike`` object:: - - class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): - def __init__(self, value): - self.value = np.asarray(value) - - # One might also consider adding the built-in list type to this - # list, to support operations like np.add(array_like, list) - _HANDLED_TYPES = (np.ndarray, numbers.Number) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - out = kwargs.get('out', ()) - for x in inputs + out: - # Only support operations with instances of _HANDLED_TYPES. - # Use ArrayLike instead of type(self) for isinstance to - # allow subclasses that don't override __array_ufunc__ to - # handle ArrayLike objects. - if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): - return NotImplemented - - # Defer to the implementation of the ufunc on unwrapped values. - inputs = tuple(x.value if isinstance(x, ArrayLike) else x - for x in inputs) - if out: - kwargs['out'] = tuple( - x.value if isinstance(x, ArrayLike) else x - for x in out) - result = getattr(ufunc, method)(*inputs, **kwargs) - - if type(result) is tuple: - # multiple return values - return tuple(type(self)(x) for x in result) - elif method == 'at': - # no return value - return None - else: - # one return value - return type(self)(result) - - def __repr__(self): - return '%s(%r)' % (type(self).__name__, self.value) - - In interactions between ``ArrayLike`` objects and numbers or numpy arrays, - the result is always another ``ArrayLike``: - - >>> x = ArrayLike([1, 2, 3]) - >>> x - 1 - ArrayLike(array([0, 1, 2])) - >>> 1 - x - ArrayLike(array([ 0, -1, -2])) - >>> np.arange(3) - x - ArrayLike(array([-1, -1, -1])) - >>> x - np.arange(3) - ArrayLike(array([1, 1, 1])) - - Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations - with arbitrary, unrecognized types. This ensures that interactions with - ArrayLike preserve a well-defined casting hierarchy. - - .. versionadded:: 1.13 - """ - # Like np.ndarray, this mixin class implements "Option 1" from the ufunc - # overrides NEP. 
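# --- Editor's sketch (assumed class name, not part of the deleted file):
# --- the pattern this mixin enables — implement only __array_ufunc__ and
# --- inherit every operator special method defined below.
import numpy as np

class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap Wrapped operands, apply the ufunc, re-wrap the result.
        unwrapped = tuple(x.value if isinstance(x, Wrapped) else x
                          for x in inputs)
        return Wrapped(getattr(ufunc, method)(*unwrapped, **kwargs))

w = Wrapped([1, 2, 3])
assert isinstance(w + 1, Wrapped)   # __add__ supplied by the mixin
assert isinstance(-w, Wrapped)      # __neg__ supplied by the mixin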
- - # comparisons don't have reflected and in-place versions - __lt__ = _binary_method(um.less, 'lt') - __le__ = _binary_method(um.less_equal, 'le') - __eq__ = _binary_method(um.equal, 'eq') - __ne__ = _binary_method(um.not_equal, 'ne') - __gt__ = _binary_method(um.greater, 'gt') - __ge__ = _binary_method(um.greater_equal, 'ge') - - # numeric methods - __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') - __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') - __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') - __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( - um.matmul, 'matmul') - if sys.version_info.major < 3: - # Python 3 uses only __truediv__ and __floordiv__ - __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div') - __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( - um.true_divide, 'truediv') - __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( - um.floor_divide, 'floordiv') - __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') - __divmod__ = _binary_method(um.divmod, 'divmod') - __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') - # __idivmod__ does not exist - # TODO: handle the optional third argument for __pow__? - __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') - __lshift__, __rlshift__, __ilshift__ = _numeric_methods( - um.left_shift, 'lshift') - __rshift__, __rrshift__, __irshift__ = _numeric_methods( - um.right_shift, 'rshift') - __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') - __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') - __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') - - # unary methods - __neg__ = _unary_method(um.negative, 'neg') - __pos__ = _unary_method(um.positive, 'pos') - __abs__ = _unary_method(um.absolute, 'abs') - __invert__ = _unary_method(um.invert, 'invert') diff --git a/venv/lib/python3.7/site-packages/numpy/lib/nanfunctions.py b/venv/lib/python3.7/site-packages/numpy/lib/nanfunctions.py deleted file mode 100644 index 8e2a34e..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/nanfunctions.py +++ /dev/null @@ -1,1672 +0,0 @@ -""" -Functions that ignore NaN. - -Functions ---------- - -- `nanmin` -- minimum non-NaN value -- `nanmax` -- maximum non-NaN value -- `nanargmin` -- index of minimum non-NaN value -- `nanargmax` -- index of maximum non-NaN value -- `nansum` -- sum of non-NaN values -- `nanprod` -- product of non-NaN values -- `nancumsum` -- cumulative sum of non-NaN values -- `nancumprod` -- cumulative product of non-NaN values -- `nanmean` -- mean of non-NaN values -- `nanvar` -- variance of non-NaN values -- `nanstd` -- standard deviation of non-NaN values -- `nanmedian` -- median of non-NaN values -- `nanquantile` -- qth quantile of non-NaN values -- `nanpercentile` -- qth percentile of non-NaN values - -""" -from __future__ import division, absolute_import, print_function - -import functools -import warnings -import numpy as np -from numpy.lib import function_base -from numpy.core import overrides - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -__all__ = [ - 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', - 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', - 'nancumsum', 'nancumprod', 'nanquantile' - ] - - -def _nan_mask(a, out=None): - """ - Parameters - ---------- - a : array-like - Input array with at least 1 dimension. 
- out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output and will prevent the allocation of a new array. - - Returns - ------- - y : bool ndarray or True - A bool array where ``np.nan`` positions are marked with ``False`` - and other positions are marked with ``True``. If the type of ``a`` - is such that it can't possibly contain ``np.nan``, returns ``True``. - """ - # we assume that a is an array for this private function - - if a.dtype.kind not in 'fc': - return True - - y = np.isnan(a, out=out) - y = np.invert(y, out=y) - return y - -def _replace_nan(a, val): - """ - If `a` is of inexact type, make a copy of `a`, replace NaNs with - the `val` value, and return the copy together with a boolean mask - marking the locations where NaNs were present. If `a` is not of - inexact type, do nothing and return `a` together with a mask of None. - - Note that scalars will end up as array scalars, which is important - for using the result as the value of the out argument in some - operations. - - Parameters - ---------- - a : array-like - Input array. - val : float - NaN values are set to val before doing the operation. - - Returns - ------- - y : ndarray - If `a` is of inexact type, return a copy of `a` with the NaNs - replaced by the fill value, otherwise return `a`. - mask: {bool, None} - If `a` is of inexact type, return a boolean mask marking locations of - NaNs, otherwise return None. - - """ - a = np.asanyarray(a) - - if a.dtype == np.object_: - # object arrays do not support `isnan` (gh-9009), so make a guess - mask = np.not_equal(a, a, dtype=bool) - elif issubclass(a.dtype.type, np.inexact): - mask = np.isnan(a) - else: - mask = None - - if mask is not None: - a = np.array(a, subok=True, copy=True) - np.copyto(a, val, where=mask) - - return a, mask - - -def _copyto(a, val, mask): - """ - Replace values in `a` with NaN where `mask` is True. This differs from - copyto in that it will deal with the case where `a` is a numpy scalar. - - Parameters - ---------- - a : ndarray or numpy scalar - Array or numpy scalar some of whose values are to be replaced - by val. - val : numpy scalar - Value used a replacement. - mask : ndarray, scalar - Boolean array. Where True the corresponding element of `a` is - replaced by `val`. Broadcasts. - - Returns - ------- - res : ndarray, scalar - Array with elements replaced or scalar `val`. 
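# --- Editor's sketch (not part of the deleted file): the mask-and-fill
# --- pattern that _nan_mask/_replace_nan implement, written out directly.
import numpy as np

a = np.array([1.0, np.nan, 3.0])
mask = np.isnan(a)                      # True where a value is NaN
filled = np.where(mask, 0.0, a)         # NaNs replaced by a neutral value
assert filled.tolist() == [1.0, 0.0, 3.0]
assert np.sum(filled) == np.nansum(a)   # nansum == plain sum after filling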
- - """ - if isinstance(a, np.ndarray): - np.copyto(a, val, where=mask, casting='unsafe') - else: - a = a.dtype.type(val) - return a - - -def _remove_nan_1d(arr1d, overwrite_input=False): - """ - Equivalent to arr1d[~arr1d.isnan()], but in a different order - - Presumably faster as it incurs fewer copies - - Parameters - ---------- - arr1d : ndarray - Array to remove nans from - overwrite_input : bool - True if `arr1d` can be modified in place - - Returns - ------- - res : ndarray - Array with nan elements removed - overwrite_input : bool - True if `res` can be modified in place, given the constraint on the - input - """ - - c = np.isnan(arr1d) - s = np.nonzero(c)[0] - if s.size == arr1d.size: - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=5) - return arr1d[:0], True - elif s.size == 0: - return arr1d, overwrite_input - else: - if not overwrite_input: - arr1d = arr1d.copy() - # select non-nans at end of array - enonan = arr1d[-s.size:][~c[-s.size:]] - # fill nans in beginning of array with non-nans of end - arr1d[s[:enonan.size]] = enonan - - return arr1d[:-s.size], True - - -def _divide_by_count(a, b, out=None): - """ - Compute a/b ignoring invalid results. If `a` is an array the division - is done in place. If `a` is a scalar, then its type is preserved in the - output. If out is None, then then a is used instead so that the - division is in place. Note that this is only called with `a` an inexact - type. - - Parameters - ---------- - a : {ndarray, numpy scalar} - Numerator. Expected to be of inexact type but not checked. - b : {ndarray, numpy scalar} - Denominator. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. - - Returns - ------- - ret : {ndarray, numpy scalar} - The return value is a/b. If `a` was an ndarray the division is done - in place. If `a` is a numpy scalar, the division preserves its type. - - """ - with np.errstate(invalid='ignore', divide='ignore'): - if isinstance(a, np.ndarray): - if out is None: - return np.divide(a, b, out=a, casting='unsafe') - else: - return np.divide(a, b, out=out, casting='unsafe') - else: - if out is None: - return a.dtype.type(a / b) - else: - # This is questionable, but currently a numpy scalar can - # be output to a zero dimensional array. - return np.divide(a, b, out=out, casting='unsafe') - - -def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmin_dispatcher) -def nanmin(a, axis=None, out=None, keepdims=np._NoValue): - """ - Return minimum of an array or minimum along an axis, ignoring any NaNs. - When all-NaN slices are encountered a ``RuntimeWarning`` is raised and - Nan is returned for that slice. - - Parameters - ---------- - a : array_like - Array containing numbers whose minimum is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the minimum is computed. The default is to compute - the minimum of the flattened array. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. - - .. 
versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If the value is anything but the default, then - `keepdims` will be passed through to the `min` method - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 - - Returns - ------- - nanmin : ndarray - An array with the same shape as `a`, with the specified axis - removed. If `a` is a 0-d array, or if axis is None, an ndarray - scalar is returned. The same dtype as `a` is returned. - - See Also - -------- - nanmax : - The maximum value of an array along a given axis, ignoring any NaNs. - amin : - The minimum value of an array along a given axis, propagating any NaNs. - fmin : - Element-wise minimum of two arrays, ignoring any NaNs. - minimum : - Element-wise minimum of two arrays, propagating any NaNs. - isnan : - Shows which elements are Not a Number (NaN). - isfinite: - Shows which elements are neither NaN nor infinity. - - amax, fmax, maximum - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative - infinity is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.min. - - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmin(a) - 1.0 - >>> np.nanmin(a, axis=0) - array([1., 2.]) - >>> np.nanmin(a, axis=1) - array([1., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmin([1, 2, np.nan, np.inf]) - 1.0 - >>> np.nanmin([1, 2, np.nan, np.NINF]) - -inf - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is np.ndarray and a.dtype != np.object_: - # Fast, but not safe for subclasses of ndarray, or object arrays, - # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) - res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) - if np.isnan(res).any(): - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=3) - else: - # Slow, but safe for subclasses of ndarray - a, mask = _replace_nan(a, +np.inf) - res = np.amin(a, axis=axis, out=out, **kwargs) - if mask is None: - return res - - # Check for all-NaN axis - mask = np.all(mask, axis=axis, **kwargs) - if np.any(mask): - res = _copyto(res, np.nan, mask) - warnings.warn("All-NaN axis encountered", RuntimeWarning, - stacklevel=3) - return res - - -def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmax_dispatcher) -def nanmax(a, axis=None, out=None, keepdims=np._NoValue): - """ - Return the maximum of an array or maximum along an axis, ignoring any - NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is - raised and NaN is returned for that slice. - - Parameters - ---------- - a : array_like - Array containing numbers whose maximum is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the maximum is computed. The default is to compute - the maximum of the flattened array. - out : ndarray, optional - Alternate output array in which to place the result. 
The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If the value is anything but the default, then - `keepdims` will be passed through to the `max` method - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 - - Returns - ------- - nanmax : ndarray - An array with the same shape as `a`, with the specified axis removed. - If `a` is a 0-d array, or if axis is None, an ndarray scalar is - returned. The same dtype as `a` is returned. - - See Also - -------- - nanmin : - The minimum value of an array along a given axis, ignoring any NaNs. - amax : - The maximum value of an array along a given axis, propagating any NaNs. - fmax : - Element-wise maximum of two arrays, ignoring any NaNs. - maximum : - Element-wise maximum of two arrays, propagating any NaNs. - isnan : - Shows which elements are Not a Number (NaN). - isfinite: - Shows which elements are neither NaN nor infinity. - - amin, fmin, minimum - - Notes - ----- - NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative - infinity is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.max. - - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmax(a) - 3.0 - >>> np.nanmax(a, axis=0) - array([3., 2.]) - >>> np.nanmax(a, axis=1) - array([2., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmax([1, 2, np.nan, np.NINF]) - 2.0 - >>> np.nanmax([1, 2, np.nan, np.inf]) - inf - - """ - kwargs = {} - if keepdims is not np._NoValue: - kwargs['keepdims'] = keepdims - if type(a) is np.ndarray and a.dtype != np.object_: - # Fast, but not safe for subclasses of ndarray, or object arrays, - # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) - res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) - if np.isnan(res).any(): - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=3) - else: - # Slow, but safe for subclasses of ndarray - a, mask = _replace_nan(a, -np.inf) - res = np.amax(a, axis=axis, out=out, **kwargs) - if mask is None: - return res - - # Check for all-NaN axis - mask = np.all(mask, axis=axis, **kwargs) - if np.any(mask): - res = _copyto(res, np.nan, mask) - warnings.warn("All-NaN axis encountered", RuntimeWarning, - stacklevel=3) - return res - - -def _nanargmin_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_nanargmin_dispatcher) -def nanargmin(a, axis=None): - """ - Return the indices of the minimum values in the specified axis ignoring - NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results - cannot be trusted if a slice contains only NaNs and Infs. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. 
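# --- Editor's sketch (not part of the deleted file): the replace-then-
# --- reduce strategy nanmax/nanargmin use — substitute the reduction's
# --- identity element for NaN, then run the ordinary reduction.
import numpy as np

a = np.array([[np.nan, 4.0], [2.0, 3.0]])
assert np.nanmax(a) == np.amax(np.where(np.isnan(a), -np.inf, a))
assert np.nanargmin(a) == np.argmin(np.where(np.isnan(a), np.inf, a))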
- - See Also - -------- - argmin, nanargmax - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmin(a) - 0 - >>> np.nanargmin(a) - 2 - >>> np.nanargmin(a, axis=0) - array([1, 1]) - >>> np.nanargmin(a, axis=1) - array([1, 0]) - - """ - a, mask = _replace_nan(a, np.inf) - res = np.argmin(a, axis=axis) - if mask is not None: - mask = np.all(mask, axis=axis) - if np.any(mask): - raise ValueError("All-NaN slice encountered") - return res - - -def _nanargmax_dispatcher(a, axis=None): - return (a,) - - -@array_function_dispatch(_nanargmax_dispatcher) -def nanargmax(a, axis=None): - """ - Return the indices of the maximum values in the specified axis ignoring - NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the - results cannot be trusted if a slice contains only NaNs and -Infs. - - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. - - See Also - -------- - argmax, nanargmin - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmax(a) - 0 - >>> np.nanargmax(a) - 1 - >>> np.nanargmax(a, axis=0) - array([1, 0]) - >>> np.nanargmax(a, axis=1) - array([1, 1]) - - """ - a, mask = _replace_nan(a, -np.inf) - res = np.argmax(a, axis=axis) - if mask is not None: - mask = np.all(mask, axis=axis) - if np.any(mask): - raise ValueError("All-NaN slice encountered") - return res - - -def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nansum_dispatcher) -def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Return the sum of array elements over a given axis treating Not a - Numbers (NaNs) as zero. - - In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or - empty. In later versions zero is returned. - - Parameters - ---------- - a : array_like - Array containing numbers whose sum is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the sum is computed. The default is to compute the - sum of the flattened array. - dtype : data-type, optional - The type of the returned array and of the accumulator in which the - elements are summed. By default, the dtype of `a` is used. An - exception is when `a` has an integer type with less precision than - the platform (u)intp. In that case, the default will be either - (u)int32 or (u)int64 depending on whether the platform is 32 or 64 - bits. For inexact inputs, dtype must be inexact. - - .. versionadded:: 1.8.0 - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``. If provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. The casting of NaN to integer - can yield unexpected results. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - - If the value is anything but the default, then - `keepdims` will be passed through to the `mean` or `sum` methods - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - .. 
versionadded:: 1.8.0 - - Returns - ------- - nansum : ndarray. - A new array holding the result is returned unless `out` is - specified, in which it is returned. The result has the same - size as `a`, and the same shape as `a` if `axis` is not None - or `a` is a 1-d array. - - See Also - -------- - numpy.sum : Sum across array propagating NaNs. - isnan : Show which elements are NaN. - isfinite: Show which elements are not NaN or +/-inf. - - Notes - ----- - If both positive and negative infinity are present, the sum will be Not - A Number (NaN). - - Examples - -------- - >>> np.nansum(1) - 1 - >>> np.nansum([1]) - 1 - >>> np.nansum([1, np.nan]) - 1.0 - >>> a = np.array([[1, 1], [1, np.nan]]) - >>> np.nansum(a) - 3.0 - >>> np.nansum(a, axis=0) - array([2., 1.]) - >>> np.nansum([1, np.nan, np.inf]) - inf - >>> np.nansum([1, np.nan, np.NINF]) - -inf - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) - ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present - nan - - """ - a, mask = _replace_nan(a, 0) - return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - -def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanprod_dispatcher) -def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Return the product of array elements over a given axis treating Not a - Numbers (NaNs) as ones. - - One is returned for slices that are all-NaN or empty. - - .. versionadded:: 1.10.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose product is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the product is computed. The default is to compute - the product of the flattened array. - dtype : data-type, optional - The type of the returned array and of the accumulator in which the - elements are summed. By default, the dtype of `a` is used. An - exception is when `a` has an integer type with less precision than - the platform (u)intp. In that case, the default will be either - (u)int32 or (u)int64 depending on whether the platform is 32 or 64 - bits. For inexact inputs, dtype must be inexact. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``. If provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. The casting of NaN to integer - can yield unexpected results. - keepdims : bool, optional - If True, the axes which are reduced are left in the result as - dimensions with size one. With this option, the result will - broadcast correctly against the original `arr`. - - Returns - ------- - nanprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case it is returned. - - See Also - -------- - numpy.prod : Product across array propagating NaNs. - isnan : Show which elements are NaN. 
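# --- Editor's sketch (not part of the deleted file): NaN acts as the
# --- identity element of the operation — 0 for nansum, 1 for nanprod.
import numpy as np

a = np.array([1.0, np.nan, 3.0])
assert np.nansum(a) == 4.0                   # NaN counted as 0
assert np.nanprod(a) == 3.0                  # NaN counted as 1
assert np.nansum(np.array([np.nan])) == 0.0  # all-NaN slice sums to 0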
- - Examples - -------- - >>> np.nanprod(1) - 1 - >>> np.nanprod([1]) - 1 - >>> np.nanprod([1, np.nan]) - 1.0 - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanprod(a) - 6.0 - >>> np.nanprod(a, axis=0) - array([3., 2.]) - - """ - a, mask = _replace_nan(a, 1) - return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - -def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_nancumsum_dispatcher) -def nancumsum(a, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of array elements over a given axis treating Not a - Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are - encountered and leading NaNs are replaced by zeros. - - Zeros are returned for slices that are all-NaN or empty. - - .. versionadded:: 1.12.0 - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative sum is computed. The default - (None) is to compute the cumsum over the flattened array. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `ufuncs-output-type` for - more details. - - Returns - ------- - nancumsum : ndarray. - A new array holding the result is returned unless `out` is - specified, in which it is returned. The result has the same - size as `a`, and the same shape as `a` if `axis` is not None - or `a` is a 1-d array. - - See Also - -------- - numpy.cumsum : Cumulative sum across array propagating NaNs. - isnan : Show which elements are NaN. - - Examples - -------- - >>> np.nancumsum(1) - array([1]) - >>> np.nancumsum([1]) - array([1]) - >>> np.nancumsum([1, np.nan]) - array([1., 1.]) - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nancumsum(a) - array([1., 3., 6., 6.]) - >>> np.nancumsum(a, axis=0) - array([[1., 2.], - [4., 2.]]) - >>> np.nancumsum(a, axis=1) - array([[1., 3.], - [3., 3.]]) - - """ - a, mask = _replace_nan(a, 0) - return np.cumsum(a, axis=axis, dtype=dtype, out=out) - - -def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): - return (a, out) - - -@array_function_dispatch(_nancumprod_dispatcher) -def nancumprod(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product of array elements over a given axis treating Not a - Numbers (NaNs) as one. The cumulative product does not change when NaNs are - encountered and leading NaNs are replaced by ones. - - Ones are returned for slices that are all-NaN or empty. - - .. versionadded:: 1.12.0 - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative product is computed. By default - the input is flattened. - dtype : dtype, optional - Type of the returned array, as well as of the accumulator in which - the elements are multiplied. If *dtype* is not specified, it - defaults to the dtype of `a`, unless `a` has an integer dtype with - a precision less than that of the default platform integer. In - that case, the default platform integer is used instead. 
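# --- Editor's sketch (not part of the deleted file): the cumulative
# --- variants leave the running total unchanged at NaN positions.
import numpy as np

a = np.array([1.0, np.nan, 3.0])
assert np.nancumsum(a).tolist() == [1.0, 1.0, 4.0]   # NaN adds 0
assert np.nancumprod(a).tolist() == [1.0, 1.0, 3.0]  # NaN multiplies by 1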
- out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type of the resulting values will be cast if necessary. - - Returns - ------- - nancumprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case it is returned. - - See Also - -------- - numpy.cumprod : Cumulative product across array propagating NaNs. - isnan : Show which elements are NaN. - - Examples - -------- - >>> np.nancumprod(1) - array([1]) - >>> np.nancumprod([1]) - array([1]) - >>> np.nancumprod([1, np.nan]) - array([1., 1.]) - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nancumprod(a) - array([1., 2., 6., 6.]) - >>> np.nancumprod(a, axis=0) - array([[1., 2.], - [3., 2.]]) - >>> np.nancumprod(a, axis=1) - array([[1., 2.], - [3., 3.]]) - - """ - a, mask = _replace_nan(a, 1) - return np.cumprod(a, axis=axis, dtype=dtype, out=out) - - -def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmean_dispatcher) -def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): - """ - Compute the arithmetic mean along the specified axis, ignoring NaNs. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - `float64` intermediate and return values are used for integer inputs. - - For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the means are computed. The default is to compute - the mean of the flattened array. - dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default - is `float64`; for inexact inputs, it is the same as the input - dtype. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `ufuncs-output-type` for more details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If the value is anything but the default, then - `keepdims` will be passed through to the `mean` or `sum` methods - of sub-classes of `ndarray`. If the sub-classes methods - does not implement `keepdims` any exceptions will be raised. - - Returns - ------- - m : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. Nan is - returned for slices that contain only NaNs. - - See Also - -------- - average : Weighted average - mean : Arithmetic mean taken while not ignoring NaNs - var, nanvar - - Notes - ----- - The arithmetic mean is the sum of the non-NaN elements along the axis - divided by the number of non-NaN elements. - - Note that for floating-point input, the mean is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32`. 
Specifying a - higher-precision accumulator using the `dtype` keyword can alleviate - this issue. - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanmean(a) - 2.6666666666666665 - >>> np.nanmean(a, axis=0) - array([2., 4.]) - >>> np.nanmean(a, axis=1) - array([1., 3.5]) # may vary - - """ - arr, mask = _replace_nan(a, 0) - if mask is None: - return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - if dtype is not None: - dtype = np.dtype(dtype) - if dtype is not None and not issubclass(dtype.type, np.inexact): - raise TypeError("If a is inexact, then dtype must be inexact") - if out is not None and not issubclass(out.dtype.type, np.inexact): - raise TypeError("If a is inexact, then out must be inexact") - - cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims) - tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - avg = _divide_by_count(tot, cnt, out=out) - - isbad = (cnt == 0) - if isbad.any(): - warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3) - # NaN is the only possible bad value, so no further - # action is needed to handle bad results. - return avg - - -def _nanmedian1d(arr1d, overwrite_input=False): - """ - Private function for rank 1 arrays. Compute the median ignoring NaNs. - See nanmedian for parameter usage - """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) - if arr1d.size == 0: - return np.nan - - return np.median(arr1d, overwrite_input=overwrite_input) - - -def _nanmedian(a, axis=None, out=None, overwrite_input=False): - """ - Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanmedian for parameter usage - - """ - if axis is None or a.ndim == 1: - part = a.ravel() - if out is None: - return _nanmedian1d(part, overwrite_input) - else: - out[...] = _nanmedian1d(part, overwrite_input) - return out - else: - # for small medians use sort + indexing which is still faster than - # apply_along_axis - # benchmarked with shuffled (50, 50, x) containing a few NaN - if a.shape[axis] < 600: - return _nanmedian_small(a, axis, out, overwrite_input) - result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) - if out is not None: - out[...] = result - return result - - -def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): - """ - sort + indexing median, faster for small medians along multiple - dimensions due to the high overhead of apply_along_axis - - see nanmedian for parameter usage - """ - a = np.ma.masked_array(a, np.isnan(a)) - m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) - for i in range(np.count_nonzero(m.mask.ravel())): - warnings.warn("All-NaN slice encountered", RuntimeWarning, - stacklevel=4) - if out is not None: - out[...] = m.filled(np.nan) - return out - return m.filled(np.nan) - - -def _nanmedian_dispatcher( - a, axis=None, out=None, overwrite_input=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanmedian_dispatcher) -def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): - """ - Compute the median along the specified axis, while ignoring NaNs. - - Returns the median of the array elements. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : {int, sequence of int, None}, optional - Axis or axes along which the medians are computed. 
The default - is to compute the median along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for - calculations. The input array will be modified by the call to - `median`. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. If `overwrite_input` is ``True`` and `a` is not already an - `ndarray`, an error will be raised. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If this is anything but the default value it will be passed - through (in the special case of an empty array) to the - `mean` function of the underlying array. If the array is - a sub-class and `mean` does not have the kwarg `keepdims` this - will raise a RuntimeError. - - Returns - ------- - median : ndarray - A new array holding the result. If the input contains integers - or floats smaller than ``float64``, then the output data-type is - ``np.float64``. Otherwise, the data-type of the output is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - mean, median, percentile - - Notes - ----- - Given a vector ``V`` of length ``N``, the median of ``V`` is the - middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., - ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two - middle values of ``V_sorted`` when ``N`` is even. - - Examples - -------- - >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) - >>> a[0, 1] = np.nan - >>> a - array([[10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.median(a) - nan - >>> np.nanmedian(a) - 3.0 - >>> np.nanmedian(a, axis=0) - array([6.5, 2. , 2.5]) - >>> np.median(a, axis=1) - array([nan, 2.]) - >>> b = a.copy() - >>> np.nanmedian(b, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.nanmedian(b, axis=None, overwrite_input=True) - 3.0 - >>> assert not np.all(a==b) - - """ - a = np.asanyarray(a) - # apply_along_axis in _nanmedian doesn't handle empty arrays well, - # so deal them upfront - if a.size == 0: - return np.nanmean(a, axis, out=out, keepdims=keepdims) - - r, k = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out, - overwrite_input=overwrite_input) - if keepdims and keepdims is not np._NoValue: - return r.reshape(k) - else: - return r - - -def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_nanpercentile_dispatcher) -def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=np._NoValue): - """ - Compute the qth percentile of the data along the specified axis, - while ignoring nan values. - - Returns the qth percentile(s) of the array elements. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array, containing - nan values to be ignored. 
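# --- Editor's sketch (not part of the deleted file): nanmean is exactly
# --- the tot/cnt division performed above — the sum of the valid entries
# --- divided by their count.
import numpy as np

a = np.array([[1.0, np.nan], [3.0, 4.0]])
cnt = np.sum(~np.isnan(a))              # 3 valid values
tot = np.nansum(a)                      # 8.0
assert np.isclose(np.nanmean(a), tot / cnt)
assert np.nanmedian(a) == 3.0           # median of the valid [1, 3, 4]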
- q : array_like of float - Percentile or sequence of percentiles to compute, which must be between - 0 and 100 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the percentiles are computed. The - default is to compute the percentile(s) along a flattened - version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. In this case, the contents of the input - `a` after this function completes is undefined. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired percentile lies between two data points - ``i < j``: - - * 'linear': ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * 'lower': ``i``. - * 'higher': ``j``. - * 'nearest': ``i`` or ``j``, whichever is nearest. - * 'midpoint': ``(i + j) / 2``. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - If this is anything but the default value it will be passed - through (in the special case of an empty array) to the - `mean` function of the underlying array. If the array is - a sub-class and `mean` does not have the kwarg `keepdims` this - will raise a RuntimeError. - - Returns - ------- - percentile : scalar or ndarray - If `q` is a single percentile and `axis=None`, then the result - is a scalar. If multiple percentiles are given, first axis of - the result corresponds to the percentiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - nanmean - nanmedian : equivalent to ``nanpercentile(..., 50)`` - percentile, median, mean - nanquantile : equivalent to nanpercentile, but with q in the range [0, 1]. - - Notes - ----- - Given a vector ``V`` of length ``N``, the ``q``-th percentile of - ``V`` is the value ``q/100`` of the way from the minimum to the - maximum in a sorted copy of ``V``. The values and distances of - the two nearest neighbors as well as the `interpolation` parameter - will determine the percentile if the normalized ranking does not - match the location of ``q`` exactly. This function is the same as - the median if ``q=50``, the same as the minimum if ``q=0`` and the - same as the maximum if ``q=100``. - - Examples - -------- - >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) - >>> a[0][1] = np.nan - >>> a - array([[10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.percentile(a, 50) - nan - >>> np.nanpercentile(a, 50) - 3.0 - >>> np.nanpercentile(a, 50, axis=0) - array([6.5, 2. , 2.5]) - >>> np.nanpercentile(a, 50, axis=1, keepdims=True) - array([[7.], - [2.]]) - >>> m = np.nanpercentile(a, 50, axis=0) - >>> out = np.zeros_like(m) - >>> np.nanpercentile(a, 50, axis=0, out=out) - array([6.5, 2. , 2.5]) - >>> m - array([6.5, 2. 
, 2.5]) - - >>> b = a.copy() - >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - - """ - a = np.asanyarray(a) - q = np.true_divide(q, 100.0) # handles the asarray for us too - if not function_base._quantile_is_valid(q): - raise ValueError("Percentiles must be in the range [0, 100]") - return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - interpolation=None, keepdims=None): - return (a, q, out) - - -@array_function_dispatch(_nanquantile_dispatcher) -def nanquantile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=np._NoValue): - """ - Compute the qth quantile of the data along the specified axis, - while ignoring nan values. - Returns the qth quantile(s) of the array elements. - - .. versionadded:: 1.15.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array, containing - nan values to be ignored - q : array_like of float - Quantile or sequence of quantiles to compute, which must be between - 0 and 1 inclusive. - axis : {int, tuple of int, None}, optional - Axis or axes along which the quantiles are computed. The - default is to compute the quantile(s) along a flattened - version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow the input array `a` to be modified by intermediate - calculations, to save memory. In this case, the contents of the input - `a` after this function completes is undefined. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to - use when the desired quantile lies between two data points - ``i < j``: - - * linear: ``i + (j - i) * fraction``, where ``fraction`` - is the fractional part of the index surrounded by ``i`` - and ``j``. - * lower: ``i``. - * higher: ``j``. - * nearest: ``i`` or ``j``, whichever is nearest. - * midpoint: ``(i + j) / 2``. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original array `a`. - - If this is anything but the default value it will be passed - through (in the special case of an empty array) to the - `mean` function of the underlying array. If the array is - a sub-class and `mean` does not have the kwarg `keepdims` this - will raise a RuntimeError. - - Returns - ------- - quantile : scalar or ndarray - If `q` is a single percentile and `axis=None`, then the result - is a scalar. If multiple quantiles are given, first axis of - the result corresponds to the quantiles. The other axes are - the axes that remain after the reduction of `a`. If the input - contains integers or floats smaller than ``float64``, the output - data-type is ``float64``. Otherwise, the output data-type is the - same as that of the input. If `out` is specified, that array is - returned instead. - - See Also - -------- - quantile - nanmean, nanmedian - nanmedian : equivalent to ``nanquantile(..., 0.5)`` - nanpercentile : same as nanquantile, but with q in the range [0, 100]. 
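# --- Editor's sketch (not part of the deleted file): nanpercentile is
# --- nanquantile with q rescaled from [0, 100] to [0, 1].
import numpy as np

a = np.array([10.0, np.nan, 4.0, 3.0, 2.0, 1.0])
assert np.nanpercentile(a, 50) == np.nanquantile(a, 0.5)
assert np.nanquantile(a, 0.5) == np.nanmedian(a)   # q=0.5 is the median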
- - Examples - -------- - >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) - >>> a[0][1] = np.nan - >>> a - array([[10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.quantile(a, 0.5) - nan - >>> np.nanquantile(a, 0.5) - 3.0 - >>> np.nanquantile(a, 0.5, axis=0) - array([6.5, 2. , 2.5]) - >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) - array([[7.], - [2.]]) - >>> m = np.nanquantile(a, 0.5, axis=0) - >>> out = np.zeros_like(m) - >>> np.nanquantile(a, 0.5, axis=0, out=out) - array([6.5, 2. , 2.5]) - >>> m - array([6.5, 2. , 2.5]) - >>> b = a.copy() - >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) - array([7., 2.]) - >>> assert not np.all(a==b) - """ - a = np.asanyarray(a) - q = np.asanyarray(q) - if not function_base._quantile_is_valid(q): - raise ValueError("Quantiles must be in the range [0, 1]") - return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, interpolation, keepdims) - - -def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=np._NoValue): - """Assumes that q is in [0, 1], and is an ndarray""" - # apply_along_axis in _nanpercentile doesn't handle empty arrays well, - # so deal them upfront - if a.size == 0: - return np.nanmean(a, axis, out=out, keepdims=keepdims) - - r, k = function_base._ureduce( - a, func=_nanquantile_ureduce_func, q=q, axis=axis, out=out, - overwrite_input=overwrite_input, interpolation=interpolation - ) - if keepdims and keepdims is not np._NoValue: - return r.reshape(q.shape + k) - else: - return r - - -def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear'): - """ - Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanpercentile for parameter usage - """ - if axis is None or a.ndim == 1: - part = a.ravel() - result = _nanquantile_1d(part, q, overwrite_input, interpolation) - else: - result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, interpolation) - # apply_along_axis fills in collapsed axis with results. - # Move that axis to the beginning to match percentile's - # convention. - if q.ndim != 0: - result = np.moveaxis(result, axis, 0) - - if out is not None: - out[...] = result - return result - - -def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'): - """ - Private function for rank 1 arrays. Compute quantile ignoring NaNs. - See nanpercentile for parameter usage - """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) - if arr1d.size == 0: - return np.full(q.shape, np.nan)[()] # convert to scalar - - return function_base._quantile_unchecked( - arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) - - -def _nanvar_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanvar_dispatcher) -def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the variance along the specified axis, while ignoring NaNs. - - Returns the variance of the array elements, a measure of the spread of - a distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - For all-NaN slices or slices with zero degrees of freedom, NaN is - returned and a `RuntimeWarning` is raised. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose variance is desired. 
If `a` is not an - array, a conversion is attempted. - axis : {int, tuple of int, None}, optional - Axis or axes along which the variance is computed. The default is to compute - the variance of the flattened array. - dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float64`; for arrays of float types it is the same as - the array type. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output, but the type is cast if - necessary. - ddof : int, optional - "Delta Degrees of Freedom": the divisor used in the calculation is - ``N - ddof``, where ``N`` represents the number of non-NaN - elements. By default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - - Returns - ------- - variance : ndarray, see dtype parameter above - If `out` is None, return a new array containing the variance, - otherwise return a reference to the output array. If ddof is >= the - number of non-NaN elements in a slice or the slice contains only - NaNs, then the result for that slice is NaN. - - See Also - -------- - std : Standard deviation - mean : Average - var : Variance while not ignoring NaNs - nanstd, nanmean - ufuncs-output-type - - Notes - ----- - The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. - - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. - If, however, `ddof` is specified, the divisor ``N - ddof`` is used - instead. In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of a hypothetical infinite - population. ``ddof=0`` provides a maximum likelihood estimate of the - variance for normally distributed variables. - - Note that for complex numbers, the absolute value is taken before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the variance is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32` (see example - below). Specifying a higher-accuracy accumulator using the ``dtype`` - keyword can alleviate this issue. 
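# --- Editor's sketch (not part of the deleted file): the variance
# --- pipeline described above on a small input — mean of the valid
# --- entries, squared deviations, divide by N - ddof; nanstd is the root.
import numpy as np

a = np.array([1.0, np.nan, 3.0, 4.0])
valid = a[~np.isnan(a)]                      # [1, 3, 4]
var0 = np.mean((valid - valid.mean()) ** 2)  # ddof=0 divisor is N
assert np.isclose(np.nanvar(a), var0)
assert np.isclose(np.nanstd(a), np.sqrt(np.nanvar(a)))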
- - For this function to work on sub-classes of ndarray, they must define - `sum` with the kwarg `keepdims` - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanvar(a) - 1.5555555555555554 - >>> np.nanvar(a, axis=0) - array([1., 0.]) - >>> np.nanvar(a, axis=1) - array([0., 0.25]) # may vary - - """ - arr, mask = _replace_nan(a, 0) - if mask is None: - return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if dtype is not None: - dtype = np.dtype(dtype) - if dtype is not None and not issubclass(dtype.type, np.inexact): - raise TypeError("If a is inexact, then dtype must be inexact") - if out is not None and not issubclass(out.dtype.type, np.inexact): - raise TypeError("If a is inexact, then out must be inexact") - - # Compute mean - if type(arr) is np.matrix: - _keepdims = np._NoValue - else: - _keepdims = True - # we need to special case matrix for reverse compatibility - # in order for this to work, these sums need to be called with - # keepdims=True, however matrix now raises an error in this case, but - # the reason that it drops the keepdims kwarg is to force keepdims=True - # so this used to work by serendipity. - cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims) - avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims) - avg = _divide_by_count(avg, cnt) - - # Compute squared deviation from mean. - np.subtract(arr, avg, out=arr, casting='unsafe') - arr = _copyto(arr, 0, mask) - if issubclass(arr.dtype.type, np.complexfloating): - sqr = np.multiply(arr, arr.conj(), out=arr).real - else: - sqr = np.multiply(arr, arr, out=arr) - - # Compute variance. - var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - if var.ndim < cnt.ndim: - # Subclasses of ndarray may ignore keepdims, so check here. - cnt = cnt.squeeze(axis) - dof = cnt - ddof - var = _divide_by_count(var, dof) - - isbad = (dof <= 0) - if np.any(isbad): - warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, - stacklevel=3) - # NaN, inf, or negative numbers are all possible bad - # values, so explicitly replace them with NaN. - var = _copyto(var, np.nan, isbad) - return var - - -def _nanstd_dispatcher( - a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): - return (a, out) - - -@array_function_dispatch(_nanstd_dispatcher) -def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): - """ - Compute the standard deviation along the specified axis, while - ignoring NaNs. - - Returns the standard deviation, a measure of the spread of a - distribution, of the non-NaN array elements. The standard deviation is - computed for the flattened array by default, otherwise over the - specified axis. - - For all-NaN slices or slices with zero degrees of freedom, NaN is - returned and a `RuntimeWarning` is raised. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Calculate the standard deviation of the non-NaN values. - axis : {int, tuple of int, None}, optional - Axis or axes along which the standard deviation is computed. The default is - to compute the standard deviation of the flattened array. - dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default is float64, for arrays of float types it - is the same as the array type. - out : ndarray, optional - Alternative output array in which to place the result. 
It must have - the same shape as the expected output but the type (of the - calculated values) will be cast if necessary. - ddof : int, optional - Means Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of non-NaN - elements. By default `ddof` is zero. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. - - If this value is anything but the default it is passed through - as-is to the relevant functions of the sub-classes. If these - functions do not have a `keepdims` kwarg, a RuntimeError will - be raised. - - Returns - ------- - standard_deviation : ndarray, see dtype parameter above. - If `out` is None, return a new array containing the standard - deviation, otherwise return a reference to the output array. If - ddof is >= the number of non-NaN elements in a slice or the slice - contains only NaNs, then the result for that slice is NaN. - - See Also - -------- - var, mean, std - nanvar, nanmean - ufuncs-output-type - - Notes - ----- - The standard deviation is the square root of the average of the squared - deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as - ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is - specified, the divisor ``N - ddof`` is used instead. In standard - statistical practice, ``ddof=1`` provides an unbiased estimator of the - variance of the infinite population. ``ddof=0`` provides a maximum - likelihood estimate of the variance for normally distributed variables. - The standard deviation computed in this function is the square root of - the estimated variance, so even with ``ddof=1``, it will not be an - unbiased estimate of the standard deviation per se. - - Note that, for complex numbers, `std` takes the absolute value before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the *std* is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for float32 (see example - below). Specifying a higher-accuracy accumulator using the `dtype` - keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanstd(a) - 1.247219128924647 - >>> np.nanstd(a, axis=0) - array([1., 0.]) - >>> np.nanstd(a, axis=1) - array([0., 0.5]) # may vary - - """ - var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - if isinstance(var, np.ndarray): - std = np.sqrt(var, out=var) - else: - std = var.dtype.type(np.sqrt(var)) - return std diff --git a/venv/lib/python3.7/site-packages/numpy/lib/npyio.py b/venv/lib/python3.7/site-packages/numpy/lib/npyio.py deleted file mode 100644 index 3e54ff1..0000000 --- a/venv/lib/python3.7/site-packages/numpy/lib/npyio.py +++ /dev/null @@ -1,2380 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import os -import re -import functools -import itertools -import warnings -import weakref -import contextlib -from operator import itemgetter, index as opindex - -import numpy as np -from . 
import format -from ._datasource import DataSource -from numpy.core import overrides -from numpy.core.multiarray import packbits, unpackbits -from numpy.core.overrides import set_module -from numpy.core._internal import recursive -from ._iotools import ( - LineSplitter, NameValidator, StringConverter, ConverterError, - ConverterLockError, ConversionWarning, _is_string_like, - has_nested_fields, flatten_dtype, easy_dtype, _decode_line - ) - -from numpy.compat import ( - asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike, - pickle, contextlib_nullcontext - ) - -if sys.version_info[0] >= 3: - from collections.abc import Mapping -else: - from future_builtins import map - from collections import Mapping - - -@set_module('numpy') -def loads(*args, **kwargs): - # NumPy 1.15.0, 2017-12-10 - warnings.warn( - "np.loads is deprecated, use pickle.loads instead", - DeprecationWarning, stacklevel=2) - return pickle.loads(*args, **kwargs) - - -__all__ = [ - 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', - 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', - 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' - ] - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -class BagObj(object): - """ - BagObj(obj) - - Convert attribute look-ups to getitems on the object passed in. - - Parameters - ---------- - obj : class instance - Object on which attribute look-up is performed. - - Examples - -------- - >>> from numpy.lib.npyio import BagObj as BO - >>> class BagDemo(object): - ... def __getitem__(self, key): # An instance of BagObj(BagDemo) - ... # will call this method when any - ... # attribute look-up is required - ... result = "Doesn't matter what you want, " - ... return result + "you're gonna get this" - ... - >>> demo_obj = BagDemo() - >>> bagobj = BO(demo_obj) - >>> bagobj.hello_there - "Doesn't matter what you want, you're gonna get this" - >>> bagobj.I_can_be_anything - "Doesn't matter what you want, you're gonna get this" - - """ - - def __init__(self, obj): - # Use weakref to make NpzFile objects collectable by refcount - self._obj = weakref.proxy(obj) - - def __getattribute__(self, key): - try: - return object.__getattribute__(self, '_obj')[key] - except KeyError: - raise AttributeError(key) - - def __dir__(self): - """ - Enables dir(bagobj) to list the files in an NpzFile. - - This also enables tab-completion in an interpreter or IPython. - """ - return list(object.__getattribute__(self, '_obj').keys()) - - -def zipfile_factory(file, *args, **kwargs): - """ - Create a ZipFile. - - Allows for Zip64, and the `file` argument can accept file, str, or - pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile - constructor. - """ - if not hasattr(file, 'read'): - file = os_fspath(file) - import zipfile - kwargs['allowZip64'] = True - return zipfile.ZipFile(file, *args, **kwargs) - - -class NpzFile(Mapping): - """ - NpzFile(fid) - - A dictionary-like object with lazy-loading of files in the zipped - archive provided on construction. - - `NpzFile` is used to load files in the NumPy ``.npz`` data archive - format. It assumes that files in the archive have a ``.npy`` extension, - other files are ignored. - - The arrays and file strings are lazily loaded on either - getitem access using ``obj['key']`` or attribute lookup using - ``obj.f.key``. 
A list of all files (without ``.npy`` extensions) can - be obtained with ``obj.files`` and the ZipFile object itself using - ``obj.zip``. - - Attributes - ---------- - files : list of str - List of all files in the archive with a ``.npy`` extension. - zip : ZipFile instance - The ZipFile object initialized with the zipped archive. - f : BagObj instance - An object on which attribute can be performed as an alternative - to getitem access on the `NpzFile` instance itself. - allow_pickle : bool, optional - Allow loading pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - - pickle_kwargs : dict, optional - Additional keyword arguments to pass on to pickle.load. - These are only useful when loading object arrays saved on - Python 2 when using Python 3. - - Parameters - ---------- - fid : file or str - The zipped archive to open. This is either a file-like object - or a string containing the path to the archive. - own_fid : bool, optional - Whether NpzFile should close the file handle. - Requires that `fid` is a file-like object. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - >>> x = np.arange(10) - >>> y = np.sin(x) - >>> np.savez(outfile, x=x, y=y) - >>> _ = outfile.seek(0) - - >>> npz = np.load(outfile) - >>> isinstance(npz, np.lib.io.NpzFile) - True - >>> sorted(npz.files) - ['x', 'y'] - >>> npz['x'] # getitem access - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> npz.f.x # attribute lookup - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - """ - - def __init__(self, fid, own_fid=False, allow_pickle=False, - pickle_kwargs=None): - # Import is postponed to here since zipfile depends on gzip, an - # optional component of the so-called standard library. - _zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] - self.allow_pickle = allow_pickle - self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) - self.zip = _zip - self.f = BagObj(self) - if own_fid: - self.fid = fid - else: - self.fid = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def close(self): - """ - Close the file. - - """ - if self.zip is not None: - self.zip.close() - self.zip = None - if self.fid is not None: - self.fid.close() - self.fid = None - self.f = None # break reference cycle - - def __del__(self): - self.close() - - # Implement the Mapping ABC - def __iter__(self): - return iter(self.files) - - def __len__(self): - return len(self.files) - - def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. 
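    # [Editorial aside, not part of the original file: the lookup below opens
    # the zip member once to sniff its magic bytes and then reopens it to
    # read, since the stream returned by ZipFile.open could not be rewound on
    # the Python versions this code targets.]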
- member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs) - else: - return self.zip.read(key) - else: - raise KeyError("%s is not a file in the archive" % key) - - - if sys.version_info.major == 3: - # deprecate the python 2 dict apis that we supported by accident in - # python 3. We forgot to implement itervalues() at all in earlier - # versions of numpy, so no need to deprecated it here. - - def iteritems(self): - # Numpy 1.15, 2018-02-20 - warnings.warn( - "NpzFile.iteritems is deprecated in python 3, to match the " - "removal of dict.itertems. Use .items() instead.", - DeprecationWarning, stacklevel=2) - return self.items() - - def iterkeys(self): - # Numpy 1.15, 2018-02-20 - warnings.warn( - "NpzFile.iterkeys is deprecated in python 3, to match the " - "removal of dict.iterkeys. Use .keys() instead.", - DeprecationWarning, stacklevel=2) - return self.keys() - - -@set_module('numpy') -def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, - encoding='ASCII'): - """ - Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. - - .. warning:: Loading files that contain object arrays uses the ``pickle`` - module, which is not secure against erroneous or maliciously - constructed data. Consider passing ``allow_pickle=False`` to - load data that is known not to contain object arrays for the - safer handling of untrusted sources. - - Parameters - ---------- - file : file-like object, string, or pathlib.Path - The file to read. File-like objects must support the - ``seek()`` and ``read()`` methods. Pickled files require that the - file-like object support the ``readline()`` method as well. - mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional - If not None, then memory-map the file, using the given mode (see - `numpy.memmap` for a detailed description of the modes). A - memory-mapped array is kept on disk. However, it can be accessed - and sliced like any ndarray. Memory mapping is especially useful - for accessing small fragments of large files without reading the - entire file into memory. - allow_pickle : bool, optional - Allow loading pickled object arrays stored in npy files. Reasons for - disallowing pickles include security, as loading pickled data can - execute arbitrary code. If pickles are disallowed, loading object - arrays will fail. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - - fix_imports : bool, optional - Only useful when loading Python 2 generated pickled files on Python 3, - which includes npy/npz files containing object arrays. If `fix_imports` - is True, pickle will try to map the old Python 2 names to the new names - used in Python 3. - encoding : str, optional - What encoding to use when reading Python 2 strings. Only useful when - loading Python 2 generated pickled files in Python 3, which includes - npy/npz files containing object arrays. Values other than 'latin1', - 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical - data. Default: 'ASCII' - - Returns - ------- - result : array, tuple, dict, etc. - Data stored in the file. For ``.npz`` files, the returned instance - of NpzFile class must be closed to avoid leaking file descriptors. 
- - Raises - ------ - IOError - If the input file does not exist or cannot be read. - ValueError - The file contains an object array, but allow_pickle=False given. - - See Also - -------- - save, savez, savez_compressed, loadtxt - memmap : Create a memory-map to an array stored in a file on disk. - lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. - - Notes - ----- - - If the file contains pickle data, then whatever object is stored - in the pickle is returned. - - If the file is a ``.npy`` file, then a single array is returned. - - If the file is a ``.npz`` file, then a dictionary-like object is - returned, containing ``{filename: array}`` key-value pairs, one for - each file in the archive. - - If the file is a ``.npz`` file, the returned value supports the - context manager protocol in a similar fashion to the open function:: - - with load('foo.npz') as data: - a = data['a'] - - The underlying file descriptor is closed when exiting the 'with' - block. - - Examples - -------- - Store data to disk, and load it again: - - >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) - >>> np.load('/tmp/123.npy') - array([[1, 2, 3], - [4, 5, 6]]) - - Store compressed data to disk, and load it again: - - >>> a=np.array([[1, 2, 3], [4, 5, 6]]) - >>> b=np.array([1, 2]) - >>> np.savez('/tmp/123.npz', a=a, b=b) - >>> data = np.load('/tmp/123.npz') - >>> data['a'] - array([[1, 2, 3], - [4, 5, 6]]) - >>> data['b'] - array([1, 2]) - >>> data.close() - - Mem-map the stored array, and then access the second row - directly from disk: - - >>> X = np.load('/tmp/123.npy', mmap_mode='r') - >>> X[1, :] - memmap([4, 5, 6]) - - """ - if encoding not in ('ASCII', 'latin1', 'bytes'): - # The 'encoding' value for pickle also affects what encoding - # the serialized binary data of NumPy arrays is loaded - # in. Pickle does not pass on the encoding information to - # NumPy. The unpickling code in numpy.core.multiarray is - # written to assume that unicode data appearing where binary - # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. - # - # Other encoding values can corrupt binary data, and we - # purposefully disallow them. For the same reason, the errors= - # argument is not exposed, as values other than 'strict' - # result can similarly silently corrupt numerical data. - raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") - - if sys.version_info[0] >= 3: - pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) - else: - # Nothing to do on Python 2 - pickle_kwargs = {} - - # TODO: Use contextlib.ExitStack once we drop Python 2 - if hasattr(file, 'read'): - fid = file - own_fid = False - else: - fid = open(os_fspath(file), "rb") - own_fid = True - - try: - # Code to distinguish from NumPy binary files and pickles. 
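    # [Editorial aside, not part of the original file: the dispatch below keys
    # on the stream's leading bytes -- 'PK\x03\x04' (or 'PK\x05\x06' for an
    # empty archive) marks a zip, i.e. an .npz; the .npy magic prefix marks a
    # single array; anything else falls through to pickle, which is refused
    # unless allow_pickle=True.]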
- _ZIP_PREFIX = b'PK\x03\x04' - _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this - N = len(format.MAGIC_PREFIX) - magic = fid.read(N) - # If the file size is less than N, we need to make sure not - # to seek past the beginning of the file - fid.seek(-min(N, len(magic)), 1) # back-up - if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): - # zip-file (assume .npz) - # Transfer file ownership to NpzFile - ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - own_fid = False - return ret - elif magic == format.MAGIC_PREFIX: - # .npy file - if mmap_mode: - return format.open_memmap(file, mode=mmap_mode) - else: - return format.read_array(fid, allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - else: - # Try a pickle - if not allow_pickle: - raise ValueError("Cannot load file containing pickled data " - "when allow_pickle=False") - try: - return pickle.load(fid, **pickle_kwargs) - except Exception: - raise IOError( - "Failed to interpret file %s as a pickle" % repr(file)) - finally: - if own_fid: - fid.close() - - -def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): - return (arr,) - - -@array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=True): - """ - Save an array to a binary file in NumPy ``.npy`` format. - - Parameters - ---------- - file : file, str, or pathlib.Path - File or filename to which the data is saved. If file is a file-object, - then the filename is unchanged. If file is a string or Path, a ``.npy`` - extension will be appended to the filename if it does not already - have one. - arr : array_like - Array data to be saved. - allow_pickle : bool, optional - Allow saving object arrays using Python pickles. Reasons for disallowing - pickles include security (loading pickled data can execute arbitrary - code) and portability (pickled objects may not be loadable on different - Python installations, for example if the stored objects require libraries - that are not available, and not all pickled data is compatible between - Python 2 and Python 3). - Default: True - fix_imports : bool, optional - Only useful in forcing objects in object arrays on Python 3 to be - pickled in a Python 2 compatible way. If `fix_imports` is True, pickle - will try to map the new Python 3 names to the old module names used in - Python 2, so that the pickle data stream is readable with Python 2. - - See Also - -------- - savez : Save several arrays into a ``.npz`` archive - savetxt, load - - Notes - ----- - For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. - - Any data saved to the file is appended to the end of the file. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - - >>> x = np.arange(10) - >>> np.save(outfile, x) - - >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> np.load(outfile) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - - >>> with open('test.npy', 'wb') as f: - ... np.save(f, np.array([1, 2])) - ... np.save(f, np.array([1, 3])) - >>> with open('test.npy', 'rb') as f: - ... a = np.load(f) - ... 
b = np.load(f) - >>> print(a, b) - # [1 2] [1 3] - """ - own_fid = False - if hasattr(file, 'write'): - fid = file - else: - file = os_fspath(file) - if not file.endswith('.npy'): - file = file + '.npy' - fid = open(file, "wb") - own_fid = True - - if sys.version_info[0] >= 3: - pickle_kwargs = dict(fix_imports=fix_imports) - else: - # Nothing to do on Python 2 - pickle_kwargs = None - - try: - arr = np.asanyarray(arr) - format.write_array(fid, arr, allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - finally: - if own_fid: - fid.close() - - -def _savez_dispatcher(file, *args, **kwds): - for a in args: - yield a - for v in kwds.values(): - yield v - - -@array_function_dispatch(_savez_dispatcher) -def savez(file, *args, **kwds): - """Save several arrays into a single file in uncompressed ``.npz`` format. - - If arguments are passed in with no keywords, the corresponding variable - names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword - arguments are given, the corresponding variable names, in the ``.npz`` - file will match the keyword names. - - Parameters - ---------- - file : str or file - Either the filename (string) or an open file (file-like object) - where the data will be saved. If file is a string or a Path, the - ``.npz`` extension will be appended to the filename if it is not - already there. - args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. - kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. - - Returns - ------- - None - - See Also - -------- - save : Save a single array to a binary file in NumPy format. - savetxt : Save an array to a file as plain text. - savez_compressed : Save several arrays into a compressed ``.npz`` archive - - Notes - ----- - The ``.npz`` file format is a zipped archive of files named after the - variables they contain. The archive is not compressed and each file - in the archive contains one variable in ``.npy`` format. For a - description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. - - When opening the saved ``.npz`` file with `load` a `NpzFile` object is - returned. This is a dictionary-like object which can be queried for - its list of arrays (with the ``.files`` attribute), and for the arrays - themselves. - - When saving dictionaries, the dictionary keys become filenames - inside the ZIP archive. Therefore, keys should be valid filenames. - E.g., avoid keys that begin with ``/`` or contain ``.``. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - >>> x = np.arange(10) - >>> y = np.sin(x) - - Using `savez` with \\*args, the arrays are saved with default names. - - >>> np.savez(outfile, x, y) - >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> npzfile = np.load(outfile) - >>> npzfile.files - ['arr_0', 'arr_1'] - >>> npzfile['arr_0'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - Using `savez` with \\**kwds, the arrays are saved with the keyword names. 
- - >>> outfile = TemporaryFile() - >>> np.savez(outfile, x=x, y=y) - >>> _ = outfile.seek(0) - >>> npzfile = np.load(outfile) - >>> sorted(npzfile.files) - ['x', 'y'] - >>> npzfile['x'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - """ - _savez(file, args, kwds, False) - - -def _savez_compressed_dispatcher(file, *args, **kwds): - for a in args: - yield a - for v in kwds.values(): - yield v - - -@array_function_dispatch(_savez_compressed_dispatcher) -def savez_compressed(file, *args, **kwds): - """ - Save several arrays into a single file in compressed ``.npz`` format. - - If keyword arguments are given, then filenames are taken from the keywords. - If arguments are passed in with no keywords, then stored filenames are - arr_0, arr_1, etc. - - Parameters - ---------- - file : str or file - Either the filename (string) or an open file (file-like object) - where the data will be saved. If file is a string or a Path, the - ``.npz`` extension will be appended to the filename if it is not - already there. - args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. - kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. - - Returns - ------- - None - - See Also - -------- - numpy.save : Save a single array to a binary file in NumPy format. - numpy.savetxt : Save an array to a file as plain text. - numpy.savez : Save several arrays into an uncompressed ``.npz`` file format - numpy.load : Load the files created by savez_compressed. - - Notes - ----- - The ``.npz`` file format is a zipped archive of files named after the - variables they contain. The archive is compressed with - ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable - in ``.npy`` format. For a description of the ``.npy`` format, see - :py:mod:`numpy.lib.format`. - - - When opening the saved ``.npz`` file with `load` a `NpzFile` object is - returned. This is a dictionary-like object which can be queried for - its list of arrays (with the ``.files`` attribute), and for the arrays - themselves. - - Examples - -------- - >>> test_array = np.random.rand(3, 2) - >>> test_vector = np.random.rand(4) - >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) - >>> loaded = np.load('/tmp/123.npz') - >>> print(np.array_equal(test_array, loaded['a'])) - True - >>> print(np.array_equal(test_vector, loaded['b'])) - True - - """ - _savez(file, args, kwds, True) - - -def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): - # Import is postponed to here since zipfile depends on gzip, an optional - # component of the so-called standard library. - import zipfile - - if not hasattr(file, 'write'): - file = os_fspath(file) - if not file.endswith('.npz'): - file = file + '.npz' - - namedict = kwds - for i, val in enumerate(args): - key = 'arr_%d' % i - if key in namedict.keys(): - raise ValueError( - "Cannot use un-named variables and keyword %s" % key) - namedict[key] = val - - if compress: - compression = zipfile.ZIP_DEFLATED - else: - compression = zipfile.ZIP_STORED - - zipf = zipfile_factory(file, mode="w", compression=compression) - - if sys.version_info >= (3, 6): - # Since Python 3.6 it is possible to write directly to a ZIP file. 
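    # [Editorial aside, not part of the original file: streaming each member
    # straight into the open ZipFile here avoids the temporary-file staging
    # used in the pre-3.6 branch below, and force_zip64=True keeps the archive
    # valid once a member grows past 4 GiB (see gh-10776, cited in the code).]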
- for key, val in namedict.items(): - fname = key + '.npy' - val = np.asanyarray(val) - # always force zip64, gh-10776 - with zipf.open(fname, 'w', force_zip64=True) as fid: - format.write_array(fid, val, - allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - else: - # Stage arrays in a temporary file on disk, before writing to zip. - - # Import deferred for startup time improvement - import tempfile - # Since target file might be big enough to exceed capacity of a global - # temporary directory, create temp file side-by-side with the target file. - file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp') - fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy') - os.close(fd) - try: - for key, val in namedict.items(): - fname = key + '.npy' - fid = open(tmpfile, 'wb') - try: - format.write_array(fid, np.asanyarray(val), - allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - fid.close() - fid = None - zipf.write(tmpfile, arcname=fname) - except IOError as exc: - raise IOError("Failed to write to %s: %s" % (tmpfile, exc)) - finally: - if fid: - fid.close() - finally: - os.remove(tmpfile) - - zipf.close() - - -def _getconv(dtype): - """ Find the correct dtype converter. Adapted from matplotlib """ - - def floatconv(x): - # lower-case before the '0x' test so '0X...' hex literals are also - # recognised (the original called x.lower() and discarded the result) - if '0x' in x.lower(): - return float.fromhex(x) - return float(x) - - typ = dtype.type - if issubclass(typ, np.bool_): - return lambda x: bool(int(x)) - if issubclass(typ, np.uint64): - return np.uint64 - if issubclass(typ, np.int64): - return np.int64 - if issubclass(typ, np.integer): - return lambda x: int(float(x)) - elif issubclass(typ, np.longdouble): - return np.longdouble - elif issubclass(typ, np.floating): - return floatconv - elif issubclass(typ, complex): - return lambda x: complex(asstr(x).replace('+-', '-')) - elif issubclass(typ, np.bytes_): - return asbytes - elif issubclass(typ, np.unicode_): - return asunicode - else: - return asstr - -# amount of lines loadtxt reads in one chunk, can be overridden for testing -_loadtxt_chunksize = 50000 - - @set_module('numpy') -def loadtxt(fname, dtype=float, comments='#', delimiter=None, - converters=None, skiprows=0, usecols=None, unpack=False, - ndmin=0, encoding='bytes', max_rows=None): - """ - Load data from a text file. - - Each row in the text file must have the same number of values. - - Parameters - ---------- - fname : file, str, or pathlib.Path - File, filename, or generator to read. If the filename extension is - ``.gz`` or ``.bz2``, the file is first decompressed. Note that - generators should return byte strings. - dtype : data-type, optional - Data-type of the resulting array; default: float. If this is a - structured data-type, the resulting array will be 1-dimensional, and - each row will be interpreted as an element of the array. In this - case, the number of columns used must match the number of fields in - the data-type. - comments : str or sequence of str, optional - The characters or list of characters used to indicate the start of a - comment. None implies no comments. For backwards compatibility, byte - strings will be decoded as 'latin1'. The default is '#'. - delimiter : str, optional - The string used to separate values. For backwards compatibility, byte - strings will be decoded as 'latin1'. The default is whitespace. - converters : dict, optional - A dictionary mapping column number to a function that will parse the - column string into the desired value.
E.g., if column 0 is a date - string: ``converters = {0: datestr2num}``. Converters can also be - used to provide a default value for missing data (but see also - `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. - Default: None. - skiprows : int, optional - Skip the first `skiprows` lines, including comments; default: 0. - usecols : int or sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. - The default, None, results in all columns being read. - - .. versionchanged:: 1.11.0 - When a single column has to be read it is possible to use - an integer instead of a tuple. E.g ``usecols = 3`` reads the - fourth column the same way as ``usecols = (3,)`` would. - unpack : bool, optional - If True, the returned array is transposed, so that arguments may be - unpacked using ``x, y, z = loadtxt(...)``. When used with a structured - data-type, arrays are returned for each field. Default is False. - ndmin : int, optional - The returned array will have at least `ndmin` dimensions. - Otherwise mono-dimensional axes will be squeezed. - Legal values: 0 (default), 1 or 2. - - .. versionadded:: 1.6.0 - encoding : str, optional - Encoding used to decode the inputfile. Does not apply to input streams. - The special value 'bytes' enables backward compatibility workarounds - that ensures you receive byte arrays as results if possible and passes - 'latin1' encoded strings to converters. Override this value to receive - unicode arrays and pass strings as input to converters. If set to None - the system default is used. The default value is 'bytes'. - - .. versionadded:: 1.14.0 - max_rows : int, optional - Read `max_rows` lines of content after `skiprows` lines. The default - is to read all the lines. - - .. versionadded:: 1.16.0 - - Returns - ------- - out : ndarray - Data read from the text file. - - See Also - -------- - load, fromstring, fromregex - genfromtxt : Load data with missing values handled as specified. - scipy.io.loadmat : reads MATLAB data files - - Notes - ----- - This function aims to be a fast reader for simply formatted files. The - `genfromtxt` function provides more sophisticated handling of, e.g., - lines with missing values. - - .. versionadded:: 1.10.0 - - The strings produced by the Python float.hex method can be used as - input for floats. - - Examples - -------- - >>> from io import StringIO # StringIO behaves like a file object - >>> c = StringIO(u"0 1\\n2 3") - >>> np.loadtxt(c) - array([[0., 1.], - [2., 3.]]) - - >>> d = StringIO(u"M 21 72\\nF 35 58") - >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), - ... 
'formats': ('S1', 'i4', 'f4')}) - array([(b'M', 21, 72.), (b'F', 35, 58.)], - dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')]) - - >>> c = StringIO(u"1,0,2\\n3,0,4") - >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) - >>> x - array([1., 3.]) - >>> y - array([2., 4.]) - - """ - # Type conversions for Py3 convenience - if comments is not None: - if isinstance(comments, (basestring, bytes)): - comments = [comments] - comments = [_decode_line(x) for x in comments] - # Compile regex for comments beforehand - comments = (re.escape(comment) for comment in comments) - regex_comments = re.compile('|'.join(comments)) - - if delimiter is not None: - delimiter = _decode_line(delimiter) - - user_converters = converters - - if encoding == 'bytes': - encoding = None - byte_converters = True - else: - byte_converters = False - - if usecols is not None: - # Allow usecols to be a single int or a sequence of ints - try: - usecols_as_list = list(usecols) - except TypeError: - usecols_as_list = [usecols] - for col_idx in usecols_as_list: - try: - opindex(col_idx) - except TypeError as e: - e.args = ( - "usecols must be an int or a sequence of ints but " - "it contains at least one element of type %s" % - type(col_idx), - ) - raise - # Fall back to existing code - usecols = usecols_as_list - - fown = False - try: - if isinstance(fname, os_PathLike): - fname = os_fspath(fname) - if _is_string_like(fname): - fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) - fencoding = getattr(fh, 'encoding', 'latin1') - fh = iter(fh) - fown = True - else: - fh = iter(fname) - fencoding = getattr(fname, 'encoding', 'latin1') - except TypeError: - raise ValueError('fname must be a string, file handle, or generator') - - # input may be a python2 io stream - if encoding is not None: - fencoding = encoding - # we must assume local encoding - # TODO emit portability warning? - elif fencoding is None: - import locale - fencoding = locale.getpreferredencoding() - - # not to be confused with the flatten_dtype we import... - @recursive - def flatten_dtype_internal(self, dt): - """Unpack a structured data-type, and produce re-packing info.""" - if dt.names is None: - # If the dtype is flattened, return. - # If the dtype has a shape, the dtype occurs - # in the list more than once. - shape = dt.shape - if len(shape) == 0: - return ([dt.base], None) - else: - packing = [(shape[-1], list)] - if len(shape) > 1: - for dim in dt.shape[-2::-1]: - packing = [(dim*packing[0][0], packing*dim)] - return ([dt.base] * int(np.prod(dt.shape)), packing) - else: - types = [] - packing = [] - for field in dt.names: - tp, bytes = dt.fields[field] - flat_dt, flat_packing = self(tp) - types.extend(flat_dt) - # Avoid extra nesting for subarrays - if tp.ndim > 0: - packing.extend(flat_packing) - else: - packing.append((len(flat_dt), flat_packing)) - return (types, packing) - - @recursive - def pack_items(self, items, packing): - """Pack items into nested lists based on re-packing info.""" - if packing is None: - return items[0] - elif packing is tuple: - return tuple(items) - elif packing is list: - return list(items) - else: - start = 0 - ret = [] - for length, subpacking in packing: - ret.append(self(items[start:start+length], subpacking)) - start += length - return tuple(ret) - - def split_line(line): - """Chop off comments, strip, and split at delimiter.
""" - line = _decode_line(line, encoding=encoding) - - if comments is not None: - line = regex_comments.split(line, maxsplit=1)[0] - line = line.strip('\r\n') - if line: - return line.split(delimiter) - else: - return [] - - def read_data(chunk_size): - """Parse each line, including the first. - - The file read, `fh`, is a global defined above. - - Parameters - ---------- - chunk_size : int - At most `chunk_size` lines are read at a time, with iteration - until all lines are read. - - """ - X = [] - line_iter = itertools.chain([first_line], fh) - line_iter = itertools.islice(line_iter, max_rows) - for i, line in enumerate(line_iter): - vals = split_line(line) - if len(vals) == 0: - continue - if usecols: - vals = [vals[j] for j in usecols] - if len(vals) != N: - line_num = i + skiprows + 1 - raise ValueError("Wrong number of columns at line %d" - % line_num) - - # Convert each value according to its column and store - items = [conv(val) for (conv, val) in zip(converters, vals)] - - # Then pack it according to the dtype's nesting - items = pack_items(items, packing) - X.append(items) - if len(X) > chunk_size: - yield X - X = [] - if X: - yield X - - try: - # Make sure we're dealing with a proper dtype - dtype = np.dtype(dtype) - defconv = _getconv(dtype) - - # Skip the first `skiprows` lines - for i in range(skiprows): - next(fh) - - # Read until we find a line with some values, and use - # it to estimate the number of columns, N. - first_vals = None - try: - while not first_vals: - first_line = next(fh) - first_vals = split_line(first_line) - except StopIteration: - # End of lines reached - first_line = '' - first_vals = [] - warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2) - N = len(usecols or first_vals) - - dtype_types, packing = flatten_dtype_internal(dtype) - if len(dtype_types) > 1: - # We're dealing with a structured array, each field of - # the dtype matches a column - converters = [_getconv(dt) for dt in dtype_types] - else: - # All fields have the same dtype - converters = [defconv for i in range(N)] - if N > 1: - packing = [(N, tuple)] - - # By preference, use the converters specified by the user - for i, conv in (user_converters or {}).items(): - if usecols: - try: - i = usecols.index(i) - except ValueError: - # Unused converter specified - continue - if byte_converters: - # converters may use decode to workaround numpy's old behaviour, - # so encode the string again before passing to the user converter - def tobytes_first(x, conv): - if type(x) is bytes: - return conv(x) - return conv(x.encode("latin1")) - converters[i] = functools.partial(tobytes_first, conv=conv) - else: - converters[i] = conv - - converters = [conv if conv is not bytes else - lambda x: x.encode(fencoding) for conv in converters] - - # read data in chunks and fill it into an array via resize - # over-allocating and shrinking the array later may be faster but is - # probably not relevant compared to the cost of actually reading and - # converting the data - X = None - for x in read_data(_loadtxt_chunksize): - if X is None: - X = np.array(x, dtype) - else: - nshape = list(X.shape) - pos = nshape[0] - nshape[0] += len(x) - X.resize(nshape, refcheck=False) - X[pos:, ...] = x - finally: - if fown: - fh.close() - - if X is None: - X = np.array([], dtype) - - # Multicolumn data are returned with shape (1, N, M), i.e. 
- # (1, 1, M) for a single row - remove the singleton dimension there - if X.ndim == 3 and X.shape[:2] == (1, 1): - X.shape = (1, -1) - - # Verify that the array has at least dimensions `ndmin`. - # Check correctness of the values of `ndmin` - if ndmin not in [0, 1, 2]: - raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) - # Tweak the size and shape of the arrays - remove extraneous dimensions - if X.ndim > ndmin: - X = np.squeeze(X) - # and ensure we have the minimum number of dimensions asked for - # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 - if X.ndim < ndmin: - if ndmin == 1: - X = np.atleast_1d(X) - elif ndmin == 2: - X = np.atleast_2d(X).T - - if unpack: - if len(dtype_types) > 1: - # For structured arrays, return an array for each field. - return [X[field] for field in dtype.names] - else: - return X.T - else: - return X - - -def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, - header=None, footer=None, comments=None, - encoding=None): - return (X,) - - -@array_function_dispatch(_savetxt_dispatcher) -def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', - footer='', comments='# ', encoding=None): - """ - Save an array to a text file. - - Parameters - ---------- - fname : filename or file handle - If the filename ends in ``.gz``, the file is automatically saved in - compressed gzip format. `loadtxt` understands gzipped files - transparently. - X : 1D or 2D array_like - Data to be saved to a text file. - fmt : str or sequence of strs, optional - A single format (%10.5f), a sequence of formats, or a - multi-format string, e.g. 'Iteration %d -- %10.5f', in which - case `delimiter` is ignored. For complex `X`, the legal options - for `fmt` are: - - * a single specifier, `fmt='%.4e'`, resulting in numbers formatted - like `' (%s+%sj)' % (fmt, fmt)` - * a full string specifying every real and imaginary part, e.g. - `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns - * a list of specifiers, one per column - in this case, the real - and imaginary part must have separate specifiers, - e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns - delimiter : str, optional - String or character separating columns. - newline : str, optional - String or character separating lines. - - .. versionadded:: 1.5.0 - header : str, optional - String that will be written at the beginning of the file. - - .. versionadded:: 1.7.0 - footer : str, optional - String that will be written at the end of the file. - - .. versionadded:: 1.7.0 - comments : str, optional - String that will be prepended to the ``header`` and ``footer`` strings, - to mark them as comments. Default: '# ', as expected by e.g. - ``numpy.loadtxt``. - - .. versionadded:: 1.7.0 - encoding : {None, str}, optional - Encoding used to encode the outputfile. Does not apply to output - streams. If the encoding is something other than 'bytes' or 'latin1' - you will not be able to load the file in NumPy versions < 1.14. Default - is 'latin1'. - - .. versionadded:: 1.14.0 - - - See Also - -------- - save : Save an array to a binary file in NumPy ``.npy`` format - savez : Save several arrays into an uncompressed ``.npz`` archive - savez_compressed : Save several arrays into a compressed ``.npz`` archive - - Notes - ----- - Further explanation of the `fmt` parameter - (``%[flag]width[.precision]specifier``): - - flags: - ``-`` : left justify - - ``+`` : Forces to precede result with + or -. - - ``0`` : Left pad the number with zeros instead of space (see width). 
- - width: - Minimum number of characters to be printed. The value is not truncated - if it has more characters. - - precision: - - For integer specifiers (eg. ``d,i,o,x``), the minimum number of - digits. - - For ``e, E`` and ``f`` specifiers, the number of digits to print - after the decimal point. - - For ``g`` and ``G``, the maximum number of significant digits. - - For ``s``, the maximum number of characters. - - specifiers: - ``c`` : character - - ``d`` or ``i`` : signed decimal integer - - ``e`` or ``E`` : scientific notation with ``e`` or ``E``. - - ``f`` : decimal floating point - - ``g,G`` : use the shorter of ``e,E`` or ``f`` - - ``o`` : signed octal - - ``s`` : string of characters - - ``u`` : unsigned decimal integer - - ``x,X`` : unsigned hexadecimal integer - - This explanation of ``fmt`` is not complete, for an exhaustive - specification see [1]_. - - References - ---------- - .. [1] `Format Specification Mini-Language - <https://docs.python.org/library/string.html#format-specification-mini-language>`_, - Python Documentation. - - Examples - -------- - >>> x = y = z = np.arange(0.0,5.0,1.0) - >>> np.savetxt('test.out', x, delimiter=',') # X is an array - >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays - >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation - - """ - - # Py3 conversions first - if isinstance(fmt, bytes): - fmt = asstr(fmt) - delimiter = asstr(delimiter) - - class WriteWrap(object): - """Convert to unicode in py2 or to bytes on bytestream inputs. - - """ - def __init__(self, fh, encoding): - self.fh = fh - self.encoding = encoding - self.do_write = self.first_write - - def close(self): - self.fh.close() - - def write(self, v): - self.do_write(v) - - def write_bytes(self, v): - if isinstance(v, bytes): - self.fh.write(v) - else: - self.fh.write(v.encode(self.encoding)) - - def write_normal(self, v): - self.fh.write(asunicode(v)) - - def first_write(self, v): - try: - self.write_normal(v) - self.write = self.write_normal - except TypeError: - # input is probably a bytestream - self.write_bytes(v) - self.write = self.write_bytes - - own_fh = False - if isinstance(fname, os_PathLike): - fname = os_fspath(fname) - if _is_string_like(fname): - # datasource doesn't support creating a new file ... - open(fname, 'wt').close() - fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) - own_fh = True - # need to convert str to unicode for text io output - if sys.version_info[0] == 2: - fh = WriteWrap(fh, encoding or 'latin1') - elif hasattr(fname, 'write'): - # wrap to handle byte output streams - fh = WriteWrap(fname, encoding or 'latin1') - else: - raise ValueError('fname must be a string or file handle') - - try: - X = np.asarray(X) - - # Handle 1-dimensional arrays - if X.ndim == 0 or X.ndim > 2: - raise ValueError( - "Expected 1D or 2D array, got %dD array instead" % X.ndim) - elif X.ndim == 1: - # Common case -- 1d array of numbers - if X.dtype.names is None: - X = np.atleast_2d(X).T - ncol = 1 - - # Complex dtype -- each field indicates a separate column - else: - ncol = len(X.dtype.names) - else: - ncol = X.shape[1] - - iscomplex_X = np.iscomplexobj(X) - # `fmt` can be a string with multiple insertion points or a - # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d') - if type(fmt) in (list, tuple): - if len(fmt) != ncol: - raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) - format = asstr(delimiter).join(map(asstr, fmt)) - elif isinstance(fmt, basestring): - n_fmt_chars = fmt.count('%') - error = ValueError('fmt has wrong number of %% formats: %s' % fmt) - if n_fmt_chars == 1: - if iscomplex_X: - fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol - else: - fmt = [fmt, ] * ncol - format = delimiter.join(fmt) - elif iscomplex_X and n_fmt_chars != (2 * ncol): - raise error - elif ((not iscomplex_X) and n_fmt_chars != ncol): - raise error - else: - format = fmt - else: - raise ValueError('invalid fmt: %r' % (fmt,)) - - if len(header) > 0: - header = header.replace('\n', '\n' + comments) - fh.write(comments + header + newline) - if iscomplex_X: - for row in X: - row2 = [] - for number in row: - row2.append(number.real) - row2.append(number.imag) - s = format % tuple(row2) + newline - fh.write(s.replace('+-', '-')) - else: - for row in X: - try: - v = format % tuple(row) + newline - except TypeError: - raise TypeError("Mismatch between array dtype ('%s') and " - "format specifier ('%s')" - % (str(X.dtype), format)) - fh.write(v) - - if len(footer) > 0: - footer = footer.replace('\n', '\n' + comments) - fh.write(comments + footer + newline) - finally: - if own_fh: - fh.close() - - -@set_module('numpy') -def fromregex(file, regexp, dtype, encoding=None): - """ - Construct an array from a text file, using regular expression parsing. - - The returned array is always a structured array, and is constructed from - all matches of the regular expression in the file. Groups in the regular - expression are converted to fields of the structured array. - - Parameters - ---------- - file : str or file - Filename or file object to read. - regexp : str or regexp - Regular expression used to parse the file. - Groups in the regular expression correspond to fields in the dtype. - dtype : dtype or list of dtypes - Dtype for the structured array. - encoding : str, optional - Encoding used to decode the inputfile. Does not apply to input streams. - - .. versionadded:: 1.14.0 - - Returns - ------- - output : ndarray - The output array, containing the part of the content of `file` that - was matched by `regexp`. `output` is always a structured array. - - Raises - ------ - TypeError - When `dtype` is not a valid dtype for a structured array. - - See Also - -------- - fromstring, loadtxt - - Notes - ----- - Dtypes for structured arrays can be specified in several forms, but all - forms specify at least the data type and field name. For details see - `doc.structured_arrays`. - - Examples - -------- - >>> f = open('test.dat', 'w') - >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux") - >>> f.close() - - >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] - >>> output = np.fromregex('test.dat', regexp, - ... 
[('num', np.int64), ('key', 'S3')]) - >>> output - array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], - dtype=[('num', '<i8'), ('key', 'S3')]) - >>> output['num'] - array([1312, 1534, 444]) - - """ - own_fh = False - if not hasattr(file, "read"): - file = np.lib._datasource.open(file, 'rt', encoding=encoding) - own_fh = True - - try: - if not isinstance(dtype, np.dtype): - dtype = np.dtype(dtype) - - content = file.read() - if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode): - regexp = asbytes(regexp) - elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes): - regexp = asstr(regexp) - - if not hasattr(regexp, 'match'): - regexp = re.compile(regexp) - seq = regexp.findall(content) - if seq and not isinstance(seq[0], tuple): - # Only one group is in the regexp. - # Create the new array as a single data-type and then - # re-interpret as a single-field structured array. - newdtype = np.dtype(dtype[dtype.names[0]]) - output = np.array(seq, dtype=newdtype) - output.dtype = dtype - else: - output = np.array(seq, dtype=dtype) - - return output - finally: - if own_fh: - file.close() - - -#####-------------------------------------------------------------------------- -#---- --- ASCII functions --- -#####-------------------------------------------------------------------------- - - -@set_module('numpy') -def genfromtxt(fname, dtype=float, comments='#', delimiter=None, - skip_header=0, skip_footer=0, converters=None, - missing_values=None, filling_values=None, usecols=None, - names=None, excludelist=None, - deletechars=''.join(sorted(NameValidator.defaultdeletechars)), - replace_space='_', autostrip=False, case_sensitive=True, - defaultfmt="f%i", unpack=None, usemask=False, loose=True, - invalid_raise=True, max_rows=None, encoding='bytes'): - """ - Load data from a text file, with missing values handled as specified. - - Each line past the first `skip_header` lines is split at the `delimiter` - character, and characters following the `comments` character are discarded. - - Parameters - ---------- - fname : file, str, pathlib.Path, list of str, generator - File, filename, list, or generator to read. If the filename - extension is `.gz` or `.bz2`, the file is first decompressed. Note - that generators must return byte strings. The strings - in a list or produced by a generator are treated as lines. - dtype : dtype, optional - Data type of the resulting array. - If None, the dtypes will be determined by the contents of each - column, individually. - comments : str, optional - The character used to indicate the start of a comment. - All the characters occurring on a line after a comment are discarded - delimiter : str, int, or sequence, optional - The string used to separate values. By default, any consecutive - whitespaces act as delimiter. An integer or sequence of integers - can also be provided as width(s) of each field. - skiprows : int, optional - `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. - skip_header : int, optional - The number of lines to skip at the beginning of the file. - skip_footer : int, optional - The number of lines to skip at the end of the file. - converters : variable, optional - The set of functions that convert the data of a column to a value. - The converters can also be used to provide a default value - for missing data: ``converters = {3: lambda s: float(s or 0)}``. - missing : variable, optional - `missing` was removed in numpy 1.10. Please use `missing_values` - instead.
- missing_values : variable, optional - The set of strings corresponding to missing data. - filling_values : variable, optional - The set of values to be used as default when the data are missing. - usecols : sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. - names : {None, True, str, sequence}, optional - If `names` is True, the field names are read from the first line after - the first `skip_header` lines. This line can optionally be proceeded - by a comment delimiter. If `names` is a sequence or a single-string of - comma-separated names, the names will be used to define the field names - in a structured dtype. If `names` is None, the names of the dtype - fields will be used, if any. - excludelist : sequence, optional - A list of names to exclude. This list is appended to the default list - ['return','file','print']. Excluded names are appended an underscore: - for example, `file` would become `file_`. - deletechars : str, optional - A string combining invalid characters that must be deleted from the - names. - defaultfmt : str, optional - A format used to define default field names, such as "f%i" or "f_%02i". - autostrip : bool, optional - Whether to automatically strip white spaces from the variables. - replace_space : char, optional - Character(s) used in replacement of white spaces in the variables - names. By default, use a '_'. - case_sensitive : {True, False, 'upper', 'lower'}, optional - If True, field names are case sensitive. - If False or 'upper', field names are converted to upper case. - If 'lower', field names are converted to lower case. - unpack : bool, optional - If True, the returned array is transposed, so that arguments may be - unpacked using ``x, y, z = loadtxt(...)`` - usemask : bool, optional - If True, return a masked array. - If False, return a regular array. - loose : bool, optional - If True, do not raise errors for invalid values. - invalid_raise : bool, optional - If True, an exception is raised if an inconsistency is detected in the - number of columns. - If False, a warning is emitted and the offending lines are skipped. - max_rows : int, optional - The maximum number of rows to read. Must not be used with skip_footer - at the same time. If given, the value must be at least 1. Default is - to read the entire file. - - .. versionadded:: 1.10.0 - encoding : str, optional - Encoding used to decode the inputfile. Does not apply when `fname` is - a file object. The special value 'bytes' enables backward compatibility - workarounds that ensure that you receive byte arrays when possible - and passes latin1 encoded strings to converters. Override this value to - receive unicode arrays and pass strings as input to converters. If set - to None the system default is used. The default value is 'bytes'. - - .. versionadded:: 1.14.0 - - Returns - ------- - out : ndarray - Data read from the text file. If `usemask` is True, this is a - masked array. - - See Also - -------- - numpy.loadtxt : equivalent function when no data is missing. - - Notes - ----- - * When spaces are used as delimiters, or when no delimiter has been given - as input, there should not be any missing data between two fields. - * When the variables are named (either by a flexible dtype or with `names`, - there must not be any header in the file (else a ValueError - exception is raised). - * Individual values are not stripped of spaces by default. 
-      When using a custom converter, make sure the function does remove
-      spaces.
-
-    References
-    ----------
-    .. [1] NumPy User Guide, section `I/O with NumPy
-           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
-
-    Examples
-    --------
-    >>> from io import StringIO
-    >>> import numpy as np
-
-    Comma delimited file with mixed dtype
-
-    >>> s = StringIO(u"1,1.3,abcde")
-    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
-    ... ('mystring','S5')], delimiter=",")
-    >>> data
-    array((1, 1.3, b'abcde'),
-          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
-
-    Using dtype = None
-
-    >>> _ = s.seek(0)  # needed for StringIO example only
-    >>> data = np.genfromtxt(s, dtype=None,
-    ... names = ['myint','myfloat','mystring'], delimiter=",")
-    >>> data
-    array((1, 1.3, b'abcde'),
-          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
-
-    Specifying dtype and names
-
-    >>> _ = s.seek(0)
-    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
-    ... names=['myint','myfloat','mystring'], delimiter=",")
-    >>> data
-    array((1, 1.3, b'abcde'),
-          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
-
-    An example with fixed-width columns
-
-    >>> s = StringIO(u"11.3abcde")
-    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
-    ...     delimiter=[1,3,5])
-    >>> data
-    array((1, 1.3, b'abcde'),
-          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
-
-    An example to show comments
-
-    >>> f = StringIO('''
-    ... text,# of chars
-    ... hello world,11
-    ... numpy,5''')
-    >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
-    array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
-          dtype=[('f0', 'S12'), ('f1', 'S12')])
-
-    """
-    if max_rows is not None:
-        if skip_footer:
-            raise ValueError(
-                    "The keywords 'skip_footer' and 'max_rows' cannot be "
-                    "specified at the same time.")
-        if max_rows < 1:
-            raise ValueError("'max_rows' must be at least 1.")
-
-    if usemask:
-        from numpy.ma import MaskedArray, make_mask_descr
-    # Check the input dictionary of converters
-    user_converters = converters or {}
-    if not isinstance(user_converters, dict):
-        raise TypeError(
-            "The input argument 'converters' should be a valid dictionary "
-            "(got '%s' instead)" % type(user_converters))
-
-    if encoding == 'bytes':
-        encoding = None
-        byte_converters = True
-    else:
-        byte_converters = False
-
-    # Initialize the filehandle, the LineSplitter and the NameValidator
-    try:
-        if isinstance(fname, os_PathLike):
-            fname = os_fspath(fname)
-        if isinstance(fname, basestring):
-            fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
-            fid_ctx = contextlib.closing(fid)
-        else:
-            fid = fname
-            fid_ctx = contextlib_nullcontext(fid)
-        fhd = iter(fid)
-    except TypeError:
-        raise TypeError(
-            "fname must be a string, filehandle, list of strings, "
-            "or generator. Got %s instead." % type(fname))
-
-    with fid_ctx:
-        split_line = LineSplitter(delimiter=delimiter, comments=comments,
-                                  autostrip=autostrip, encoding=encoding)
-        validate_names = NameValidator(excludelist=excludelist,
-                                       deletechars=deletechars,
-                                       case_sensitive=case_sensitive,
-                                       replace_space=replace_space)
-
-        # Skip the first `skip_header` rows
-        try:
-            for i in range(skip_header):
-                next(fhd)
-
-            # Keep on until we find the first valid values
-            first_values = None
-
-            while not first_values:
-                first_line = _decode_line(next(fhd), encoding)
-                if (names is True) and (comments is not None):
-                    if comments in first_line:
-                        first_line = (
-                            ''.join(first_line.split(comments)[1:]))
-                first_values = split_line(first_line)
-        except StopIteration:
-            # Return an empty array if the data file is empty
-            first_line = ''
-            first_values = []
-            warnings.warn('genfromtxt: Empty input file: "%s"' % fname,
-                          stacklevel=2)
-
-        # Should we take the first values as names?
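        # A minimal sketch of the header-detection step above (not part of
        # the original file; the input and output shown are hypothetical and
        # approximate). With names=True, the first non-empty line is split, a
        # leading comment marker is stripped, and the remaining tokens become
        # the field names:
        #
        #     >>> from io import StringIO
        #     >>> import numpy as np
        #     >>> np.genfromtxt(StringIO(u"# a,b,c\n1,2,3"), names=True,
        #     ...               delimiter=",")
        #     array((1., 2., 3.), dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])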
-        if names is True:
-            fval = first_values[0].strip()
-            if comments is not None:
-                if fval in comments:
-                    del first_values[0]
-
-        # Check the columns to use: make sure `usecols` is a list
-        if usecols is not None:
-            try:
-                usecols = [_.strip() for _ in usecols.split(",")]
-            except AttributeError:
-                try:
-                    usecols = list(usecols)
-                except TypeError:
-                    usecols = [usecols, ]
-        nbcols = len(usecols or first_values)
-
-        # Check the names and overwrite the dtype.names if needed
-        if names is True:
-            names = validate_names([str(_.strip()) for _ in first_values])
-            first_line = ''
-        elif _is_string_like(names):
-            names = validate_names([_.strip() for _ in names.split(',')])
-        elif names:
-            names = validate_names(names)
-        # Get the dtype
-        if dtype is not None:
-            dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
-                               excludelist=excludelist,
-                               deletechars=deletechars,
-                               case_sensitive=case_sensitive,
-                               replace_space=replace_space)
-        # Make sure the names are a list (for 2.5)
-        if names is not None:
-            names = list(names)
-
-        if usecols:
-            for (i, current) in enumerate(usecols):
-                # if usecols is a list of names, convert to a list of indices
-                if _is_string_like(current):
-                    usecols[i] = names.index(current)
-                elif current < 0:
-                    usecols[i] = current + len(first_values)
-            # If the dtype is not None, make sure we update it
-            if (dtype is not None) and (len(dtype) > nbcols):
-                descr = dtype.descr
-                dtype = np.dtype([descr[_] for _ in usecols])
-                names = list(dtype.names)
-            # If `names` is not None, update the names
-            elif (names is not None) and (len(names) > nbcols):
-                names = [names[_] for _ in usecols]
-        elif (names is not None) and (dtype is not None):
-            names = list(dtype.names)
-
-        # Process the missing values ...............................
-        # Rename missing_values for convenience
-        user_missing_values = missing_values or ()
-        if isinstance(user_missing_values, bytes):
-            user_missing_values = user_missing_values.decode('latin1')
-
-        # Define the list of missing_values (one column: one list)
-        missing_values = [list(['']) for _ in range(nbcols)]
-
-        # We have a dictionary: process it field by field
-        if isinstance(user_missing_values, dict):
-            # Loop on the items
-            for (key, val) in user_missing_values.items():
-                # Is the key a string?
-                if _is_string_like(key):
-                    try:
-                        # Transform it into an integer
-                        key = names.index(key)
-                    except ValueError:
-                        # We couldn't find it: the name must have been dropped
-                        continue
-                # Redefine the key as needed if it's a column number
-                if usecols:
-                    try:
-                        key = usecols.index(key)
-                    except ValueError:
-                        pass
-                # Transform the value into a list of strings
-                if isinstance(val, (list, tuple)):
-                    val = [str(_) for _ in val]
-                else:
-                    val = [str(val), ]
-                # Add the value(s) to the current list of missing
-                if key is None:
-                    # None acts as default
-                    for miss in missing_values:
-                        miss.extend(val)
-                else:
-                    missing_values[key].extend(val)
-        # We have a sequence: each item matches a column
-        elif isinstance(user_missing_values, (list, tuple)):
-            for (value, entry) in zip(user_missing_values, missing_values):
-                value = str(value)
-                if value not in entry:
-                    entry.append(value)
-        # We have a string: apply it to all entries
-        elif isinstance(user_missing_values, basestring):
-            user_value = user_missing_values.split(",")
-            for entry in missing_values:
-                entry.extend(user_value)
-        # We have something else: apply it to all entries
-        else:
-            for entry in missing_values:
-                entry.extend([str(user_missing_values)])
-
-        # Process the filling_values ...............................
-        # Rename the input for convenience
-        user_filling_values = filling_values
-        if user_filling_values is None:
-            user_filling_values = []
-        # Def
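        # A minimal sketch of how the missing_values / filling_values
        # machinery above is typically exercised (not part of the original
        # file; the column names and sentinel strings are hypothetical, and
        # the exact repr may vary by numpy version):
        #
        #     >>> from io import StringIO
        #     >>> import numpy as np
        #     >>> s = StringIO(u"1,N/A,3\n4,5,???")
        #     >>> np.genfromtxt(s, delimiter=",", names=['a', 'b', 'c'],
        #     ...               missing_values={'b': 'N/A', 'c': '???'},
        #     ...               filling_values={'b': -1.0, 'c': 0.0})
        #     array([(1., -1., 3.), (4., 5., 0.)],
        #           dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])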