Marta miniprojekt + raport(2)
This commit is contained in:
parent
c025d1b5bc
commit
d63fc89ed4
@ -13,8 +13,8 @@
|
|||||||
<file pinned="false" current-in-tab="true">
|
<file pinned="false" current-in-tab="true">
|
||||||
<entry file="file://$PROJECT_DIR$/DecisionTreeGenerate.py">
|
<entry file="file://$PROJECT_DIR$/DecisionTreeGenerate.py">
|
||||||
<provider selected="true" editor-type-id="text-editor">
|
<provider selected="true" editor-type-id="text-editor">
|
||||||
<state relative-caret-position="120">
|
<state relative-caret-position="104">
|
||||||
<caret line="76" column="51" selection-start-line="76" selection-start-column="51" selection-end-line="76" selection-end-column="51" />
|
<caret line="68" column="46" lean-forward="true" selection-start-line="68" selection-start-column="46" selection-end-line="68" selection-end-column="46" />
|
||||||
<folding>
|
<folding>
|
||||||
<element signature="e#0#19#0" expanded="true" />
|
<element signature="e#0#19#0" expanded="true" />
|
||||||
</folding>
|
</folding>
|
||||||
@ -129,7 +129,14 @@
|
|||||||
<option name="project" value="LOCAL" />
|
<option name="project" value="LOCAL" />
|
||||||
<updated>1589128951938</updated>
|
<updated>1589128951938</updated>
|
||||||
</task>
|
</task>
|
||||||
<option name="localTasksCounter" value="2" />
|
<task id="LOCAL-00002" summary="Marta miniprojekt + raport">
|
||||||
|
<created>1589129597764</created>
|
||||||
|
<option name="number" value="00002" />
|
||||||
|
<option name="presentableId" value="LOCAL-00002" />
|
||||||
|
<option name="project" value="LOCAL" />
|
||||||
|
<updated>1589129597764</updated>
|
||||||
|
</task>
|
||||||
|
<option name="localTasksCounter" value="3" />
|
||||||
<servers />
|
<servers />
|
||||||
</component>
|
</component>
|
||||||
<component name="ToolWindowManager">
|
<component name="ToolWindowManager">
|
||||||
@ -145,10 +152,10 @@
|
|||||||
<window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
|
<window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
|
||||||
<window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
|
<window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
|
||||||
<window_info anchor="bottom" id="TODO" order="6" />
|
<window_info anchor="bottom" id="TODO" order="6" />
|
||||||
<window_info active="true" anchor="bottom" id="Version Control" order="7" visible="true" weight="0.3289689" />
|
<window_info anchor="bottom" id="Version Control" order="7" weight="0.3289689" />
|
||||||
<window_info anchor="bottom" id="Terminal" order="8" />
|
<window_info active="true" anchor="bottom" id="Terminal" order="8" visible="true" weight="0.3289689" />
|
||||||
<window_info anchor="bottom" id="Event Log" order="9" side_tool="true" />
|
<window_info anchor="bottom" id="Event Log" order="9" side_tool="true" />
|
||||||
<window_info anchor="bottom" id="Python Console" order="10" />
|
<window_info anchor="bottom" id="Python Console" order="10" weight="0.3289689" />
|
||||||
<window_info anchor="right" id="Commander" order="0" weight="0.4" />
|
<window_info anchor="right" id="Commander" order="0" weight="0.4" />
|
||||||
<window_info anchor="right" id="Ant Build" order="1" weight="0.25" />
|
<window_info anchor="right" id="Ant Build" order="1" weight="0.25" />
|
||||||
<window_info anchor="right" content_ui="combo" id="Hierarchy" order="2" weight="0.25" />
|
<window_info anchor="right" content_ui="combo" id="Hierarchy" order="2" weight="0.25" />
|
||||||
@ -161,8 +168,8 @@
|
|||||||
<component name="editorHistoryManager">
|
<component name="editorHistoryManager">
|
||||||
<entry file="file://$PROJECT_DIR$/DecisionTreeGenerate.py">
|
<entry file="file://$PROJECT_DIR$/DecisionTreeGenerate.py">
|
||||||
<provider selected="true" editor-type-id="text-editor">
|
<provider selected="true" editor-type-id="text-editor">
|
||||||
<state relative-caret-position="120">
|
<state relative-caret-position="104">
|
||||||
<caret line="76" column="51" selection-start-line="76" selection-start-column="51" selection-end-line="76" selection-end-column="51" />
|
<caret line="68" column="46" lean-forward="true" selection-start-line="68" selection-start-column="46" selection-end-line="68" selection-end-column="46" />
|
||||||
<folding>
|
<folding>
|
||||||
<element signature="e#0#19#0" expanded="true" />
|
<element signature="e#0#19#0" expanded="true" />
|
||||||
</folding>
|
</folding>
|
||||||
|
@ -0,0 +1 @@
|
|||||||
|
pip
|
@ -0,0 +1,29 @@
|
|||||||
|
BSD 3-Clause License
|
||||||
|
|
||||||
|
Copyright (c) 2008-2016, The joblib developers.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
* Neither the name of the copyright holder nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,118 @@
|
|||||||
|
Metadata-Version: 2.1
|
||||||
|
Name: joblib
|
||||||
|
Version: 0.14.1
|
||||||
|
Summary: Lightweight pipelining: using Python functions as pipeline jobs.
|
||||||
|
Home-page: https://joblib.readthedocs.io
|
||||||
|
Author: Gael Varoquaux
|
||||||
|
Author-email: gael.varoquaux@normalesup.org
|
||||||
|
License: BSD
|
||||||
|
Platform: any
|
||||||
|
Classifier: Development Status :: 5 - Production/Stable
|
||||||
|
Classifier: Environment :: Console
|
||||||
|
Classifier: Intended Audience :: Developers
|
||||||
|
Classifier: Intended Audience :: Science/Research
|
||||||
|
Classifier: Intended Audience :: Education
|
||||||
|
Classifier: License :: OSI Approved :: BSD License
|
||||||
|
Classifier: Operating System :: OS Independent
|
||||||
|
Classifier: Programming Language :: Python :: 2.7
|
||||||
|
Classifier: Programming Language :: Python :: 3
|
||||||
|
Classifier: Programming Language :: Python :: 3.4
|
||||||
|
Classifier: Programming Language :: Python :: 3.5
|
||||||
|
Classifier: Programming Language :: Python :: 3.6
|
||||||
|
Classifier: Programming Language :: Python :: 3.7
|
||||||
|
Classifier: Topic :: Scientific/Engineering
|
||||||
|
Classifier: Topic :: Utilities
|
||||||
|
Classifier: Topic :: Software Development :: Libraries
|
||||||
|
|
||||||
|
Joblib is a set of tools to provide **lightweight pipelining in
|
||||||
|
Python**. In particular:
|
||||||
|
|
||||||
|
1. transparent disk-caching of functions and lazy re-evaluation
|
||||||
|
(memoize pattern)
|
||||||
|
|
||||||
|
2. easy simple parallel computing
|
||||||
|
|
||||||
|
Joblib is optimized to be **fast** and **robust** on large
|
||||||
|
data in particular and has specific optimizations for `numpy` arrays. It is
|
||||||
|
**BSD-licensed**.
|
||||||
|
|
||||||
|
|
||||||
|
==================== ===============================================
|
||||||
|
**Documentation:** https://joblib.readthedocs.io
|
||||||
|
|
||||||
|
**Download:** https://pypi.python.org/pypi/joblib#downloads
|
||||||
|
|
||||||
|
**Source code:** https://github.com/joblib/joblib
|
||||||
|
|
||||||
|
**Report issues:** https://github.com/joblib/joblib/issues
|
||||||
|
==================== ===============================================
|
||||||
|
|
||||||
|
|
||||||
|
Vision
|
||||||
|
--------
|
||||||
|
|
||||||
|
The vision is to provide tools to easily achieve better performance and
|
||||||
|
reproducibility when working with long running jobs.
|
||||||
|
|
||||||
|
* **Avoid computing the same thing twice**: code is often rerun again and
|
||||||
|
again, for instance when prototyping computational-heavy jobs (as in
|
||||||
|
scientific development), but hand-crafted solutions to alleviate this
|
||||||
|
issue are error-prone and often lead to unreproducible results.
|
||||||
|
|
||||||
|
* **Persist to disk transparently**: efficiently persisting
|
||||||
|
arbitrary objects containing large data is hard. Using
|
||||||
|
joblib's caching mechanism avoids hand-written persistence and
|
||||||
|
implicitly links the file on disk to the execution context of
|
||||||
|
the original Python object. As a result, joblib's persistence is
|
||||||
|
good for resuming an application status or computational job, eg
|
||||||
|
after a crash.
|
||||||
|
|
||||||
|
Joblib addresses these problems while **leaving your code and your flow
|
||||||
|
control as unmodified as possible** (no framework, no new paradigms).
|
||||||
|
|
||||||
|
Main features
|
||||||
|
------------------
|
||||||
|
|
||||||
|
1) **Transparent and fast disk-caching of output value:** a memoize or
|
||||||
|
make-like functionality for Python functions that works well for
|
||||||
|
arbitrary Python objects, including very large numpy arrays. Separate
|
||||||
|
persistence and flow-execution logic from domain logic or algorithmic
|
||||||
|
code by writing the operations as a set of steps with well-defined
|
||||||
|
inputs and outputs: Python functions. Joblib can save their
|
||||||
|
computation to disk and rerun it only if necessary::
|
||||||
|
|
||||||
|
>>> from joblib import Memory
|
||||||
|
>>> cachedir = 'your_cache_dir_goes_here'
|
||||||
|
>>> mem = Memory(cachedir)
|
||||||
|
>>> import numpy as np
|
||||||
|
>>> a = np.vander(np.arange(3)).astype(np.float)
|
||||||
|
>>> square = mem.cache(np.square)
|
||||||
|
>>> b = square(a) # doctest: +ELLIPSIS
|
||||||
|
________________________________________________________________________________
|
||||||
|
[Memory] Calling square...
|
||||||
|
square(array([[0., 0., 1.],
|
||||||
|
[1., 1., 1.],
|
||||||
|
[4., 2., 1.]]))
|
||||||
|
___________________________________________________________square - 0...s, 0.0min
|
||||||
|
|
||||||
|
>>> c = square(a)
|
||||||
|
>>> # The above call did not trigger an evaluation
|
||||||
|
|
||||||
|
2) **Embarrassingly parallel helper:** to make it easy to write readable
|
||||||
|
parallel code and debug it quickly::
|
||||||
|
|
||||||
|
>>> from joblib import Parallel, delayed
|
||||||
|
>>> from math import sqrt
|
||||||
|
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
|
||||||
|
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
|
||||||
|
|
||||||
|
|
||||||
|
3) **Fast compressed Persistence**: a replacement for pickle to work
|
||||||
|
efficiently on Python objects containing large data (
|
||||||
|
*joblib.dump* & *joblib.load* ).
|
||||||
|
|
||||||
|
..
|
||||||
|
>>> import shutil ; shutil.rmtree(cachedir)
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -0,0 +1,228 @@
|
|||||||
|
joblib/__init__.py,sha256=mUi0nXB2wn4pFgtijXCZ2WNpXYehp5YVmx4wArqi42w,4994
|
||||||
|
joblib/_compat.py,sha256=j_3Np0GoJF9cF7EPU08SbfNlof1zdC_amV7RHBOZyWc,577
|
||||||
|
joblib/_dask.py,sha256=Q8V1OIIlL3qYAO8dVWel9nIt6jFyjyWOYn50vflOx7s,10421
|
||||||
|
joblib/_memmapping_reducer.py,sha256=gYU2ep139QiLQNj--eIDB8ISyC7RV9X1M4UpaRVe9XQ,17200
|
||||||
|
joblib/_memory_helpers.py,sha256=qoEGAc3AcWUGHRJF6k0jeosrZijoXRmFe5af8bKqGCU,3607
|
||||||
|
joblib/_multiprocessing_helpers.py,sha256=sRSrTSqRNj6f9b7CX4oDjeLqeSNfDjg80vfuq3szX3g,2250
|
||||||
|
joblib/_parallel_backends.py,sha256=mC1vSq0NwNASriK-C7GkEJHn6A3kBzn98BGyoHfpRq4,24960
|
||||||
|
joblib/_store_backends.py,sha256=dlFYpdnV16H5IkoIIl1jV6eXmnZ1qnGbQF7_LBJD1DM,14530
|
||||||
|
joblib/backports.py,sha256=NQjAA-ho-_yt3gYgxRgK92s0UuxuVO4LNjZAoLoW6L0,2663
|
||||||
|
joblib/compressor.py,sha256=0-nSA_4S02340ONRjMvN_Dr-XL6OWCcwvYTIRqc1trQ,21160
|
||||||
|
joblib/disk.py,sha256=n1rZ-sy0xtDBJO23c98585-3MDFgvxLKfjEf00hBGN8,3814
|
||||||
|
joblib/executor.py,sha256=Y19fV8qEYi6cHHCifkluNPJEECFpCKAL5fdvDiJNJwY,2864
|
||||||
|
joblib/format_stack.py,sha256=D53RrCnEzb2Ge8wn2tIZ0-GaTwSOVDlR692hltUGP-E,14653
|
||||||
|
joblib/func_inspect.py,sha256=fkJefyXk3v_MP39VX4ogdRtkTWpQL-SzTbF8lOIIgaw,13412
|
||||||
|
joblib/hashing.py,sha256=kDiz_BSQQ3gJ1F4JkvSWe9q8LvgnKzH6mXBZewQREHA,10362
|
||||||
|
joblib/logger.py,sha256=xjTDhqjpQU8DjIqrL35IiZ1z7HJ-fgbVc-7Ijcii3Eg,5129
|
||||||
|
joblib/memory.py,sha256=ZL8PWqdEJBACeyG24LYgZHuddrWsBSGUdY2FIl4Q-4w,39689
|
||||||
|
joblib/my_exceptions.py,sha256=DliZaY_ZaFjWfdC-VIVDLi2epdE_Rv6Fp35IntRo0is,4407
|
||||||
|
joblib/numpy_pickle.py,sha256=TrtIvknbPvfe-4EHwADu29-aZqDHthEABgQdc2lWN7A,24604
|
||||||
|
joblib/numpy_pickle_compat.py,sha256=cLOIVT05kRgzroPGO7LfvOw4tXNU8Ffm76ciUkKSb9k,8650
|
||||||
|
joblib/numpy_pickle_utils.py,sha256=kdMtF-YlO24S1oSRHiujfl7hQJsc0j-rbehC8Lagpok,8436
|
||||||
|
joblib/parallel.py,sha256=16KaV29dZWEwCUZ8VgaBykLWvxvgsNdW9eoAs67sI00,44754
|
||||||
|
joblib/pool.py,sha256=yz33MtLFnxm7ahlk3cvQLSUpYHtYbij6zytjSa4lTmo,13153
|
||||||
|
joblib/testing.py,sha256=YaXXAlfKhh3xTyJqEZoOxKrzAV3QOqHbvWjZHs3LTzU,2204
|
||||||
|
joblib/externals/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||||
|
joblib/externals/cloudpickle/__init__.py,sha256=Yx3vOBmwYVm6pPgOIfu-KvclhXvEtygjsurWGfMQGP8,212
|
||||||
|
joblib/externals/cloudpickle/cloudpickle.py,sha256=cAUwgRcWOp0up5S4O21YgChxE7JzkRBWYaOcK3IKaoE,52157
|
||||||
|
joblib/externals/cloudpickle/cloudpickle_fast.py,sha256=FWMFeB833k_wx-2SKsP7dNE_HUMN_6BZ9wpHMlYALLE,19604
|
||||||
|
joblib/externals/loky/__init__.py,sha256=xp5PhgGwvgyip8ainO2OGxFvXRayGB1cRdeLpulFFFM,1072
|
||||||
|
joblib/externals/loky/_base.py,sha256=Ze4r2g-HqBaQuBnXydo4FA1zmAVuTbRa_8GFadu7Wls,23423
|
||||||
|
joblib/externals/loky/cloudpickle_wrapper.py,sha256=U4vl1aG_W8u0_2blqua5np86wG4-21L5cjup79cD3Ww,3964
|
||||||
|
joblib/externals/loky/process_executor.py,sha256=KMLSoVwnVAYq62pwfAe-gexycExeHEDM7Wo-Oor8e5k,45719
|
||||||
|
joblib/externals/loky/reusable_executor.py,sha256=Q-V5Yt5lFJ9qFMNCp2AThLFpKk3_iRoMpU8YbrOvLSM,9268
|
||||||
|
joblib/externals/loky/backend/__init__.py,sha256=HIn7kzGoXCowleEzLikOjptBPLDjAXWpVe3DdxiCTWQ,398
|
||||||
|
joblib/externals/loky/backend/_posix_reduction.py,sha256=kzZ00XEIZkCT6YmkArwy2QVgF30mWgkGyktjPxBVLdQ,2223
|
||||||
|
joblib/externals/loky/backend/_posix_wait.py,sha256=4GDzBDe1kiHxHPGA9By5Zh2xpvsOf4zK9R5nuBjee3U,3319
|
||||||
|
joblib/externals/loky/backend/_win_reduction.py,sha256=Zhqi-2SQsn-mOCiyd8GoTkzhgG-q-gw9VN6intLzk9M,3724
|
||||||
|
joblib/externals/loky/backend/_win_wait.py,sha256=TaPjFsCWYhPgtzUZBjb961ShvEeuog5h_nc_bGG--gM,1956
|
||||||
|
joblib/externals/loky/backend/compat.py,sha256=-wqR1Z_M-VlANX7htToCBHtWWQ7DFPFaZ3nWcKoGE1Q,995
|
||||||
|
joblib/externals/loky/backend/compat_posix.py,sha256=V-0QGfaSWHDv2hgTxMgrhaf6ZyihutTnjd2Xy5FswD0,334
|
||||||
|
joblib/externals/loky/backend/compat_win32.py,sha256=V9MsGseX2aib89DChKDfC2PgLrYtbNyATJb3OWKtRn8,1407
|
||||||
|
joblib/externals/loky/backend/context.py,sha256=fEIC4v_VuvY9hcY_w_A1DNSw8dDRs0De49up_Xc2Fis,9752
|
||||||
|
joblib/externals/loky/backend/fork_exec.py,sha256=FkUlRNNVq-eYHsYPD5fHbyMkB_5I1nYz7AV_r6OEzI0,1372
|
||||||
|
joblib/externals/loky/backend/managers.py,sha256=3amteDFgQ2Xxqaobv-W-8pYdzDd6NgTtwT8SmluB9Us,1836
|
||||||
|
joblib/externals/loky/backend/popen_loky_posix.py,sha256=6n5iR7eAX7-tS-otLHtp4yquCx857PQOWCqYTcWBNrk,6911
|
||||||
|
joblib/externals/loky/backend/popen_loky_win32.py,sha256=gib6vwolIzndU-ag1hzepADkOuabW_9T-fmVD98ahaM,5720
|
||||||
|
joblib/externals/loky/backend/process.py,sha256=3s86s4Ca-QibEN6haOTvBFRip_I5SovXBLAAhwx6WTk,3526
|
||||||
|
joblib/externals/loky/backend/queues.py,sha256=e5kNMx_-RiBUAxoeiNSv_97nzLPZudOclY5UmXF_zj4,8827
|
||||||
|
joblib/externals/loky/backend/reduction.py,sha256=eIM41nXDPcY_Idp_0Y4fxMr27c5fG3y9Gf1Arb4N5uk,8263
|
||||||
|
joblib/externals/loky/backend/resource_tracker.py,sha256=JgphN4HDNaw0Uq5PYvlKOkkufJnPSVvfSUnLesmbUzY,11758
|
||||||
|
joblib/externals/loky/backend/semlock.py,sha256=5d7SXHLyw4AZROLZHwsZ9N7FgrrBLMzPB5YAPDWlu1o,8918
|
||||||
|
joblib/externals/loky/backend/spawn.py,sha256=obfNP6c-da6TyyFn7a-SuVDTTNxhKB03lxqkWUargwY,7885
|
||||||
|
joblib/externals/loky/backend/synchronize.py,sha256=6ayerlMy0nXU3jGooHwus7mY5WVRZoMZ8qbVsAuUkhk,11381
|
||||||
|
joblib/externals/loky/backend/utils.py,sha256=GcKkfL1_kk6oDn-YC6a9mW_xyF0Vvt4M-t96iiNB5nY,5691
|
||||||
|
joblib/test/__init__.py,sha256=bkIwY5OneyPcRn2VuzQlIFdtW5Cwo1mUJ7IfSztDO9c,73
|
||||||
|
joblib/test/common.py,sha256=ZytAxHuzPgfJxOiXeNq4O3UrObcqiMzy_4VNUiM1AFo,3348
|
||||||
|
joblib/test/test_backports.py,sha256=Y9bhGa6H-K_FgLkDyXaSHzpaWk148Rjn8R9IKCKdy-k,1175
|
||||||
|
joblib/test/test_dask.py,sha256=G9pKabNuZpM-e--ZhNlXF-Fsy8DDAN4wF_I_rCyNe4U,11889
|
||||||
|
joblib/test/test_disk.py,sha256=wJd1o9nLzqEjLqxxkgB9S7-UcKjHPQ8qK5l0czcNp0o,2205
|
||||||
|
joblib/test/test_format_stack.py,sha256=wTtjRlp0edNv7_NzxZU6DAVJQoebL-lnGsUEMwVZXpM,4250
|
||||||
|
joblib/test/test_func_inspect.py,sha256=pNO9cWemIyBUfPqb4CIBVwncaOYxGsp0bog_H1rfNks,9668
|
||||||
|
joblib/test/test_func_inspect_special_encoding.py,sha256=oHbMTPOK3XI0YVoS0GsouJ-GfM_neP4GOIJC-TKnNgU,146
|
||||||
|
joblib/test/test_hashing.py,sha256=440_YAbOfK77w9dq-U52fg2ghDtbAmgougEBJ_dz65Q,15708
|
||||||
|
joblib/test/test_init.py,sha256=bgNF-9CIJl1MFNA75LBWOaiNtvduVfuvglz_u9Tt8Uc,422
|
||||||
|
joblib/test/test_logger.py,sha256=a8u3tujL0wv_L--F9_1CZ8CI-KjcTpYARzl5C6cyyEg,1112
|
||||||
|
joblib/test/test_memmapping.py,sha256=oWjSmBxT_WqQxOKDiyjePie43U1N_jPIq-D_wl6pPRU,23435
|
||||||
|
joblib/test/test_memory.py,sha256=U4jcGPCsiXG9DCaQoxrvOILiD04hO-fHCoOeMlSO434,39993
|
||||||
|
joblib/test/test_module.py,sha256=qpPqdgId8eDUvDtM0ugTYG6fAFeXwS__ngwoVtZ-5iQ,1969
|
||||||
|
joblib/test/test_my_exceptions.py,sha256=de_-7A3EYzAv3u-SntDrEkVfaAC9pE7_YHaSO1blgQk,2383
|
||||||
|
joblib/test/test_numpy_pickle.py,sha256=j5_pcppN-O-xxTJs4SvigJrk6ch6FAdWyx8iEGz0Iek,39373
|
||||||
|
joblib/test/test_numpy_pickle_compat.py,sha256=C5OiaFrqmxYD57fr_LpmItd6OOZPeOMfo9RVr6ZZIkk,624
|
||||||
|
joblib/test/test_numpy_pickle_utils.py,sha256=PJVVgr-v3so9oAf9LblASRCpt-wXAo19FvsUpw-fZjI,421
|
||||||
|
joblib/test/test_parallel.py,sha256=-ZkXlMSFyiFxq4_sUHP2_FUcju45Yy1W2yizeVJQVYQ,62342
|
||||||
|
joblib/test/test_store_backends.py,sha256=fZh0_E5Rj5VTJ_UzH3autHpWwEaWQvWTiQB8felVAN4,1942
|
||||||
|
joblib/test/test_testing.py,sha256=I-EkdKHWdHu8m5fo2NnyB0AqR8zAOJ01WKKvyZYRneY,2467
|
||||||
|
joblib/test/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||||
|
joblib/test/data/create_numpy_pickle.py,sha256=UwB5k8Yjh00CPBsCzxKnth0ZvkfdtZf3Bx1lYakhlOE,3616
|
||||||
|
joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz,sha256=QYRH6Q2DSGVorjCSqWCxjTWCMOJKyew4Nl2qmfQVvQ8,769
|
||||||
|
joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz,sha256=ofTozM_KlPJa50TR8FCwc09mMmO6OO0GQhgUBLNIsXs,757
|
||||||
|
joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz,sha256=2eIVeA-XjOaT5IEQ6tI2UuHG3hwhiRciMmkBmPcIh4g,792
|
||||||
|
joblib/test/data/joblib_0.10.0_compressed_pickle_py34_np19.gz,sha256=Gr2z_1tVWDH1H3_wCVHmakknf8KqeHKT8Yz4d1vmUCM,794
|
||||||
|
joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz,sha256=pWw_xuDbOkECqu1KGf1OFU7s2VbzC2v5F5iXhE7TwB4,790
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np16.pkl,sha256=icRQjj374B-AHk5znxre0T9oWUHokoHIBQ8MqKo8l-U,986
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np16.pkl.bz2,sha256=iM3fX-Z5ULqdH263VR0lgTSlGqJotLlbAF4gaSUKB6g,997
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np16.pkl.gzip,sha256=QYRH6Q2DSGVorjCSqWCxjTWCMOJKyew4Nl2qmfQVvQ8,769
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl,sha256=icRQjj374B-AHk5znxre0T9oWUHokoHIBQ8MqKo8l-U,986
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.bz2,sha256=oYQVIyMiUxyRgWSuBBSOvCWKzToA-kUpcoQWdV4UoV4,997
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.gzip,sha256=Jpv3iGcDgKTv-O4nZsUreIbUK7qnt2cugZ-VMgNeEDQ,798
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.lzma,sha256=c0wu0x8pPv4BcStj7pE61rZpf68FLG_pNzQZ4e82zH8,660
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz,sha256=77FG1FDG0GHQav-1bxc4Tn9ky6ubUW_MbE0_iGmz5wc,712
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl,sha256=4GTC7s_cWNVShERn2nvVbspZYJgyK_0man4TEqvdVzU,1068
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.bz2,sha256=6G1vbs_iYmz2kYJ6w4qB1k7D67UnxUMus0S4SWeBtFo,1000
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.gzip,sha256=tlRUWeJS1BXmcwtLNSNK9L0hDHekFl07CqWxTShinmY,831
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.lzma,sha256=CorPwnfv3rR5hjNtJI01-sEBMOnkSxNlRVaWTszMopA,694
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.xz,sha256=Dppj3MffOKsKETeptEtDaxPOv6MA6xnbpK5LzlDQ-oE,752
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl,sha256=HL5Fb1uR9aPLjjhoOPJ2wwM1Qyo1FCZoYYd2HVw0Fos,1068
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2,sha256=Pyr2fqZnwfUxXdyrBr-kRwBYY8HA_Yi7fgSguKy5pUs,1021
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip,sha256=os8NJjQI9FhnlZM-Ay9dX_Uo35gZnoJCgQSIVvcBPfE,831
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma,sha256=Q_0y43qU7_GqAabJ8y3PWVhOisurnCAq3GzuCu04V58,697
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.xz,sha256=BNfmiQfpeLVpdfkwlJK4hJ5Cpgl0vreVyekyc5d_PNM,752
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl,sha256=l7nvLolhBDIdPFznOz3lBHiMOPBPCMi1bXop1tFSCpY,1068
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.bz2,sha256=pqGpuIS-ZU4uP8mkglHs8MaSDiVcPy7l3XHYJSppRgY,1005
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.gzip,sha256=YRFXE6LEb6qK72yPqnXdqQVY8Ts8xKUS9PWQKhLxWvk,833
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma,sha256=Bf7gCUeTuTjCkbcIdyZYz69irblX4SAVQEzxCnMQhNU,701
|
||||||
|
joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz,sha256=As8w2LGWwwNmKy3QNdKljK63Yq46gjRf_RJ0lh5_WqA,752
|
||||||
|
joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz,sha256=1WrnXDqDoNEPYOZX1Q5Wr2463b8vVV6fw4Wm5S4bMt4,800
|
||||||
|
joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl,sha256=XmsOFxeC1f1aYdGETclG6yfF9rLoB11DayOAhDMULrw,1068
|
||||||
|
joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.bz2,sha256=vI2yWb50LKL_NgZyd_XkoD5teIg93uI42mWnx9ee-AQ,991
|
||||||
|
joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.gzip,sha256=1WrnXDqDoNEPYOZX1Q5Wr2463b8vVV6fw4Wm5S4bMt4,800
|
||||||
|
joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.lzma,sha256=IWA0JlZG2ur53HgTUDl1m7q79dcVq6b0VOq33gKoJU0,715
|
||||||
|
joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.xz,sha256=3Xh_NbMZdBjYx7ynfJ3Fyke28izSRSSzzNB0z5D4k9Y,752
|
||||||
|
joblib/test/data/joblib_0.8.4_compressed_pickle_py27_np17.gz,sha256=Sp-ZT7i6pj5on2gbptszu7RarzJpOmHJ67UKOmCPQMg,659
|
||||||
|
joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz,sha256=NLtDrvo2XIH0KvUUAvhOqMeoXEjGW0IuTk_osu5XiDw,658
|
||||||
|
joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np17.gz,sha256=NLtDrvo2XIH0KvUUAvhOqMeoXEjGW0IuTk_osu5XiDw,658
|
||||||
|
joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz,sha256=nzO9iiGkG3KbBdrF3usOho8higkrDj_lmICUzxZyF_Y,673
|
||||||
|
joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz,sha256=nzO9iiGkG3KbBdrF3usOho8higkrDj_lmICUzxZyF_Y,673
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl,sha256=naijdk2xIeKdIa3mfJw0JlmOdtiN6uRM1yOJg6-M73M,670
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_03.npy,sha256=oMRa4qKJhBy-uiRDt-uqOzHAqencxzKUrKVynaAJJAU,236
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl,sha256=LynX8dLOygfxDfFywOgm7wgWOhSxLG7z-oDsU6X83Dw,670
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_03.npy,sha256=oMRa4qKJhBy-uiRDt-uqOzHAqencxzKUrKVynaAJJAU,236
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl,sha256=w9TLxpDTzp5TI6cU6lRvMsAasXEChcQgGE9s30sm_CU,691
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_03.npy,sha256=jt6aZKUrJdfbMJUJVsl47As5MrfRSs1avGMhbmS6vec,307
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl,sha256=ilOBAOaulLFvKrD32S1NfnpiK-LfzA9rC3O2I7xROuI,691
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_03.npy,sha256=jt6aZKUrJdfbMJUJVsl47As5MrfRSs1avGMhbmS6vec,307
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl,sha256=WfDVIqKcMzzh1gSAshIfzBoIpdLdZQuG79yYf5kfpOo,691
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy,sha256=jt6aZKUrJdfbMJUJVsl47As5MrfRSs1avGMhbmS6vec,307
|
||||||
|
joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104
|
||||||
|
joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz,sha256=8jYfWJsx0oY2J-3LlmEigK5cClnJSW2J2rfeSTZw-Ts,802
|
||||||
|
joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_01.npy.z,sha256=YT9VvT3sEl2uWlOyvH2CkyE9Sok4od9O3kWtgeuUUqE,43
|
||||||
|
joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_02.npy.z,sha256=txA5RDI0PRuiU_UNKY8pGp-zQgQQ9vaVvMi60hOPaVs,43
|
||||||
|
joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z,sha256=d3AwICvU2MpSNjh2aPIsdJeGZLlDjANAF1Soa6uM0Po,37
|
||||||
|
joblib-0.14.1.dist-info/LICENSE.txt,sha256=-OWIkGu9oHPojgnkwRnCbbbHLJsFncP-e-fgi-_0y60,1527
|
||||||
|
joblib-0.14.1.dist-info/METADATA,sha256=zgjue7-Hf0nJipSAvJ6_pewUfWlab2CyIo_SLZe-JR4,4536
|
||||||
|
joblib-0.14.1.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
|
||||||
|
joblib-0.14.1.dist-info/top_level.txt,sha256=P0LsoZ45gBL7ckL4lqQt7tdbrHD4xlVYhffmhHeeT_U,7
|
||||||
|
joblib-0.14.1.dist-info/RECORD,,
|
||||||
|
joblib-0.14.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||||
|
joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-37.pyc,,
|
||||||
|
joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-37.pyc,,
|
||||||
|
joblib/externals/cloudpickle/__pycache__/__init__.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/compat.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/compat_posix.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/compat_win32.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/context.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/fork_exec.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/managers.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/process.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/queues.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/reduction.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/semlock.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/spawn.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/synchronize.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/utils.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/_posix_wait.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/_win_wait.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/backend/__pycache__/__init__.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/__pycache__/process_executor.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/__pycache__/reusable_executor.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/__pycache__/_base.cpython-37.pyc,,
|
||||||
|
joblib/externals/loky/__pycache__/__init__.cpython-37.pyc,,
|
||||||
|
joblib/externals/__pycache__/__init__.cpython-37.pyc,,
|
||||||
|
joblib/test/data/__pycache__/create_numpy_pickle.cpython-37.pyc,,
|
||||||
|
joblib/test/data/__pycache__/__init__.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/common.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_backports.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_dask.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_disk.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_format_stack.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_func_inspect.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_hashing.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_init.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_logger.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_memmapping.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_memory.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_module.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_my_exceptions.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_numpy_pickle.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_numpy_pickle_compat.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_numpy_pickle_utils.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_parallel.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_store_backends.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/test_testing.cpython-37.pyc,,
|
||||||
|
joblib/test/__pycache__/__init__.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/backports.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/compressor.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/disk.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/executor.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/format_stack.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/func_inspect.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/hashing.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/logger.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/memory.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/my_exceptions.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/numpy_pickle.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/numpy_pickle_compat.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/numpy_pickle_utils.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/parallel.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/pool.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/testing.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/_compat.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/_dask.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/_memmapping_reducer.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/_memory_helpers.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/_multiprocessing_helpers.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/_parallel_backends.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/_store_backends.cpython-37.pyc,,
|
||||||
|
joblib/__pycache__/__init__.cpython-37.pyc,,
|
@ -0,0 +1,6 @@
|
|||||||
|
Wheel-Version: 1.0
|
||||||
|
Generator: bdist_wheel (0.33.6)
|
||||||
|
Root-Is-Purelib: true
|
||||||
|
Tag: py2-none-any
|
||||||
|
Tag: py3-none-any
|
||||||
|
|
@ -0,0 +1 @@
|
|||||||
|
joblib
|
968
Restaurant/Marta/venv/Lib/site-packages/numpy/LICENSE.txt
Normal file
968
Restaurant/Marta/venv/Lib/site-packages/numpy/LICENSE.txt
Normal file
@ -0,0 +1,968 @@
|
|||||||
|
Copyright (c) 2005-2019, NumPy Developers.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following
|
||||||
|
disclaimer in the documentation and/or other materials provided
|
||||||
|
with the distribution.
|
||||||
|
|
||||||
|
* Neither the name of the NumPy Developers nor the names of any
|
||||||
|
contributors may be used to endorse or promote products derived
|
||||||
|
from this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
----
|
||||||
|
|
||||||
|
This binary distribution of NumPy also bundles the following software:
|
||||||
|
|
||||||
|
|
||||||
|
Name: OpenBLAS
|
||||||
|
Files: extra-dll\libopenb*.dll
|
||||||
|
Description: bundled as a dynamically linked library
|
||||||
|
Availability: https://github.com/xianyi/OpenBLAS/
|
||||||
|
License: 3-clause BSD
|
||||||
|
Copyright (c) 2011-2014, The OpenBLAS Project
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written
|
||||||
|
permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||||
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
Name: LAPACK
|
||||||
|
Files: extra-dll\libopenb*.dll
|
||||||
|
Description: bundled in OpenBLAS
|
||||||
|
Availability: https://github.com/xianyi/OpenBLAS/
|
||||||
|
License 3-clause BSD
|
||||||
|
Copyright (c) 1992-2013 The University of Tennessee and The University
|
||||||
|
of Tennessee Research Foundation. All rights
|
||||||
|
reserved.
|
||||||
|
Copyright (c) 2000-2013 The University of California Berkeley. All
|
||||||
|
rights reserved.
|
||||||
|
Copyright (c) 2006-2013 The University of Colorado Denver. All rights
|
||||||
|
reserved.
|
||||||
|
|
||||||
|
$COPYRIGHT$
|
||||||
|
|
||||||
|
Additional copyrights may follow
|
||||||
|
|
||||||
|
$HEADER$
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
- Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
- Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer listed
|
||||||
|
in this license in the documentation and/or other materials
|
||||||
|
provided with the distribution.
|
||||||
|
|
||||||
|
- Neither the name of the copyright holders nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
The copyright holders provide no reassurances that the source code
|
||||||
|
provided does not infringe any patent, copyright, or any other
|
||||||
|
intellectual property rights of third parties. The copyright holders
|
||||||
|
disclaim any liability to any recipient for claims brought against
|
||||||
|
recipient by any third party for infringement of that parties
|
||||||
|
intellectual property rights.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
Name: GCC runtime library
|
||||||
|
Files: extra-dll\*.dll
|
||||||
|
Description: statically linked, in DLL files compiled with gfortran only
|
||||||
|
Availability: https://gcc.gnu.org/viewcvs/gcc/
|
||||||
|
License: GPLv3 + runtime exception
|
||||||
|
Copyright (C) 2002-2017 Free Software Foundation, Inc.
|
||||||
|
|
||||||
|
Libgfortran is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 3, or (at your option)
|
||||||
|
any later version.
|
||||||
|
|
||||||
|
Libgfortran is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
Under Section 7 of GPL version 3, you are granted additional
|
||||||
|
permissions described in the GCC Runtime Library Exception, version
|
||||||
|
3.1, as published by the Free Software Foundation.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License and
|
||||||
|
a copy of the GCC Runtime Library Exception along with this program;
|
||||||
|
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
|
||||||
|
<http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
|
||||||
|
Name: Microsoft Visual C++ Runtime Files
|
||||||
|
Files: extra-dll\msvcp140.dll
|
||||||
|
License: MSVC
|
||||||
|
https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime
|
||||||
|
|
||||||
|
Subject to the License Terms for the software, you may copy and
|
||||||
|
distribute with your program any of the files within the followng
|
||||||
|
folder and its subfolders except as noted below. You may not modify
|
||||||
|
these files.
|
||||||
|
|
||||||
|
C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist
|
||||||
|
|
||||||
|
You may not distribute the contents of the following folders:
|
||||||
|
|
||||||
|
C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\debug_nonredist
|
||||||
|
C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\onecore\debug_nonredist
|
||||||
|
|
||||||
|
Subject to the License Terms for the software, you may copy and
|
||||||
|
distribute the following files with your program in your program’s
|
||||||
|
application local folder or by deploying them into the Global
|
||||||
|
Assembly Cache (GAC):
|
||||||
|
|
||||||
|
VC\atlmfc\lib\mfcmifc80.dll
|
||||||
|
VC\atlmfc\lib\amd64\mfcmifc80.dll
|
||||||
|
|
||||||
|
|
||||||
|
Name: Microsoft Visual C++ Runtime Files
|
||||||
|
Files: extra-dll\msvc*90.dll, extra-dll\Microsoft.VC90.CRT.manifest
|
||||||
|
License: MSVC
|
||||||
|
For your convenience, we have provided the following folders for
|
||||||
|
use when redistributing VC++ runtime files. Subject to the license
|
||||||
|
terms for the software, you may redistribute the folder
|
||||||
|
(unmodified) in the application local folder as a sub-folder with
|
||||||
|
no change to the folder name. You may also redistribute all the
|
||||||
|
files (*.dll and *.manifest) within a folder, listed below the
|
||||||
|
folder for your convenience, as an entire set.
|
||||||
|
|
||||||
|
\VC\redist\x86\Microsoft.VC90.ATL\
|
||||||
|
atl90.dll
|
||||||
|
Microsoft.VC90.ATL.manifest
|
||||||
|
\VC\redist\ia64\Microsoft.VC90.ATL\
|
||||||
|
atl90.dll
|
||||||
|
Microsoft.VC90.ATL.manifest
|
||||||
|
\VC\redist\amd64\Microsoft.VC90.ATL\
|
||||||
|
atl90.dll
|
||||||
|
Microsoft.VC90.ATL.manifest
|
||||||
|
\VC\redist\x86\Microsoft.VC90.CRT\
|
||||||
|
msvcm90.dll
|
||||||
|
msvcp90.dll
|
||||||
|
msvcr90.dll
|
||||||
|
Microsoft.VC90.CRT.manifest
|
||||||
|
\VC\redist\ia64\Microsoft.VC90.CRT\
|
||||||
|
msvcm90.dll
|
||||||
|
msvcp90.dll
|
||||||
|
msvcr90.dll
|
||||||
|
Microsoft.VC90.CRT.manifest
|
||||||
|
|
||||||
|
----
|
||||||
|
|
||||||
|
Full text of license texts referred to above follows (that they are
|
||||||
|
listed below does not necessarily imply the conditions apply to the
|
||||||
|
present binary release):
|
||||||
|
|
||||||
|
----
|
||||||
|
|
||||||
|
GCC RUNTIME LIBRARY EXCEPTION
|
||||||
|
|
||||||
|
Version 3.1, 31 March 2009
|
||||||
|
|
||||||
|
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
|
||||||
|
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies of this
|
||||||
|
license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
This GCC Runtime Library Exception ("Exception") is an additional
|
||||||
|
permission under section 7 of the GNU General Public License, version
|
||||||
|
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
|
||||||
|
bears a notice placed by the copyright holder of the file stating that
|
||||||
|
the file is governed by GPLv3 along with this Exception.
|
||||||
|
|
||||||
|
When you use GCC to compile a program, GCC may combine portions of
|
||||||
|
certain GCC header files and runtime libraries with the compiled
|
||||||
|
program. The purpose of this Exception is to allow compilation of
|
||||||
|
non-GPL (including proprietary) programs to use, in this way, the
|
||||||
|
header files and runtime libraries covered by this Exception.
|
||||||
|
|
||||||
|
0. Definitions.
|
||||||
|
|
||||||
|
A file is an "Independent Module" if it either requires the Runtime
|
||||||
|
Library for execution after a Compilation Process, or makes use of an
|
||||||
|
interface provided by the Runtime Library, but is not otherwise based
|
||||||
|
on the Runtime Library.
|
||||||
|
|
||||||
|
"GCC" means a version of the GNU Compiler Collection, with or without
|
||||||
|
modifications, governed by version 3 (or a specified later version) of
|
||||||
|
the GNU General Public License (GPL) with the option of using any
|
||||||
|
subsequent versions published by the FSF.
|
||||||
|
|
||||||
|
"GPL-compatible Software" is software whose conditions of propagation,
|
||||||
|
modification and use would permit combination with GCC in accord with
|
||||||
|
the license of GCC.
|
||||||
|
|
||||||
|
"Target Code" refers to output from any compiler for a real or virtual
|
||||||
|
target processor architecture, in executable form or suitable for
|
||||||
|
input to an assembler, loader, linker and/or execution
|
||||||
|
phase. Notwithstanding that, Target Code does not include data in any
|
||||||
|
format that is used as a compiler intermediate representation, or used
|
||||||
|
for producing a compiler intermediate representation.
|
||||||
|
|
||||||
|
The "Compilation Process" transforms code entirely represented in
|
||||||
|
non-intermediate languages designed for human-written code, and/or in
|
||||||
|
Java Virtual Machine byte code, into Target Code. Thus, for example,
|
||||||
|
use of source code generators and preprocessors need not be considered
|
||||||
|
part of the Compilation Process, since the Compilation Process can be
|
||||||
|
understood as starting with the output of the generators or
|
||||||
|
preprocessors.
|
||||||
|
|
||||||
|
A Compilation Process is "Eligible" if it is done using GCC, alone or
|
||||||
|
with other GPL-compatible software, or if it is done without using any
|
||||||
|
work based on GCC. For example, using non-GPL-compatible Software to
|
||||||
|
optimize any GCC intermediate representations would not qualify as an
|
||||||
|
Eligible Compilation Process.
|
||||||
|
|
||||||
|
1. Grant of Additional Permission.
|
||||||
|
|
||||||
|
You have permission to propagate a work of Target Code formed by
|
||||||
|
combining the Runtime Library with Independent Modules, even if such
|
||||||
|
propagation would otherwise violate the terms of GPLv3, provided that
|
||||||
|
all Target Code was generated by Eligible Compilation Processes. You
|
||||||
|
may then convey such a combination under terms of your choice,
|
||||||
|
consistent with the licensing of the Independent Modules.
|
||||||
|
|
||||||
|
2. No Weakening of GCC Copyleft.
|
||||||
|
|
||||||
|
The availability of this Exception does not imply any general
|
||||||
|
presumption that third-party software is unaffected by the copyleft
|
||||||
|
requirements of the license of GCC.
|
||||||
|
|
||||||
|
----
|
||||||
|
|
||||||
|
GNU GENERAL PUBLIC LICENSE
|
||||||
|
Version 3, 29 June 2007
|
||||||
|
|
||||||
|
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
Preamble
|
||||||
|
|
||||||
|
The GNU General Public License is a free, copyleft license for
|
||||||
|
software and other kinds of works.
|
||||||
|
|
||||||
|
The licenses for most software and other practical works are designed
|
||||||
|
to take away your freedom to share and change the works. By contrast,
|
||||||
|
the GNU General Public License is intended to guarantee your freedom to
|
||||||
|
share and change all versions of a program--to make sure it remains free
|
||||||
|
software for all its users. We, the Free Software Foundation, use the
|
||||||
|
GNU General Public License for most of our software; it applies also to
|
||||||
|
any other work released this way by its authors. You can apply it to
|
||||||
|
your programs, too.
|
||||||
|
|
||||||
|
When we speak of free software, we are referring to freedom, not
|
||||||
|
price. Our General Public Licenses are designed to make sure that you
|
||||||
|
have the freedom to distribute copies of free software (and charge for
|
||||||
|
them if you wish), that you receive source code or can get it if you
|
||||||
|
want it, that you can change the software or use pieces of it in new
|
||||||
|
free programs, and that you know you can do these things.
|
||||||
|
|
||||||
|
To protect your rights, we need to prevent others from denying you
|
||||||
|
these rights or asking you to surrender the rights. Therefore, you have
|
||||||
|
certain responsibilities if you distribute copies of the software, or if
|
||||||
|
you modify it: responsibilities to respect the freedom of others.
|
||||||
|
|
||||||
|
For example, if you distribute copies of such a program, whether
|
||||||
|
gratis or for a fee, you must pass on to the recipients the same
|
||||||
|
freedoms that you received. You must make sure that they, too, receive
|
||||||
|
or can get the source code. And you must show them these terms so they
|
||||||
|
know their rights.
|
||||||
|
|
||||||
|
Developers that use the GNU GPL protect your rights with two steps:
|
||||||
|
(1) assert copyright on the software, and (2) offer you this License
|
||||||
|
giving you legal permission to copy, distribute and/or modify it.
|
||||||
|
|
||||||
|
For the developers' and authors' protection, the GPL clearly explains
|
||||||
|
that there is no warranty for this free software. For both users' and
|
||||||
|
authors' sake, the GPL requires that modified versions be marked as
|
||||||
|
changed, so that their problems will not be attributed erroneously to
|
||||||
|
authors of previous versions.
|
||||||
|
|
||||||
|
Some devices are designed to deny users access to install or run
|
||||||
|
modified versions of the software inside them, although the manufacturer
|
||||||
|
can do so. This is fundamentally incompatible with the aim of
|
||||||
|
protecting users' freedom to change the software. The systematic
|
||||||
|
pattern of such abuse occurs in the area of products for individuals to
|
||||||
|
use, which is precisely where it is most unacceptable. Therefore, we
|
||||||
|
have designed this version of the GPL to prohibit the practice for those
|
||||||
|
products. If such problems arise substantially in other domains, we
|
||||||
|
stand ready to extend this provision to those domains in future versions
|
||||||
|
of the GPL, as needed to protect the freedom of users.
|
||||||
|
|
||||||
|
Finally, every program is threatened constantly by software patents.
|
||||||
|
States should not allow patents to restrict development and use of
|
||||||
|
software on general-purpose computers, but in those that do, we wish to
|
||||||
|
avoid the special danger that patents applied to a free program could
|
||||||
|
make it effectively proprietary. To prevent this, the GPL assures that
|
||||||
|
patents cannot be used to render the program non-free.
|
||||||
|
|
||||||
|
The precise terms and conditions for copying, distribution and
|
||||||
|
modification follow.
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
0. Definitions.
|
||||||
|
|
||||||
|
"This License" refers to version 3 of the GNU General Public License.
|
||||||
|
|
||||||
|
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||||
|
works, such as semiconductor masks.
|
||||||
|
|
||||||
|
"The Program" refers to any copyrightable work licensed under this
|
||||||
|
License. Each licensee is addressed as "you". "Licensees" and
|
||||||
|
"recipients" may be individuals or organizations.
|
||||||
|
|
||||||
|
To "modify" a work means to copy from or adapt all or part of the work
|
||||||
|
in a fashion requiring copyright permission, other than the making of an
|
||||||
|
exact copy. The resulting work is called a "modified version" of the
|
||||||
|
earlier work or a work "based on" the earlier work.
|
||||||
|
|
||||||
|
A "covered work" means either the unmodified Program or a work based
|
||||||
|
on the Program.
|
||||||
|
|
||||||
|
To "propagate" a work means to do anything with it that, without
|
||||||
|
permission, would make you directly or secondarily liable for
|
||||||
|
infringement under applicable copyright law, except executing it on a
|
||||||
|
computer or modifying a private copy. Propagation includes copying,
|
||||||
|
distribution (with or without modification), making available to the
|
||||||
|
public, and in some countries other activities as well.
|
||||||
|
|
||||||
|
To "convey" a work means any kind of propagation that enables other
|
||||||
|
parties to make or receive copies. Mere interaction with a user through
|
||||||
|
a computer network, with no transfer of a copy, is not conveying.
|
||||||
|
|
||||||
|
An interactive user interface displays "Appropriate Legal Notices"
|
||||||
|
to the extent that it includes a convenient and prominently visible
|
||||||
|
feature that (1) displays an appropriate copyright notice, and (2)
|
||||||
|
tells the user that there is no warranty for the work (except to the
|
||||||
|
extent that warranties are provided), that licensees may convey the
|
||||||
|
work under this License, and how to view a copy of this License. If
|
||||||
|
the interface presents a list of user commands or options, such as a
|
||||||
|
menu, a prominent item in the list meets this criterion.
|
||||||
|
|
||||||
|
1. Source Code.
|
||||||
|
|
||||||
|
The "source code" for a work means the preferred form of the work
|
||||||
|
for making modifications to it. "Object code" means any non-source
|
||||||
|
form of a work.
|
||||||
|
|
||||||
|
A "Standard Interface" means an interface that either is an official
|
||||||
|
standard defined by a recognized standards body, or, in the case of
|
||||||
|
interfaces specified for a particular programming language, one that
|
||||||
|
is widely used among developers working in that language.
|
||||||
|
|
||||||
|
The "System Libraries" of an executable work include anything, other
|
||||||
|
than the work as a whole, that (a) is included in the normal form of
|
||||||
|
packaging a Major Component, but which is not part of that Major
|
||||||
|
Component, and (b) serves only to enable use of the work with that
|
||||||
|
Major Component, or to implement a Standard Interface for which an
|
||||||
|
implementation is available to the public in source code form. A
|
||||||
|
"Major Component", in this context, means a major essential component
|
||||||
|
(kernel, window system, and so on) of the specific operating system
|
||||||
|
(if any) on which the executable work runs, or a compiler used to
|
||||||
|
produce the work, or an object code interpreter used to run it.
|
||||||
|
|
||||||
|
The "Corresponding Source" for a work in object code form means all
|
||||||
|
the source code needed to generate, install, and (for an executable
|
||||||
|
work) run the object code and to modify the work, including scripts to
|
||||||
|
control those activities. However, it does not include the work's
|
||||||
|
System Libraries, or general-purpose tools or generally available free
|
||||||
|
programs which are used unmodified in performing those activities but
|
||||||
|
which are not part of the work. For example, Corresponding Source
|
||||||
|
includes interface definition files associated with source files for
|
||||||
|
the work, and the source code for shared libraries and dynamically
|
||||||
|
linked subprograms that the work is specifically designed to require,
|
||||||
|
such as by intimate data communication or control flow between those
|
||||||
|
subprograms and other parts of the work.
|
||||||
|
|
||||||
|
The Corresponding Source need not include anything that users
|
||||||
|
can regenerate automatically from other parts of the Corresponding
|
||||||
|
Source.
|
||||||
|
|
||||||
|
The Corresponding Source for a work in source code form is that
|
||||||
|
same work.
|
||||||
|
|
||||||
|
2. Basic Permissions.
|
||||||
|
|
||||||
|
All rights granted under this License are granted for the term of
|
||||||
|
copyright on the Program, and are irrevocable provided the stated
|
||||||
|
conditions are met. This License explicitly affirms your unlimited
|
||||||
|
permission to run the unmodified Program. The output from running a
|
||||||
|
covered work is covered by this License only if the output, given its
|
||||||
|
content, constitutes a covered work. This License acknowledges your
|
||||||
|
rights of fair use or other equivalent, as provided by copyright law.
|
||||||
|
|
||||||
|
You may make, run and propagate covered works that you do not
|
||||||
|
convey, without conditions so long as your license otherwise remains
|
||||||
|
in force. You may convey covered works to others for the sole purpose
|
||||||
|
of having them make modifications exclusively for you, or provide you
|
||||||
|
with facilities for running those works, provided that you comply with
|
||||||
|
the terms of this License in conveying all material for which you do
|
||||||
|
not control copyright. Those thus making or running the covered works
|
||||||
|
for you must do so exclusively on your behalf, under your direction
|
||||||
|
and control, on terms that prohibit them from making any copies of
|
||||||
|
your copyrighted material outside their relationship with you.
|
||||||
|
|
||||||
|
Conveying under any other circumstances is permitted solely under
|
||||||
|
the conditions stated below. Sublicensing is not allowed; section 10
|
||||||
|
makes it unnecessary.
|
||||||
|
|
||||||
|
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||||
|
|
||||||
|
No covered work shall be deemed part of an effective technological
|
||||||
|
measure under any applicable law fulfilling obligations under article
|
||||||
|
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||||
|
similar laws prohibiting or restricting circumvention of such
|
||||||
|
measures.
|
||||||
|
|
||||||
|
When you convey a covered work, you waive any legal power to forbid
|
||||||
|
circumvention of technological measures to the extent such circumvention
|
||||||
|
is effected by exercising rights under this License with respect to
|
||||||
|
the covered work, and you disclaim any intention to limit operation or
|
||||||
|
modification of the work as a means of enforcing, against the work's
|
||||||
|
users, your or third parties' legal rights to forbid circumvention of
|
||||||
|
technological measures.
|
||||||
|
|
||||||
|
4. Conveying Verbatim Copies.
|
||||||
|
|
||||||
|
You may convey verbatim copies of the Program's source code as you
|
||||||
|
receive it, in any medium, provided that you conspicuously and
|
||||||
|
appropriately publish on each copy an appropriate copyright notice;
|
||||||
|
keep intact all notices stating that this License and any
|
||||||
|
non-permissive terms added in accord with section 7 apply to the code;
|
||||||
|
keep intact all notices of the absence of any warranty; and give all
|
||||||
|
recipients a copy of this License along with the Program.
|
||||||
|
|
||||||
|
You may charge any price or no price for each copy that you convey,
|
||||||
|
and you may offer support or warranty protection for a fee.
|
||||||
|
|
||||||
|
5. Conveying Modified Source Versions.
|
||||||
|
|
||||||
|
You may convey a work based on the Program, or the modifications to
|
||||||
|
produce it from the Program, in the form of source code under the
|
||||||
|
terms of section 4, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) The work must carry prominent notices stating that you modified
|
||||||
|
it, and giving a relevant date.
|
||||||
|
|
||||||
|
b) The work must carry prominent notices stating that it is
|
||||||
|
released under this License and any conditions added under section
|
||||||
|
7. This requirement modifies the requirement in section 4 to
|
||||||
|
"keep intact all notices".
|
||||||
|
|
||||||
|
c) You must license the entire work, as a whole, under this
|
||||||
|
License to anyone who comes into possession of a copy. This
|
||||||
|
License will therefore apply, along with any applicable section 7
|
||||||
|
additional terms, to the whole of the work, and all its parts,
|
||||||
|
regardless of how they are packaged. This License gives no
|
||||||
|
permission to license the work in any other way, but it does not
|
||||||
|
invalidate such permission if you have separately received it.
|
||||||
|
|
||||||
|
d) If the work has interactive user interfaces, each must display
|
||||||
|
Appropriate Legal Notices; however, if the Program has interactive
|
||||||
|
interfaces that do not display Appropriate Legal Notices, your
|
||||||
|
work need not make them do so.
|
||||||
|
|
||||||
|
A compilation of a covered work with other separate and independent
|
||||||
|
works, which are not by their nature extensions of the covered work,
|
||||||
|
and which are not combined with it such as to form a larger program,
|
||||||
|
in or on a volume of a storage or distribution medium, is called an
|
||||||
|
"aggregate" if the compilation and its resulting copyright are not
|
||||||
|
used to limit the access or legal rights of the compilation's users
|
||||||
|
beyond what the individual works permit. Inclusion of a covered work
|
||||||
|
in an aggregate does not cause this License to apply to the other
|
||||||
|
parts of the aggregate.
|
||||||
|
|
||||||
|
6. Conveying Non-Source Forms.
|
||||||
|
|
||||||
|
You may convey a covered work in object code form under the terms
|
||||||
|
of sections 4 and 5, provided that you also convey the
|
||||||
|
machine-readable Corresponding Source under the terms of this License,
|
||||||
|
in one of these ways:
|
||||||
|
|
||||||
|
a) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by the
|
||||||
|
Corresponding Source fixed on a durable physical medium
|
||||||
|
customarily used for software interchange.
|
||||||
|
|
||||||
|
b) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by a
|
||||||
|
written offer, valid for at least three years and valid for as
|
||||||
|
long as you offer spare parts or customer support for that product
|
||||||
|
model, to give anyone who possesses the object code either (1) a
|
||||||
|
copy of the Corresponding Source for all the software in the
|
||||||
|
product that is covered by this License, on a durable physical
|
||||||
|
medium customarily used for software interchange, for a price no
|
||||||
|
more than your reasonable cost of physically performing this
|
||||||
|
conveying of source, or (2) access to copy the
|
||||||
|
Corresponding Source from a network server at no charge.
|
||||||
|
|
||||||
|
c) Convey individual copies of the object code with a copy of the
|
||||||
|
written offer to provide the Corresponding Source. This
|
||||||
|
alternative is allowed only occasionally and noncommercially, and
|
||||||
|
only if you received the object code with such an offer, in accord
|
||||||
|
with subsection 6b.
|
||||||
|
|
||||||
|
d) Convey the object code by offering access from a designated
|
||||||
|
place (gratis or for a charge), and offer equivalent access to the
|
||||||
|
Corresponding Source in the same way through the same place at no
|
||||||
|
further charge. You need not require recipients to copy the
|
||||||
|
Corresponding Source along with the object code. If the place to
|
||||||
|
copy the object code is a network server, the Corresponding Source
|
||||||
|
may be on a different server (operated by you or a third party)
|
||||||
|
that supports equivalent copying facilities, provided you maintain
|
||||||
|
clear directions next to the object code saying where to find the
|
||||||
|
Corresponding Source. Regardless of what server hosts the
|
||||||
|
Corresponding Source, you remain obligated to ensure that it is
|
||||||
|
available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided
|
||||||
|
you inform other peers where the object code and Corresponding
|
||||||
|
Source of the work are being offered to the general public at no
|
||||||
|
charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded
|
||||||
|
from the Corresponding Source as a System Library, need not be
|
||||||
|
included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any
|
||||||
|
tangible personal property which is normally used for personal, family,
|
||||||
|
or household purposes, or (2) anything designed or sold for incorporation
|
||||||
|
into a dwelling. In determining whether a product is a consumer product,
|
||||||
|
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||||
|
product received by a particular user, "normally used" refers to a
|
||||||
|
typical or common use of that class of product, regardless of the status
|
||||||
|
of the particular user or of the way in which the particular user
|
||||||
|
actually uses, or expects or is expected to use, the product. A product
|
||||||
|
is a consumer product regardless of whether the product has substantial
|
||||||
|
commercial, industrial or non-consumer uses, unless such uses represent
|
||||||
|
the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods,
|
||||||
|
procedures, authorization keys, or other information required to install
|
||||||
|
and execute modified versions of a covered work in that User Product from
|
||||||
|
a modified version of its Corresponding Source. The information must
|
||||||
|
suffice to ensure that the continued functioning of the modified object
|
||||||
|
code is in no case prevented or interfered with solely because
|
||||||
|
modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or
|
||||||
|
specifically for use in, a User Product, and the conveying occurs as
|
||||||
|
part of a transaction in which the right of possession and use of the
|
||||||
|
User Product is transferred to the recipient in perpetuity or for a
|
||||||
|
fixed term (regardless of how the transaction is characterized), the
|
||||||
|
Corresponding Source conveyed under this section must be accompanied
|
||||||
|
by the Installation Information. But this requirement does not apply
|
||||||
|
if neither you nor any third party retains the ability to install
|
||||||
|
modified object code on the User Product (for example, the work has
|
||||||
|
been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a
|
||||||
|
requirement to continue to provide support service, warranty, or updates
|
||||||
|
for a work that has been modified or installed by the recipient, or for
|
||||||
|
the User Product in which it has been modified or installed. Access to a
|
||||||
|
network may be denied when the modification itself materially and
|
||||||
|
adversely affects the operation of the network or violates the rules and
|
||||||
|
protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided,
|
||||||
|
in accord with this section must be in a format that is publicly
|
||||||
|
documented (and with an implementation available to the public in
|
||||||
|
source code form), and must require no special password or key for
|
||||||
|
unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.
|
||||||
|
|
||||||
|
"Additional permissions" are terms that supplement the terms of this
|
||||||
|
License by making exceptions from one or more of its conditions.
|
||||||
|
Additional permissions that are applicable to the entire Program shall
|
||||||
|
be treated as though they were included in this License, to the extent
|
||||||
|
that they are valid under applicable law. If additional permissions
|
||||||
|
apply only to part of the Program, that part may be used separately
|
||||||
|
under those permissions, but the entire Program remains governed by
|
||||||
|
this License without regard to the additional permissions.
|
||||||
|
|
||||||
|
When you convey a copy of a covered work, you may at your option
|
||||||
|
remove any additional permissions from that copy, or from any part of
|
||||||
|
it. (Additional permissions may be written to require their own
|
||||||
|
removal in certain cases when you modify the work.) You may place
|
||||||
|
additional permissions on material, added by you to a covered work,
|
||||||
|
for which you have or can give appropriate copyright permission.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, for material you
|
||||||
|
add to a covered work, you may (if authorized by the copyright holders of
|
||||||
|
that material) supplement the terms of this License with terms:
|
||||||
|
|
||||||
|
a) Disclaiming warranty or limiting liability differently from the
|
||||||
|
terms of sections 15 and 16 of this License; or
|
||||||
|
|
||||||
|
b) Requiring preservation of specified reasonable legal notices or
|
||||||
|
author attributions in that material or in the Appropriate Legal
|
||||||
|
Notices displayed by works containing it; or
|
||||||
|
|
||||||
|
c) Prohibiting misrepresentation of the origin of that material, or
|
||||||
|
requiring that modified versions of such material be marked in
|
||||||
|
reasonable ways as different from the original version; or
|
||||||
|
|
||||||
|
d) Limiting the use for publicity purposes of names of licensors or
|
||||||
|
authors of the material; or
|
||||||
|
|
||||||
|
e) Declining to grant rights under trademark law for use of some
|
||||||
|
trade names, trademarks, or service marks; or
|
||||||
|
|
||||||
|
f) Requiring indemnification of licensors and authors of that
|
||||||
|
material by anyone who conveys the material (or modified versions of
|
||||||
|
it) with contractual assumptions of liability to the recipient, for
|
||||||
|
any liability that these contractual assumptions directly impose on
|
||||||
|
those licensors and authors.
|
||||||
|
|
||||||
|
All other non-permissive additional terms are considered "further
|
||||||
|
restrictions" within the meaning of section 10. If the Program as you
|
||||||
|
received it, or any part of it, contains a notice stating that it is
|
||||||
|
governed by this License along with a term that is a further
|
||||||
|
restriction, you may remove that term. If a license document contains
|
||||||
|
a further restriction but permits relicensing or conveying under this
|
||||||
|
License, you may add to a covered work material governed by the terms
|
||||||
|
of that license document, provided that the further restriction does
|
||||||
|
not survive such relicensing or conveying.
|
||||||
|
|
||||||
|
If you add terms to a covered work in accord with this section, you
|
||||||
|
must place, in the relevant source files, a statement of the
|
||||||
|
additional terms that apply to those files, or a notice indicating
|
||||||
|
where to find the applicable terms.
|
||||||
|
|
||||||
|
Additional terms, permissive or non-permissive, may be stated in the
|
||||||
|
form of a separately written license, or stated as exceptions;
|
||||||
|
the above requirements apply either way.
|
||||||
|
|
||||||
|
8. Termination.
|
||||||
|
|
||||||
|
You may not propagate or modify a covered work except as expressly
|
||||||
|
provided under this License. Any attempt otherwise to propagate or
|
||||||
|
modify it is void, and will automatically terminate your rights under
|
||||||
|
this License (including any patent licenses granted under the third
|
||||||
|
paragraph of section 11).
|
||||||
|
|
||||||
|
However, if you cease all violation of this License, then your
|
||||||
|
license from a particular copyright holder is reinstated (a)
|
||||||
|
provisionally, unless and until the copyright holder explicitly and
|
||||||
|
finally terminates your license, and (b) permanently, if the copyright
|
||||||
|
holder fails to notify you of the violation by some reasonable means
|
||||||
|
prior to 60 days after the cessation.
|
||||||
|
|
||||||
|
Moreover, your license from a particular copyright holder is
|
||||||
|
reinstated permanently if the copyright holder notifies you of the
|
||||||
|
violation by some reasonable means, this is the first time you have
|
||||||
|
received notice of violation of this License (for any work) from that
|
||||||
|
copyright holder, and you cure the violation prior to 30 days after
|
||||||
|
your receipt of the notice.
|
||||||
|
|
||||||
|
Termination of your rights under this section does not terminate the
|
||||||
|
licenses of parties who have received copies or rights from you under
|
||||||
|
this License. If your rights have been terminated and not permanently
|
||||||
|
reinstated, you do not qualify to receive new licenses for the same
|
||||||
|
material under section 10.
|
||||||
|
|
||||||
|
9. Acceptance Not Required for Having Copies.
|
||||||
|
|
||||||
|
You are not required to accept this License in order to receive or
|
||||||
|
run a copy of the Program. Ancillary propagation of a covered work
|
||||||
|
occurring solely as a consequence of using peer-to-peer transmission
|
||||||
|
to receive a copy likewise does not require acceptance. However,
|
||||||
|
nothing other than this License grants you permission to propagate or
|
||||||
|
modify any covered work. These actions infringe copyright if you do
|
||||||
|
not accept this License. Therefore, by modifying or propagating a
|
||||||
|
covered work, you indicate your acceptance of this License to do so.
|
||||||
|
|
||||||
|
10. Automatic Licensing of Downstream Recipients.
|
||||||
|
|
||||||
|
Each time you convey a covered work, the recipient automatically
|
||||||
|
receives a license from the original licensors, to run, modify and
|
||||||
|
propagate that work, subject to this License. You are not responsible
|
||||||
|
for enforcing compliance by third parties with this License.
|
||||||
|
|
||||||
|
An "entity transaction" is a transaction transferring control of an
|
||||||
|
organization, or substantially all assets of one, or subdividing an
|
||||||
|
organization, or merging organizations. If propagation of a covered
|
||||||
|
work results from an entity transaction, each party to that
|
||||||
|
transaction who receives a copy of the work also receives whatever
|
||||||
|
licenses to the work the party's predecessor in interest had or could
|
||||||
|
give under the previous paragraph, plus a right to possession of the
|
||||||
|
Corresponding Source of the work from the predecessor in interest, if
|
||||||
|
the predecessor has it or can get it with reasonable efforts.
|
||||||
|
|
||||||
|
You may not impose any further restrictions on the exercise of the
|
||||||
|
rights granted or affirmed under this License. For example, you may
|
||||||
|
not impose a license fee, royalty, or other charge for exercise of
|
||||||
|
rights granted under this License, and you may not initiate litigation
|
||||||
|
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||||
|
any patent claim is infringed by making, using, selling, offering for
|
||||||
|
sale, or importing the Program or any portion of it.
|
||||||
|
|
||||||
|
11. Patents.
|
||||||
|
|
||||||
|
A "contributor" is a copyright holder who authorizes use under this
|
||||||
|
License of the Program or a work on which the Program is based. The
|
||||||
|
work thus licensed is called the contributor's "contributor version".
|
||||||
|
|
||||||
|
A contributor's "essential patent claims" are all patent claims
|
||||||
|
owned or controlled by the contributor, whether already acquired or
|
||||||
|
hereafter acquired, that would be infringed by some manner, permitted
|
||||||
|
by this License, of making, using, or selling its contributor version,
|
||||||
|
but do not include claims that would be infringed only as a
|
||||||
|
consequence of further modification of the contributor version. For
|
||||||
|
purposes of this definition, "control" includes the right to grant
|
||||||
|
patent sublicenses in a manner consistent with the requirements of
|
||||||
|
this License.
|
||||||
|
|
||||||
|
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||||
|
patent license under the contributor's essential patent claims, to
|
||||||
|
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||||
|
propagate the contents of its contributor version.
|
||||||
|
|
||||||
|
In the following three paragraphs, a "patent license" is any express
|
||||||
|
agreement or commitment, however denominated, not to enforce a patent
|
||||||
|
(such as an express permission to practice a patent or covenant not to
|
||||||
|
sue for patent infringement). To "grant" such a patent license to a
|
||||||
|
party means to make such an agreement or commitment not to enforce a
|
||||||
|
patent against the party.
|
||||||
|
|
||||||
|
If you convey a covered work, knowingly relying on a patent license,
|
||||||
|
and the Corresponding Source of the work is not available for anyone
|
||||||
|
to copy, free of charge and under the terms of this License, through a
|
||||||
|
publicly available network server or other readily accessible means,
|
||||||
|
then you must either (1) cause the Corresponding Source to be so
|
||||||
|
available, or (2) arrange to deprive yourself of the benefit of the
|
||||||
|
patent license for this particular work, or (3) arrange, in a manner
|
||||||
|
consistent with the requirements of this License, to extend the patent
|
||||||
|
license to downstream recipients. "Knowingly relying" means you have
|
||||||
|
actual knowledge that, but for the patent license, your conveying the
|
||||||
|
covered work in a country, or your recipient's use of the covered work
|
||||||
|
in a country, would infringe one or more identifiable patents in that
|
||||||
|
country that you have reason to believe are valid.
|
||||||
|
|
||||||
|
If, pursuant to or in connection with a single transaction or
|
||||||
|
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||||
|
covered work, and grant a patent license to some of the parties
|
||||||
|
receiving the covered work authorizing them to use, propagate, modify
|
||||||
|
or convey a specific copy of the covered work, then the patent license
|
||||||
|
you grant is automatically extended to all recipients of the covered
|
||||||
|
work and works based on it.
|
||||||
|
|
||||||
|
A patent license is "discriminatory" if it does not include within
|
||||||
|
the scope of its coverage, prohibits the exercise of, or is
|
||||||
|
conditioned on the non-exercise of one or more of the rights that are
|
||||||
|
specifically granted under this License. You may not convey a covered
|
||||||
|
work if you are a party to an arrangement with a third party that is
|
||||||
|
in the business of distributing software, under which you make payment
|
||||||
|
to the third party based on the extent of your activity of conveying
|
||||||
|
the work, and under which the third party grants, to any of the
|
||||||
|
parties who would receive the covered work from you, a discriminatory
|
||||||
|
patent license (a) in connection with copies of the covered work
|
||||||
|
conveyed by you (or copies made from those copies), or (b) primarily
|
||||||
|
for and in connection with specific products or compilations that
|
||||||
|
contain the covered work, unless you entered into that arrangement,
|
||||||
|
or that patent license was granted, prior to 28 March 2007.
|
||||||
|
|
||||||
|
Nothing in this License shall be construed as excluding or limiting
|
||||||
|
any implied license or other defenses to infringement that may
|
||||||
|
otherwise be available to you under applicable patent law.
|
||||||
|
|
||||||
|
12. No Surrender of Others' Freedom.
|
||||||
|
|
||||||
|
If conditions are imposed on you (whether by court order, agreement or
|
||||||
|
otherwise) that contradict the conditions of this License, they do not
|
||||||
|
excuse you from the conditions of this License. If you cannot convey a
|
||||||
|
covered work so as to satisfy simultaneously your obligations under this
|
||||||
|
License and any other pertinent obligations, then as a consequence you may
|
||||||
|
not convey it at all. For example, if you agree to terms that obligate you
|
||||||
|
to collect a royalty for further conveying from those to whom you convey
|
||||||
|
the Program, the only way you could satisfy both those terms and this
|
||||||
|
License would be to refrain entirely from conveying the Program.
|
||||||
|
|
||||||
|
13. Use with the GNU Affero General Public License.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, you have
|
||||||
|
permission to link or combine any covered work with a work licensed
|
||||||
|
under version 3 of the GNU Affero General Public License into a single
|
||||||
|
combined work, and to convey the resulting work. The terms of this
|
||||||
|
License will continue to apply to the part which is the covered work,
|
||||||
|
but the special requirements of the GNU Affero General Public License,
|
||||||
|
section 13, concerning interaction through a network will apply to the
|
||||||
|
combination as such.
|
||||||
|
|
||||||
|
14. Revised Versions of this License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions of
|
||||||
|
the GNU General Public License from time to time. Such new versions will
|
||||||
|
be similar in spirit to the present version, but may differ in detail to
|
||||||
|
address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the
|
||||||
|
Program specifies that a certain numbered version of the GNU General
|
||||||
|
Public License "or any later version" applies to it, you have the
|
||||||
|
option of following the terms and conditions either of that numbered
|
||||||
|
version or of any later version published by the Free Software
|
||||||
|
Foundation. If the Program does not specify a version number of the
|
||||||
|
GNU General Public License, you may choose any version ever published
|
||||||
|
by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Program specifies that a proxy can decide which future
|
||||||
|
versions of the GNU General Public License can be used, that proxy's
|
||||||
|
public statement of acceptance of a version permanently authorizes you
|
||||||
|
to choose that version for the Program.
|
||||||
|
|
||||||
|
Later license versions may give you additional or different
|
||||||
|
permissions. However, no additional obligations are imposed on any
|
||||||
|
author or copyright holder as a result of your choosing to follow a
|
||||||
|
later version.
|
||||||
|
|
||||||
|
15. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||||
|
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||||
|
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||||
|
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||||
|
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||||
|
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
16. Limitation of Liability.
|
||||||
|
|
||||||
|
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||||
|
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||||
|
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||||
|
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||||
|
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||||
|
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||||
|
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||||
|
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGES.
|
||||||
|
|
||||||
|
17. Interpretation of Sections 15 and 16.
|
||||||
|
|
||||||
|
If the disclaimer of warranty and limitation of liability provided
|
||||||
|
above cannot be given local legal effect according to their terms,
|
||||||
|
reviewing courts shall apply local law that most closely approximates
|
||||||
|
an absolute waiver of all civil liability in connection with the
|
||||||
|
Program, unless a warranty or assumption of liability accompanies a
|
||||||
|
copy of the Program in return for a fee.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Programs
|
||||||
|
|
||||||
|
If you develop a new program, and you want it to be of the greatest
|
||||||
|
possible use to the public, the best way to achieve this is to make it
|
||||||
|
free software which everyone can redistribute and change under these terms.
|
||||||
|
|
||||||
|
To do so, attach the following notices to the program. It is safest
|
||||||
|
to attach them to the start of each source file to most effectively
|
||||||
|
state the exclusion of warranty; and each file should have at least
|
||||||
|
the "copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
<one line to give the program's name and a brief idea of what it does.>
|
||||||
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation, either version 3 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
If the program does terminal interaction, make it output a short
|
||||||
|
notice like this when it starts in an interactive mode:
|
||||||
|
|
||||||
|
<program> Copyright (C) <year> <name of author>
|
||||||
|
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||||
|
This is free software, and you are welcome to redistribute it
|
||||||
|
under certain conditions; type `show c' for details.
|
||||||
|
|
||||||
|
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||||
|
parts of the General Public License. Of course, your program's commands
|
||||||
|
might be different; for a GUI interface, you would use an "about box".
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or school,
|
||||||
|
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||||
|
For more information on this, and how to apply and follow the GNU GPL, see
|
||||||
|
<http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
The GNU General Public License does not permit incorporating your program
|
||||||
|
into proprietary programs. If your program is a subroutine library, you
|
||||||
|
may consider it more useful to permit linking proprietary applications with
|
||||||
|
the library. If this is what you want to do, use the GNU Lesser General
|
||||||
|
Public License instead of this License. But first, please read
|
||||||
|
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
40
Restaurant/Marta/venv/Lib/site-packages/numpy/__config__.py
Normal file
40
Restaurant/Marta/venv/Lib/site-packages/numpy/__config__.py
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
# This file is generated by numpy's setup.py
|
||||||
|
# It contains system_info results at the time of building this package.
|
||||||
|
__all__ = ["get_info","show"]
|
||||||
|
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Location of the bundled BLAS/LAPACK DLLs shipped inside Windows wheels.
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')

if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
    # Python 3.8+ no longer consults PATH when resolving extension-module
    # DLL dependencies, so the directory must be registered explicitly;
    # older interpreters still honour PATH.
    if sys.version_info >= (3, 8):
        os.add_dll_directory(extra_dll_dir)
    else:
        os.environ['PATH'] = os.environ.get('PATH', '') + os.pathsep + extra_dll_dir

# system_info results captured at the time this package was built.
blas_mkl_info = {}
blis_info = {}
openblas_info = {'library_dirs': ['C:\\projects\\numpy-wheels\\numpy\\build\\openblas_info'], 'libraries': ['openblas_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info = {'library_dirs': ['C:\\projects\\numpy-wheels\\numpy\\build\\openblas_info'], 'libraries': ['openblas_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_mkl_info = {}
openblas_lapack_info = {'library_dirs': ['C:\\projects\\numpy-wheels\\numpy\\build\\openblas_lapack_info'], 'libraries': ['openblas_lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_opt_info = {'library_dirs': ['C:\\projects\\numpy-wheels\\numpy\\build\\openblas_lapack_info'], 'libraries': ['openblas_lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]}
|
||||||
|
|
||||||
|
def get_info(name):
    """Return the build-time system_info dict recorded for *name*.

    Looks *name* up among this module's globals, then falls back to
    ``name + "_info"``; returns an empty dict when neither exists.
    """
    module_vars = globals()
    if name in module_vars:
        return module_vars[name]
    return module_vars.get(name + "_info", {})
|
||||||
|
|
||||||
|
def show():
    """Print every build-time system_info section in this module.

    Each public module-level dict is treated as a section; empty
    sections are reported as NOT AVAILABLE. Overlong "sources" entries
    are elided to keep the output readable.
    """
    for section, entries in globals().items():
        # Only plain dicts with public names are configuration sections.
        if section.startswith("_") or type(entries) is not dict:
            continue
        print(section + ":")
        if not entries:
            print("  NOT AVAILABLE")
        for key, value in entries.items():
            text = str(value)
            if key == "sources" and len(text) > 200:
                text = text[:60] + " ...\n... " + text[-60:]
            print("    %s = %s" % (key, text))
|
260
Restaurant/Marta/venv/Lib/site-packages/numpy/__init__.py
Normal file
260
Restaurant/Marta/venv/Lib/site-packages/numpy/__init__.py
Normal file
@ -0,0 +1,260 @@
|
|||||||
|
"""
|
||||||
|
NumPy
|
||||||
|
=====
|
||||||
|
|
||||||
|
Provides
|
||||||
|
1. An array object of arbitrary homogeneous items
|
||||||
|
2. Fast mathematical operations over arrays
|
||||||
|
3. Linear Algebra, Fourier Transforms, Random Number Generation
|
||||||
|
|
||||||
|
How to use the documentation
|
||||||
|
----------------------------
|
||||||
|
Documentation is available in two forms: docstrings provided
|
||||||
|
with the code, and a loose standing reference guide, available from
|
||||||
|
`the NumPy homepage <https://www.scipy.org>`_.
|
||||||
|
|
||||||
|
We recommend exploring the docstrings using
|
||||||
|
`IPython <https://ipython.org>`_, an advanced Python shell with
|
||||||
|
TAB-completion and introspection capabilities. See below for further
|
||||||
|
instructions.
|
||||||
|
|
||||||
|
The docstring examples assume that `numpy` has been imported as `np`::
|
||||||
|
|
||||||
|
>>> import numpy as np
|
||||||
|
|
||||||
|
Code snippets are indicated by three greater-than signs::
|
||||||
|
|
||||||
|
>>> x = 42
|
||||||
|
>>> x = x + 1
|
||||||
|
|
||||||
|
Use the built-in ``help`` function to view a function's docstring::
|
||||||
|
|
||||||
|
>>> help(np.sort)
|
||||||
|
... # doctest: +SKIP
|
||||||
|
|
||||||
|
For some objects, ``np.info(obj)`` may provide additional help. This is
|
||||||
|
particularly true if you see the line "Help on ufunc object:" at the top
|
||||||
|
of the help() page. Ufuncs are implemented in C, not Python, for speed.
|
||||||
|
The native Python help() does not know how to view their help, but our
|
||||||
|
np.info() function does.
|
||||||
|
|
||||||
|
To search for documents containing a keyword, do::
|
||||||
|
|
||||||
|
>>> np.lookfor('keyword')
|
||||||
|
... # doctest: +SKIP
|
||||||
|
|
||||||
|
General-purpose documents like a glossary and help on the basic concepts
|
||||||
|
of numpy are available under the ``doc`` sub-module::
|
||||||
|
|
||||||
|
>>> from numpy import doc
|
||||||
|
>>> help(doc)
|
||||||
|
... # doctest: +SKIP
|
||||||
|
|
||||||
|
Available subpackages
|
||||||
|
---------------------
|
||||||
|
doc
|
||||||
|
Topical documentation on broadcasting, indexing, etc.
|
||||||
|
lib
|
||||||
|
Basic functions used by several sub-packages.
|
||||||
|
random
|
||||||
|
Core Random Tools
|
||||||
|
linalg
|
||||||
|
Core Linear Algebra Tools
|
||||||
|
fft
|
||||||
|
Core FFT routines
|
||||||
|
polynomial
|
||||||
|
Polynomial tools
|
||||||
|
testing
|
||||||
|
NumPy testing tools
|
||||||
|
f2py
|
||||||
|
Fortran to Python Interface Generator.
|
||||||
|
distutils
|
||||||
|
Enhancements to distutils with support for
|
||||||
|
Fortran compilers support and more.
|
||||||
|
|
||||||
|
Utilities
|
||||||
|
---------
|
||||||
|
test
|
||||||
|
Run numpy unittests
|
||||||
|
show_config
|
||||||
|
Show numpy build configuration
|
||||||
|
dual
|
||||||
|
Overwrite certain functions with high-performance Scipy tools
|
||||||
|
matlib
|
||||||
|
Make everything matrices.
|
||||||
|
__version__
|
||||||
|
NumPy version string
|
||||||
|
|
||||||
|
Viewing documentation using IPython
|
||||||
|
-----------------------------------
|
||||||
|
Start IPython with the NumPy profile (``ipython -p numpy``), which will
|
||||||
|
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
|
||||||
|
paste examples into the shell. To see which functions are available in
|
||||||
|
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
|
||||||
|
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
|
||||||
|
down the list. To view the docstring for a function, use
|
||||||
|
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
|
||||||
|
the source code).
|
||||||
|
|
||||||
|
Copies vs. in-place operation
|
||||||
|
-----------------------------
|
||||||
|
Most of the functions in `numpy` return a copy of the array argument
|
||||||
|
(e.g., `np.sort`). In-place versions of these functions are often
|
||||||
|
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
|
||||||
|
Exceptions to this rule are documented.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
|
||||||
|
from ._globals import _NoValue
|
||||||
|
|
||||||
|
# We first need to detect if we're being called as part of the numpy setup
|
||||||
|
# procedure itself in a reliable manner.
|
||||||
|
try:
|
||||||
|
__NUMPY_SETUP__
|
||||||
|
except NameError:
|
||||||
|
__NUMPY_SETUP__ = False
|
||||||
|
|
||||||
|
if __NUMPY_SETUP__:
|
||||||
|
sys.stderr.write('Running from numpy source directory.\n')
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
from numpy.__config__ import show as show_config
|
||||||
|
except ImportError:
|
||||||
|
msg = """Error importing numpy: you should not try to import numpy from
|
||||||
|
its source directory; please exit the numpy source tree, and relaunch
|
||||||
|
your python interpreter from there."""
|
||||||
|
raise ImportError(msg)
|
||||||
|
|
||||||
|
from .version import git_revision as __git_revision__
|
||||||
|
from .version import version as __version__
|
||||||
|
|
||||||
|
__all__ = ['ModuleDeprecationWarning',
|
||||||
|
'VisibleDeprecationWarning']
|
||||||
|
|
||||||
|
# Allow distributors to run custom init code
|
||||||
|
from . import _distributor_init
|
||||||
|
|
||||||
|
from . import core
|
||||||
|
from .core import *
|
||||||
|
from . import compat
|
||||||
|
from . import lib
|
||||||
|
# FIXME: why have numpy.lib if everything is imported here??
|
||||||
|
from .lib import *
|
||||||
|
|
||||||
|
from . import linalg
|
||||||
|
from . import fft
|
||||||
|
from . import polynomial
|
||||||
|
from . import random
|
||||||
|
from . import ctypeslib
|
||||||
|
from . import ma
|
||||||
|
from . import matrixlib as _mat
|
||||||
|
from .matrixlib import *
|
||||||
|
from .compat import long
|
||||||
|
|
||||||
|
# Make these accessible from numpy name-space
|
||||||
|
# but not imported in from numpy import *
|
||||||
|
# TODO[gh-6103]: Deprecate these
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
from builtins import bool, int, float, complex, object, str
|
||||||
|
unicode = str
|
||||||
|
else:
|
||||||
|
from __builtin__ import bool, int, float, complex, object, unicode, str
|
||||||
|
|
||||||
|
from .core import round, abs, max, min
|
||||||
|
# now that numpy modules are imported, can initialize limits
|
||||||
|
core.getlimits._register_known_types()
|
||||||
|
|
||||||
|
__all__.extend(['__version__', 'show_config'])
|
||||||
|
__all__.extend(core.__all__)
|
||||||
|
__all__.extend(_mat.__all__)
|
||||||
|
__all__.extend(lib.__all__)
|
||||||
|
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
|
||||||
|
|
||||||
|
# These are added by `from .core import *` and `core.__all__`, but we
|
||||||
|
# overwrite them above with builtins we do _not_ want to export.
|
||||||
|
__all__.remove('long')
|
||||||
|
__all__.remove('unicode')
|
||||||
|
|
||||||
|
# Remove things that are in the numpy.lib but not in the numpy namespace
|
||||||
|
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
|
||||||
|
# that prevents adding more things to the main namespace by accident.
|
||||||
|
# The list below will grow until the `from .lib import *` fixme above is
|
||||||
|
# taken care of
|
||||||
|
__all__.remove('Arrayterator')
|
||||||
|
del Arrayterator
|
||||||
|
|
||||||
|
# Filter out Cython harmless warnings
|
||||||
|
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
|
||||||
|
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
|
||||||
|
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
|
||||||
|
|
||||||
|
# oldnumeric and numarray were removed in 1.9. In case some packages import
|
||||||
|
# but do not use them, we define them here for backward compatibility.
|
||||||
|
oldnumeric = 'removed'
|
||||||
|
numarray = 'removed'
|
||||||
|
|
||||||
|
if sys.version_info[:2] >= (3, 7):
|
||||||
|
# Importing Tester requires importing all of UnitTest which is not a
|
||||||
|
# cheap import Since it is mainly used in test suits, we lazy import it
|
||||||
|
# here to save on the order of 10 ms of import time for most users
|
||||||
|
#
|
||||||
|
# The previous way Tester was imported also had a side effect of adding
|
||||||
|
# the full `numpy.testing` namespace
|
||||||
|
#
|
||||||
|
# module level getattr is only supported in 3.7 onwards
|
||||||
|
# https://www.python.org/dev/peps/pep-0562/
|
||||||
|
def __getattr__(attr):
|
||||||
|
if attr == 'testing':
|
||||||
|
import numpy.testing as testing
|
||||||
|
return testing
|
||||||
|
elif attr == 'Tester':
|
||||||
|
from .testing import Tester
|
||||||
|
return Tester
|
||||||
|
else:
|
||||||
|
raise AttributeError("module {!r} has no attribute "
|
||||||
|
"{!r}".format(__name__, attr))
|
||||||
|
|
||||||
|
def __dir__():
|
||||||
|
return list(globals().keys()) + ['Tester', 'testing']
|
||||||
|
|
||||||
|
else:
|
||||||
|
# We don't actually use this ourselves anymore, but I'm not 100% sure that
|
||||||
|
# no-one else in the world is using it (though I hope not)
|
||||||
|
from .testing import Tester
|
||||||
|
|
||||||
|
# Pytest testing
|
||||||
|
from numpy._pytesttester import PytestTester
|
||||||
|
test = PytestTester(__name__)
|
||||||
|
del PytestTester
|
||||||
|
|
||||||
|
|
||||||
|
def _sanity_check():
|
||||||
|
"""
|
||||||
|
Quick sanity checks for common bugs caused by environment.
|
||||||
|
There are some cases e.g. with wrong BLAS ABI that cause wrong
|
||||||
|
results under specific runtime conditions that are not necessarily
|
||||||
|
achieved during test suite runs, and it is useful to catch those early.
|
||||||
|
|
||||||
|
See https://github.com/numpy/numpy/issues/8577 and other
|
||||||
|
similar bug reports.
|
||||||
|
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
x = ones(2, dtype=float32)
|
||||||
|
if not abs(x.dot(x) - 2.0) < 1e-5:
|
||||||
|
raise AssertionError()
|
||||||
|
except AssertionError:
|
||||||
|
msg = ("The current Numpy installation ({!r}) fails to "
|
||||||
|
"pass simple sanity checks. This can be caused for example "
|
||||||
|
"by incorrect BLAS library being linked in, or by mixing "
|
||||||
|
"package managers (pip, conda, apt, ...). Search closed "
|
||||||
|
"numpy issues for similar problems.")
|
||||||
|
raise RuntimeError(msg.format(__file__))
|
||||||
|
|
||||||
|
_sanity_check()
|
||||||
|
del _sanity_check
|
@ -0,0 +1,32 @@
|
|||||||
|
|
||||||
|
'''
|
||||||
|
Helper to preload windows dlls to prevent dll not found errors.
|
||||||
|
Once a DLL is preloaded, its namespace is made available to any
|
||||||
|
subsequent DLL. This file originated in the numpy-wheels repo,
|
||||||
|
and is created as part of the scripts that build the wheel.
|
||||||
|
'''
|
||||||
|
import os
|
||||||
|
from ctypes import WinDLL
|
||||||
|
import glob
|
||||||
|
if os.name == 'nt':
|
||||||
|
# convention for storing / loading the DLL from
|
||||||
|
# numpy/.libs/, if present
|
||||||
|
try:
|
||||||
|
basedir = os.path.dirname(__file__)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
libs_dir = os.path.abspath(os.path.join(basedir, '.libs'))
|
||||||
|
DLL_filenames = []
|
||||||
|
if os.path.isdir(libs_dir):
|
||||||
|
for filename in glob.glob(os.path.join(libs_dir,
|
||||||
|
'*openblas*dll')):
|
||||||
|
# NOTE: would it change behavior to load ALL
|
||||||
|
# DLLs at this path vs. the name restriction?
|
||||||
|
WinDLL(os.path.abspath(filename))
|
||||||
|
DLL_filenames.append(filename)
|
||||||
|
if len(DLL_filenames) > 1:
|
||||||
|
import warnings
|
||||||
|
warnings.warn("loaded more than 1 DLL from .libs:\n%s" %
|
||||||
|
"\n".join(DLL_filenames),
|
||||||
|
stacklevel=1)
|
81
Restaurant/Marta/venv/Lib/site-packages/numpy/_globals.py
Normal file
81
Restaurant/Marta/venv/Lib/site-packages/numpy/_globals.py
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
"""
|
||||||
|
Module defining global singleton classes.
|
||||||
|
|
||||||
|
This module raises a RuntimeError if an attempt to reload it is made. In that
|
||||||
|
way the identities of the classes defined here are fixed and will remain so
|
||||||
|
even if numpy itself is reloaded. In particular, a function like the following
|
||||||
|
will still work correctly after numpy is reloaded::
|
||||||
|
|
||||||
|
def foo(arg=np._NoValue):
|
||||||
|
if arg is np._NoValue:
|
||||||
|
...
|
||||||
|
|
||||||
|
That was not the case when the singleton classes were defined in the numpy
|
||||||
|
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
|
||||||
|
motivated this module.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
__ALL__ = [
|
||||||
|
'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
# Disallow reloading this module so as to preserve the identities of the
|
||||||
|
# classes defined here.
|
||||||
|
if '_is_loaded' in globals():
|
||||||
|
raise RuntimeError('Reloading numpy._globals is not allowed')
|
||||||
|
_is_loaded = True
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDeprecationWarning(DeprecationWarning):
|
||||||
|
"""Module deprecation warning.
|
||||||
|
|
||||||
|
The nose tester turns ordinary Deprecation warnings into test failures.
|
||||||
|
That makes it hard to deprecate whole modules, because they get
|
||||||
|
imported by default. So this is a special Deprecation warning that the
|
||||||
|
nose tester will let pass without making tests fail.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
ModuleDeprecationWarning.__module__ = 'numpy'
|
||||||
|
|
||||||
|
|
||||||
|
class VisibleDeprecationWarning(UserWarning):
|
||||||
|
"""Visible deprecation warning.
|
||||||
|
|
||||||
|
By default, python will not show deprecation warnings, so this class
|
||||||
|
can be used when a very visible warning is helpful, for example because
|
||||||
|
the usage is most likely a user bug.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
VisibleDeprecationWarning.__module__ = 'numpy'
|
||||||
|
|
||||||
|
|
||||||
|
class _NoValueType(object):
|
||||||
|
"""Special keyword value.
|
||||||
|
|
||||||
|
The instance of this class may be used as the default value assigned to a
|
||||||
|
deprecated keyword in order to check if it has been given a user defined
|
||||||
|
value.
|
||||||
|
"""
|
||||||
|
__instance = None
|
||||||
|
def __new__(cls):
|
||||||
|
# ensure that only one instance exists
|
||||||
|
if not cls.__instance:
|
||||||
|
cls.__instance = super(_NoValueType, cls).__new__(cls)
|
||||||
|
return cls.__instance
|
||||||
|
|
||||||
|
# needed for python 2 to preserve identity through a pickle
|
||||||
|
def __reduce__(self):
|
||||||
|
return (self.__class__, ())
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "<no value>"
|
||||||
|
|
||||||
|
|
||||||
|
_NoValue = _NoValueType()
|
214
Restaurant/Marta/venv/Lib/site-packages/numpy/_pytesttester.py
Normal file
214
Restaurant/Marta/venv/Lib/site-packages/numpy/_pytesttester.py
Normal file
@ -0,0 +1,214 @@
|
|||||||
|
"""
|
||||||
|
Pytest test running.
|
||||||
|
|
||||||
|
This module implements the ``test()`` function for NumPy modules. The usual
|
||||||
|
boiler plate for doing that is to put the following in the module
|
||||||
|
``__init__.py`` file::
|
||||||
|
|
||||||
|
from numpy._pytesttester import PytestTester
|
||||||
|
test = PytestTester(__name__).test
|
||||||
|
del PytestTester
|
||||||
|
|
||||||
|
|
||||||
|
Warnings filtering and other runtime settings should be dealt with in the
|
||||||
|
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
|
||||||
|
whether or not that file is found as follows:
|
||||||
|
|
||||||
|
* ``pytest.ini`` is present (develop mode)
|
||||||
|
All warnings except those explicily filtered out are raised as error.
|
||||||
|
* ``pytest.ini`` is absent (release mode)
|
||||||
|
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
|
||||||
|
warnings are passed through.
|
||||||
|
|
||||||
|
In practice, tests run from the numpy repo are run in develop mode. That
|
||||||
|
includes the standard ``python runtests.py`` invocation.
|
||||||
|
|
||||||
|
This module is imported by every numpy subpackage, so lies at the top level to
|
||||||
|
simplify circular import issues. For the same reason, it contains no numpy
|
||||||
|
imports at module scope, instead importing numpy within function calls.
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
__all__ = ['PytestTester']
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def _show_numpy_info():
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
print("NumPy version %s" % np.__version__)
|
||||||
|
relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
|
||||||
|
print("NumPy relaxed strides checking option:", relaxed_strides)
|
||||||
|
|
||||||
|
|
||||||
|
class PytestTester(object):
|
||||||
|
"""
|
||||||
|
Pytest test runner.
|
||||||
|
|
||||||
|
A test function is typically added to a package's __init__.py like so::
|
||||||
|
|
||||||
|
from numpy._pytesttester import PytestTester
|
||||||
|
test = PytestTester(__name__).test
|
||||||
|
del PytestTester
|
||||||
|
|
||||||
|
Calling this test function finds and runs all tests associated with the
|
||||||
|
module and all its sub-modules.
|
||||||
|
|
||||||
|
Attributes
|
||||||
|
----------
|
||||||
|
module_name : str
|
||||||
|
Full path to the package to test.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
module_name : module name
|
||||||
|
The name of the module to test.
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
Unlike the previous ``nose``-based implementation, this class is not
|
||||||
|
publicly exposed as it performs some ``numpy``-specific warning
|
||||||
|
suppression.
|
||||||
|
|
||||||
|
"""
|
||||||
|
def __init__(self, module_name):
|
||||||
|
self.module_name = module_name
|
||||||
|
|
||||||
|
def __call__(self, label='fast', verbose=1, extra_argv=None,
|
||||||
|
doctests=False, coverage=False, durations=-1, tests=None):
|
||||||
|
"""
|
||||||
|
Run tests for module using pytest.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
label : {'fast', 'full'}, optional
|
||||||
|
Identifies the tests to run. When set to 'fast', tests decorated
|
||||||
|
with `pytest.mark.slow` are skipped, when 'full', the slow marker
|
||||||
|
is ignored.
|
||||||
|
verbose : int, optional
|
||||||
|
Verbosity value for test outputs, in the range 1-3. Default is 1.
|
||||||
|
extra_argv : list, optional
|
||||||
|
List with any extra arguments to pass to pytests.
|
||||||
|
doctests : bool, optional
|
||||||
|
.. note:: Not supported
|
||||||
|
coverage : bool, optional
|
||||||
|
If True, report coverage of NumPy code. Default is False.
|
||||||
|
Requires installation of (pip) pytest-cov.
|
||||||
|
durations : int, optional
|
||||||
|
If < 0, do nothing, If 0, report time of all tests, if > 0,
|
||||||
|
report the time of the slowest `timer` tests. Default is -1.
|
||||||
|
tests : test or list of tests
|
||||||
|
Tests to be executed with pytest '--pyargs'
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
result : bool
|
||||||
|
Return True on success, false otherwise.
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
Each NumPy module exposes `test` in its namespace to run all tests for
|
||||||
|
it. For example, to run all tests for numpy.lib:
|
||||||
|
|
||||||
|
>>> np.lib.test() #doctest: +SKIP
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> result = np.lib.test() #doctest: +SKIP
|
||||||
|
...
|
||||||
|
1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
|
||||||
|
>>> result
|
||||||
|
True
|
||||||
|
|
||||||
|
"""
|
||||||
|
import pytest
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
#FIXME This is no longer needed? Assume it was for use in tests.
|
||||||
|
# cap verbosity at 3, which is equivalent to the pytest '-vv' option
|
||||||
|
#from . import utils
|
||||||
|
#verbose = min(int(verbose), 3)
|
||||||
|
#utils.verbose = verbose
|
||||||
|
#
|
||||||
|
|
||||||
|
module = sys.modules[self.module_name]
|
||||||
|
module_path = os.path.abspath(module.__path__[0])
|
||||||
|
|
||||||
|
# setup the pytest arguments
|
||||||
|
pytest_args = ["-l"]
|
||||||
|
|
||||||
|
# offset verbosity. The "-q" cancels a "-v".
|
||||||
|
pytest_args += ["-q"]
|
||||||
|
|
||||||
|
# Filter out distutils cpu warnings (could be localized to
|
||||||
|
# distutils tests). ASV has problems with top level import,
|
||||||
|
# so fetch module for suppression here.
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
warnings.simplefilter("always")
|
||||||
|
from numpy.distutils import cpuinfo
|
||||||
|
|
||||||
|
# Filter out annoying import messages. Want these in both develop and
|
||||||
|
# release mode.
|
||||||
|
pytest_args += [
|
||||||
|
"-W ignore:Not importing directory",
|
||||||
|
"-W ignore:numpy.dtype size changed",
|
||||||
|
"-W ignore:numpy.ufunc size changed",
|
||||||
|
"-W ignore::UserWarning:cpuinfo",
|
||||||
|
]
|
||||||
|
|
||||||
|
# When testing matrices, ignore their PendingDeprecationWarnings
|
||||||
|
pytest_args += [
|
||||||
|
"-W ignore:the matrix subclass is not",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Ignore python2.7 -3 warnings
|
||||||
|
pytest_args += [
|
||||||
|
r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning",
|
||||||
|
r"-W ignore:in 3\.x, __setslice__:DeprecationWarning",
|
||||||
|
r"-W ignore:in 3\.x, __getslice__:DeprecationWarning",
|
||||||
|
r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning",
|
||||||
|
r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning",
|
||||||
|
r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning",
|
||||||
|
r"-W ignore:the commands module has been removed in Python 3\.0:DeprecationWarning",
|
||||||
|
r"-W ignore:The 'new' module has been removed in Python 3\.0:DeprecationWarning",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
if doctests:
|
||||||
|
raise ValueError("Doctests not supported")
|
||||||
|
|
||||||
|
if extra_argv:
|
||||||
|
pytest_args += list(extra_argv)
|
||||||
|
|
||||||
|
if verbose > 1:
|
||||||
|
pytest_args += ["-" + "v"*(verbose - 1)]
|
||||||
|
|
||||||
|
if coverage:
|
||||||
|
pytest_args += ["--cov=" + module_path]
|
||||||
|
|
||||||
|
if label == "fast":
|
||||||
|
pytest_args += ["-m", "not slow"]
|
||||||
|
elif label != "full":
|
||||||
|
pytest_args += ["-m", label]
|
||||||
|
|
||||||
|
if durations >= 0:
|
||||||
|
pytest_args += ["--durations=%s" % durations]
|
||||||
|
|
||||||
|
if tests is None:
|
||||||
|
tests = [self.module_name]
|
||||||
|
|
||||||
|
pytest_args += ["--pyargs"] + list(tests)
|
||||||
|
|
||||||
|
|
||||||
|
# run tests.
|
||||||
|
_show_numpy_info()
|
||||||
|
|
||||||
|
try:
|
||||||
|
code = pytest.main(pytest_args)
|
||||||
|
except SystemExit as exc:
|
||||||
|
code = exc.code
|
||||||
|
|
||||||
|
return code == 0
|
87
Restaurant/Marta/venv/Lib/site-packages/numpy/conftest.py
Normal file
87
Restaurant/Marta/venv/Lib/site-packages/numpy/conftest.py
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
"""
|
||||||
|
Pytest configuration and fixtures for the Numpy test suite.
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
import numpy
|
||||||
|
|
||||||
|
from numpy.core._multiarray_tests import get_fpu_mode
|
||||||
|
|
||||||
|
|
||||||
|
_old_fpu_mode = None
|
||||||
|
_collect_results = {}
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_configure(config):
|
||||||
|
config.addinivalue_line("markers",
|
||||||
|
"valgrind_error: Tests that are known to error under valgrind.")
|
||||||
|
config.addinivalue_line("markers",
|
||||||
|
"leaks_references: Tests that are known to leak references.")
|
||||||
|
config.addinivalue_line("markers",
|
||||||
|
"slow: Tests that are very slow.")
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_addoption(parser):
|
||||||
|
parser.addoption("--available-memory", action="store", default=None,
|
||||||
|
help=("Set amount of memory available for running the "
|
||||||
|
"test suite. This can result to tests requiring "
|
||||||
|
"especially large amounts of memory to be skipped. "
|
||||||
|
"Equivalent to setting environment variable "
|
||||||
|
"NPY_AVAILABLE_MEM. Default: determined"
|
||||||
|
"automatically."))
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_sessionstart(session):
|
||||||
|
available_mem = session.config.getoption('available_memory')
|
||||||
|
if available_mem is not None:
|
||||||
|
os.environ['NPY_AVAILABLE_MEM'] = available_mem
|
||||||
|
|
||||||
|
|
||||||
|
#FIXME when yield tests are gone.
|
||||||
|
@pytest.hookimpl()
|
||||||
|
def pytest_itemcollected(item):
|
||||||
|
"""
|
||||||
|
Check FPU precision mode was not changed during test collection.
|
||||||
|
|
||||||
|
The clumsy way we do it here is mainly necessary because numpy
|
||||||
|
still uses yield tests, which can execute code at test collection
|
||||||
|
time.
|
||||||
|
"""
|
||||||
|
global _old_fpu_mode
|
||||||
|
|
||||||
|
mode = get_fpu_mode()
|
||||||
|
|
||||||
|
if _old_fpu_mode is None:
|
||||||
|
_old_fpu_mode = mode
|
||||||
|
elif mode != _old_fpu_mode:
|
||||||
|
_collect_results[item] = (_old_fpu_mode, mode)
|
||||||
|
_old_fpu_mode = mode
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="function", autouse=True)
|
||||||
|
def check_fpu_mode(request):
|
||||||
|
"""
|
||||||
|
Check FPU precision mode was not changed during the test.
|
||||||
|
"""
|
||||||
|
old_mode = get_fpu_mode()
|
||||||
|
yield
|
||||||
|
new_mode = get_fpu_mode()
|
||||||
|
|
||||||
|
if old_mode != new_mode:
|
||||||
|
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
|
||||||
|
" during the test".format(old_mode, new_mode))
|
||||||
|
|
||||||
|
collect_result = _collect_results.get(request.node)
|
||||||
|
if collect_result is not None:
|
||||||
|
old_mode, new_mode = collect_result
|
||||||
|
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
|
||||||
|
" when collecting the test".format(old_mode,
|
||||||
|
new_mode))
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(autouse=True)
|
||||||
|
def add_np(doctest_namespace):
|
||||||
|
doctest_namespace['np'] = numpy
|
150
Restaurant/Marta/venv/Lib/site-packages/numpy/core/__init__.py
Normal file
150
Restaurant/Marta/venv/Lib/site-packages/numpy/core/__init__.py
Normal file
@ -0,0 +1,150 @@
|
|||||||
|
"""
|
||||||
|
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
|
||||||
|
|
||||||
|
Please note that this module is private. All functions and objects
|
||||||
|
are available in the main ``numpy`` namespace - use that instead.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
from numpy.version import version as __version__
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
# disables OpenBLAS affinity setting of the main thread that limits
|
||||||
|
# python threads or processes to one core
|
||||||
|
env_added = []
|
||||||
|
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
|
||||||
|
if envkey not in os.environ:
|
||||||
|
os.environ[envkey] = '1'
|
||||||
|
env_added.append(envkey)
|
||||||
|
|
||||||
|
try:
|
||||||
|
from . import multiarray
|
||||||
|
except ImportError as exc:
|
||||||
|
import sys
|
||||||
|
msg = """
|
||||||
|
|
||||||
|
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
|
||||||
|
|
||||||
|
Importing the numpy C-extensions failed. This error can happen for
|
||||||
|
many reasons, often due to issues with your setup or how NumPy was
|
||||||
|
installed.
|
||||||
|
|
||||||
|
We have compiled some common reasons and troubleshooting tips at:
|
||||||
|
|
||||||
|
https://numpy.org/devdocs/user/troubleshooting-importerror.html
|
||||||
|
|
||||||
|
Please note and check the following:
|
||||||
|
|
||||||
|
* The Python version is: Python%d.%d from "%s"
|
||||||
|
* The NumPy version is: "%s"
|
||||||
|
|
||||||
|
and make sure that they are the versions you expect.
|
||||||
|
Please carefully study the documentation linked above for further help.
|
||||||
|
|
||||||
|
Original error was: %s
|
||||||
|
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
|
||||||
|
__version__, exc)
|
||||||
|
raise ImportError(msg)
|
||||||
|
finally:
|
||||||
|
for envkey in env_added:
|
||||||
|
del os.environ[envkey]
|
||||||
|
del envkey
|
||||||
|
del env_added
|
||||||
|
del os
|
||||||
|
|
||||||
|
from . import umath
|
||||||
|
|
||||||
|
# Check that multiarray,umath are pure python modules wrapping
|
||||||
|
# _multiarray_umath and not either of the old c-extension modules
|
||||||
|
if not (hasattr(multiarray, '_multiarray_umath') and
|
||||||
|
hasattr(umath, '_multiarray_umath')):
|
||||||
|
import sys
|
||||||
|
path = sys.modules['numpy'].__path__
|
||||||
|
msg = ("Something is wrong with the numpy installation. "
|
||||||
|
"While importing we detected an older version of "
|
||||||
|
"numpy in {}. One method of fixing this is to repeatedly uninstall "
|
||||||
|
"numpy until none is found, then reinstall this version.")
|
||||||
|
raise ImportError(msg.format(path))
|
||||||
|
|
||||||
|
from . import numerictypes as nt
|
||||||
|
multiarray.set_typeDict(nt.sctypeDict)
|
||||||
|
from . import numeric
|
||||||
|
from .numeric import *
|
||||||
|
from . import fromnumeric
|
||||||
|
from .fromnumeric import *
|
||||||
|
from . import defchararray as char
|
||||||
|
from . import records as rec
|
||||||
|
from .records import *
|
||||||
|
from .memmap import *
|
||||||
|
from .defchararray import chararray
|
||||||
|
from . import function_base
|
||||||
|
from .function_base import *
|
||||||
|
from . import machar
|
||||||
|
from .machar import *
|
||||||
|
from . import getlimits
|
||||||
|
from .getlimits import *
|
||||||
|
from . import shape_base
|
||||||
|
from .shape_base import *
|
||||||
|
from . import einsumfunc
|
||||||
|
from .einsumfunc import *
|
||||||
|
del nt
|
||||||
|
|
||||||
|
from .fromnumeric import amax as max, amin as min, round_ as round
|
||||||
|
from .numeric import absolute as abs
|
||||||
|
|
||||||
|
# do this after everything else, to minimize the chance of this misleadingly
|
||||||
|
# appearing in an import-time traceback
|
||||||
|
from . import _add_newdocs
|
||||||
|
# add these for module-freeze analysis (like PyInstaller)
|
||||||
|
from . import _dtype_ctypes
|
||||||
|
from . import _internal
|
||||||
|
from . import _dtype
|
||||||
|
from . import _methods
|
||||||
|
|
||||||
|
__all__ = ['char', 'rec', 'memmap']
|
||||||
|
__all__ += numeric.__all__
|
||||||
|
__all__ += fromnumeric.__all__
|
||||||
|
__all__ += rec.__all__
|
||||||
|
__all__ += ['chararray']
|
||||||
|
__all__ += function_base.__all__
|
||||||
|
__all__ += machar.__all__
|
||||||
|
__all__ += getlimits.__all__
|
||||||
|
__all__ += shape_base.__all__
|
||||||
|
__all__ += einsumfunc.__all__
|
||||||
|
|
||||||
|
# Make it possible so that ufuncs can be pickled
|
||||||
|
# Here are the loading and unloading functions
|
||||||
|
# The name numpy.core._ufunc_reconstruct must be
|
||||||
|
# available for unpickling to work.
|
||||||
|
def _ufunc_reconstruct(module, name):
|
||||||
|
# The `fromlist` kwarg is required to ensure that `mod` points to the
|
||||||
|
# inner-most module rather than the parent package when module name is
|
||||||
|
# nested. This makes it possible to pickle non-toplevel ufuncs such as
|
||||||
|
# scipy.special.expit for instance.
|
||||||
|
mod = __import__(module, fromlist=[name])
|
||||||
|
return getattr(mod, name)
|
||||||
|
|
||||||
|
def _ufunc_reduce(func):
|
||||||
|
from pickle import whichmodule
|
||||||
|
name = func.__name__
|
||||||
|
return _ufunc_reconstruct, (whichmodule(func, name), name)
|
||||||
|
|
||||||
|
|
||||||
|
import sys
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
import copyreg
|
||||||
|
else:
|
||||||
|
import copy_reg as copyreg
|
||||||
|
|
||||||
|
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
|
||||||
|
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
|
||||||
|
del copyreg
|
||||||
|
del sys
|
||||||
|
del _ufunc_reduce
|
||||||
|
|
||||||
|
from numpy._pytesttester import PytestTester
|
||||||
|
test = PytestTester(__name__)
|
||||||
|
del PytestTester
|
6874
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_add_newdocs.py
Normal file
6874
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_add_newdocs.py
Normal file
File diff suppressed because it is too large
Load Diff
324
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_asarray.py
Normal file
324
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_asarray.py
Normal file
@ -0,0 +1,324 @@
|
|||||||
|
"""
|
||||||
|
Functions in the ``as*array`` family that promote array-likes into arrays.
|
||||||
|
|
||||||
|
`require` fits this category despite its name not matching this pattern.
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
from .overrides import set_module
|
||||||
|
from .multiarray import array
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require",
|
||||||
|
]
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def asarray(a, dtype=None, order=None):
|
||||||
|
"""Convert the input to an array.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
a : array_like
|
||||||
|
Input data, in any form that can be converted to an array. This
|
||||||
|
includes lists, lists of tuples, tuples, tuples of tuples, tuples
|
||||||
|
of lists and ndarrays.
|
||||||
|
dtype : data-type, optional
|
||||||
|
By default, the data-type is inferred from the input data.
|
||||||
|
order : {'C', 'F'}, optional
|
||||||
|
Whether to use row-major (C-style) or
|
||||||
|
column-major (Fortran-style) memory representation.
|
||||||
|
Defaults to 'C'.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
out : ndarray
|
||||||
|
Array interpretation of `a`. No copy is performed if the input
|
||||||
|
is already an ndarray with matching dtype and order. If `a` is a
|
||||||
|
subclass of ndarray, a base class ndarray is returned.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
asanyarray : Similar function which passes through subclasses.
|
||||||
|
ascontiguousarray : Convert input to a contiguous array.
|
||||||
|
asfarray : Convert input to a floating point ndarray.
|
||||||
|
asfortranarray : Convert input to an ndarray with column-major
|
||||||
|
memory order.
|
||||||
|
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
|
||||||
|
fromiter : Create an array from an iterator.
|
||||||
|
fromfunction : Construct an array by executing a function on grid
|
||||||
|
positions.
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
Convert a list into an array:
|
||||||
|
|
||||||
|
>>> a = [1, 2]
|
||||||
|
>>> np.asarray(a)
|
||||||
|
array([1, 2])
|
||||||
|
|
||||||
|
Existing arrays are not copied:
|
||||||
|
|
||||||
|
>>> a = np.array([1, 2])
|
||||||
|
>>> np.asarray(a) is a
|
||||||
|
True
|
||||||
|
|
||||||
|
If `dtype` is set, array is copied only if dtype does not match:
|
||||||
|
|
||||||
|
>>> a = np.array([1, 2], dtype=np.float32)
|
||||||
|
>>> np.asarray(a, dtype=np.float32) is a
|
||||||
|
True
|
||||||
|
>>> np.asarray(a, dtype=np.float64) is a
|
||||||
|
False
|
||||||
|
|
||||||
|
Contrary to `asanyarray`, ndarray subclasses are not passed through:
|
||||||
|
|
||||||
|
>>> issubclass(np.recarray, np.ndarray)
|
||||||
|
True
|
||||||
|
>>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
|
||||||
|
>>> np.asarray(a) is a
|
||||||
|
False
|
||||||
|
>>> np.asanyarray(a) is a
|
||||||
|
True
|
||||||
|
|
||||||
|
"""
|
||||||
|
return array(a, dtype, copy=False, order=order)
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def asanyarray(a, dtype=None, order=None):
    """Convert the input to an ndarray, but pass ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Input data in any form convertible to an array: scalars, (nested)
        lists, tuples, and ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major (C-style) or column-major (Fortran-style)
        memory representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of `a`.  If `a` is already an ndarray or a
        subclass of ndarray, it is returned as-is and no copy is performed.

    See Also
    --------
    asarray : Similar function which always returns base-class ndarrays.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    asarray_chkfinite : Similar function which checks input for NaNs and
                        Infs.
    fromiter : Create an array from an iterator.
    fromfunction : Construct an array by executing a function on grid
                   positions.

    Examples
    --------
    Convert a list into an array:

    >>> a = [1, 2]
    >>> np.asanyarray(a)
    array([1, 2])

    Instances of `ndarray` subclasses are passed through as-is:

    >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
    >>> np.asanyarray(a) is a
    True

    """
    # subok=True is the single difference from `asarray`: subclass
    # instances come back untouched instead of being downcast.
    return array(a, dtype=dtype, copy=False, order=order, subok=True)
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array (ndim >= 1) in memory (C order).

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of returned array.

    Returns
    -------
    out : ndarray
        Contiguous array of same shape and content as `a`, with type
        `dtype` if specified.

    See Also
    --------
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> np.ascontiguousarray(x, dtype=np.float32)
    array([[0., 1., 2.],
           [3., 4., 5.]], dtype=float32)
    >>> x.flags['C_CONTIGUOUS']
    True

    Note: This function returns an array with at least one-dimension (1-d)
    so it will not preserve 0-d arrays.

    """
    # ndmin=1 enforces the documented "at least one dimension" guarantee;
    # order='C' forces row-major contiguity, copying only when required.
    return array(a, dtype=dtype, copy=False, order='C', ndmin=1)
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def asfortranarray(a, dtype=None):
    """
    Return an array (ndim >= 1) laid out in Fortran order in memory.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order.

    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    asanyarray : Convert input to an ndarray with either row or
        column-major memory order.
    require : Return an ndarray that satisfies requirements.
    ndarray.flags : Information about the memory layout of the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> y = np.asfortranarray(x)
    >>> x.flags['F_CONTIGUOUS']
    False
    >>> y.flags['F_CONTIGUOUS']
    True

    Note: This function returns an array with at least one-dimension (1-d)
    so it will not preserve 0-d arrays.

    """
    # Mirror image of `ascontiguousarray`: same ndmin=1 guarantee, but
    # column-major layout.
    return array(a, dtype=dtype, copy=False, order='F', ndmin=1)
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    This function is useful to be sure that an array with the correct flags
    is returned for passing to compiled code (perhaps through ctypes).

    Parameters
    ----------
    a : array_like
        The object to be converted to a type-and-requirement-satisfying
        array.
    dtype : data-type
        The required data-type. If None preserve the current dtype. If your
        application requires the data to be in native byteorder, include
        a byteorder specification as a part of the dtype specification.
    requirements : str or list of str
        The requirements list can be any of the following

        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
        * 'ALIGNED' ('A')      - ensure a data-type aligned array
        * 'WRITEABLE' ('W')    - ensure a writable array
        * 'OWNDATA' ('O')      - ensure an array that owns its own data
        * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass

    Returns
    -------
    out : ndarray
        Array with specified requirements and type if given.

    Raises
    ------
    ValueError
        If both 'C' and 'F' order are requested, or an unknown requirement
        string is passed (via KeyError on the flag lookup).

    See Also
    --------
    asarray, asanyarray, ascontiguousarray, asfortranarray
    ndarray.flags : Information about the memory layout of the array.

    Notes
    -----
    The returned array will be guaranteed to have the listed requirements
    by making a copy if needed.
    """
    possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
                      'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
                      'A': 'A', 'ALIGNED': 'A',
                      'W': 'W', 'WRITEABLE': 'W',
                      'O': 'O', 'OWNDATA': 'O',
                      'E': 'E', 'ENSUREARRAY': 'E'}
    if not requirements:
        # nothing requested beyond (optionally) the dtype
        return asanyarray(a, dtype=dtype)

    # normalize every spelling to its one-letter flag
    wanted = {possible_flags[req.upper()] for req in requirements}

    # 'E' demands a base-class ndarray rather than any subclass
    subok = 'E' not in wanted
    wanted.discard('E')

    if 'C' in wanted and 'F' in wanted:
        raise ValueError('Cannot specify both "C" and "F" order')

    order = 'A'
    for flag in ('F', 'C'):
        if flag in wanted:
            order = flag
            wanted.discard(flag)
            break

    result = array(a, dtype=dtype, order=order, copy=False, subok=subok)

    # A single copy is enough to satisfy any remaining flag
    # (ALIGNED / WRITEABLE / OWNDATA).
    if any(not result.flags[flag] for flag in wanted):
        result = result.copy(order)

    return result
|
354
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_dtype.py
Normal file
354
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_dtype.py
Normal file
@ -0,0 +1,354 @@
|
|||||||
|
"""
|
||||||
|
A place for code to be called from the implementation of np.dtype
|
||||||
|
|
||||||
|
String handling is much easier to do correctly in python.
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
|
||||||
|
_kind_to_stem = {
|
||||||
|
'u': 'uint',
|
||||||
|
'i': 'int',
|
||||||
|
'c': 'complex',
|
||||||
|
'f': 'float',
|
||||||
|
'b': 'bool',
|
||||||
|
'V': 'void',
|
||||||
|
'O': 'object',
|
||||||
|
'M': 'datetime',
|
||||||
|
'm': 'timedelta'
|
||||||
|
}
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
_kind_to_stem.update({
|
||||||
|
'S': 'bytes',
|
||||||
|
'U': 'str'
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
_kind_to_stem.update({
|
||||||
|
'S': 'string',
|
||||||
|
'U': 'unicode'
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
def _kind_name(dtype):
    """Return the English stem ('int', 'float', ...) for `dtype`'s kind code."""
    try:
        return _kind_to_stem[dtype.kind]
    except KeyError:
        # an unknown kind can only come from a bug in numpy itself,
        # hence RuntimeError rather than a user-facing error
        raise RuntimeError(
            "internal dtype error, unknown kind {!r}"
            .format(dtype.kind)
        )
|
||||||
|
|
||||||
|
|
||||||
|
def __str__(dtype):
    """Implementation of ``str(dtype)``, called from the C side of np.dtype."""
    if dtype.fields is not None:
        # structured dtype: render the list/dict form, honouring the
        # aligned flag
        return _struct_str(dtype, include_align=True)
    elif dtype.subdtype:
        # subarray dtype: "(base, shape)"
        return _subarray_str(dtype)
    elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
        # flexible (string/void) or byte-swapped dtypes need the explicit
        # type string, e.g. '<f8' or 'S5'
        return dtype.str
    else:
        return dtype.name
|
||||||
|
|
||||||
|
|
||||||
|
def __repr__(dtype):
    """Implementation of ``repr(dtype)``, called from the C side of np.dtype."""
    arg_str = _construction_repr(dtype, include_align=False)
    if dtype.isalignedstruct:
        # 'align' is not part of the first constructor argument, so it is
        # rendered as an explicit second parameter instead
        arg_str = arg_str + ", align=True"
    return "dtype({})".format(arg_str)
|
||||||
|
|
||||||
|
|
||||||
|
def _unpack_field(dtype, offset, title=None):
|
||||||
|
"""
|
||||||
|
Helper function to normalize the items in dtype.fields.
|
||||||
|
|
||||||
|
Call as:
|
||||||
|
|
||||||
|
dtype, offset, title = _unpack_field(*dtype.fields[name])
|
||||||
|
"""
|
||||||
|
return dtype, offset, title
|
||||||
|
|
||||||
|
|
||||||
|
def _isunsized(dtype):
|
||||||
|
# PyDataType_ISUNSIZED
|
||||||
|
return dtype.itemsize == 0
|
||||||
|
|
||||||
|
|
||||||
|
def _construction_repr(dtype, include_align=False, short=False):
    """
    Creates a string repr of the dtype, excluding the 'dtype()' part
    surrounding the object. This object may be a string, a list, or
    a dict depending on the nature of the dtype. This
    is the object passed as the first parameter to the dtype
    constructor, and if no additional constructor parameters are
    given, will reproduce the exact memory layout.

    Parameters
    ----------
    short : bool
        If true, this creates a shorter repr using 'kind' and 'itemsize', instead
        of the longer type name.

    include_align : bool
        If true, this includes the 'align=True' parameter
        inside the struct dtype construction dict when needed. Use this flag
        if you want a proper repr string without the 'dtype()' part around it.

        If false, this does not preserve the
        'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
        struct arrays like the regular repr does, because the 'align'
        flag is not part of first dtype constructor parameter. This
        mode is intended for a full 'repr', where the 'align=True' is
        provided as the second parameter.
    """
    # dispatch on the three structural families of dtype
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=include_align)
    elif dtype.subdtype:
        return _subarray_str(dtype)
    else:
        return _scalar_str(dtype, short=short)
|
||||||
|
|
||||||
|
|
||||||
|
def _scalar_str(dtype, short):
    """
    Build the quoted type-string for a non-structured, non-subarray dtype.

    Parameters
    ----------
    short : bool
        If true, prefer the compact kind+itemsize spelling (e.g. "'<f8'")
        over the long name (e.g. "'float64'") for numeric types.
    """
    byteorder = _byte_order_str(dtype)

    if dtype.type == np.bool_:
        if short:
            return "'?'"
        else:
            return "'bool'"

    elif dtype.type == np.object_:
        # The object reference may be different sizes on different
        # platforms, so it should never include the itemsize here.
        return "'O'"

    elif dtype.type == np.string_:
        if _isunsized(dtype):
            return "'S'"
        else:
            return "'S%d'" % dtype.itemsize

    elif dtype.type == np.unicode_:
        if _isunsized(dtype):
            return "'%sU'" % byteorder
        else:
            # itemsize is in bytes and each unicode code point occupies 4.
            # Use floor division so the count is an int rather than relying
            # on %d truncating a float.
            return "'%sU%d'" % (byteorder, dtype.itemsize // 4)

    # unlike the other types, subclasses of void are preserved - but
    # historically the repr does not actually reveal the subclass
    elif issubclass(dtype.type, np.void):
        if _isunsized(dtype):
            return "'V'"
        else:
            return "'V%d'" % dtype.itemsize

    elif dtype.type == np.datetime64:
        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))

    elif dtype.type == np.timedelta64:
        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))

    elif np.issubdtype(dtype, np.number):
        # Short repr with endianness, like '<f8'
        if short or dtype.byteorder not in ('=', '|'):
            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)

        # Longer repr, like 'float64'
        else:
            return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)

    elif dtype.isbuiltin == 2:
        # user-defined dtype registered with numpy
        return dtype.type.__name__

    else:
        raise RuntimeError(
            "Internal error: NumPy dtype unrecognized type number")
|
||||||
|
|
||||||
|
|
||||||
|
def _byte_order_str(dtype):
|
||||||
|
""" Normalize byteorder to '<' or '>' """
|
||||||
|
# hack to obtain the native and swapped byte order characters
|
||||||
|
swapped = np.dtype(int).newbyteorder('s')
|
||||||
|
native = swapped.newbyteorder('s')
|
||||||
|
|
||||||
|
byteorder = dtype.byteorder
|
||||||
|
if byteorder == '=':
|
||||||
|
return native.byteorder
|
||||||
|
if byteorder == 's':
|
||||||
|
# TODO: this path can never be reached
|
||||||
|
return swapped.byteorder
|
||||||
|
elif byteorder == '|':
|
||||||
|
return ''
|
||||||
|
else:
|
||||||
|
return byteorder
|
||||||
|
|
||||||
|
|
||||||
|
def _datetime_metadata_str(dtype):
|
||||||
|
# TODO: this duplicates the C append_metastr_to_string
|
||||||
|
unit, count = np.datetime_data(dtype)
|
||||||
|
if unit == 'generic':
|
||||||
|
return ''
|
||||||
|
elif count == 1:
|
||||||
|
return '[{}]'.format(unit)
|
||||||
|
else:
|
||||||
|
return '[{}{}]'.format(count, unit)
|
||||||
|
|
||||||
|
|
||||||
|
def _struct_dict_str(dtype, includealignedflag):
    """Render a structured dtype in its dict form.

    Produces ``{'names':[...], 'formats':[...], 'offsets':[...], ...}``;
    `includealignedflag` controls whether ``'aligned':True`` is appended
    for aligned structs.
    """
    # unpack the fields dictionary into ls
    names = dtype.names
    fld_dtypes = []
    offsets = []
    titles = []
    for name in names:
        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
        fld_dtypes.append(fld_dtype)
        offsets.append(offset)
        titles.append(title)

    # Build up a string to make the dictionary

    # First, the names
    ret = "{'names':["
    ret += ",".join(repr(name) for name in names)

    # Second, the formats
    ret += "], 'formats':["
    ret += ",".join(
        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)

    # Third, the offsets
    ret += "], 'offsets':["
    ret += ",".join("%d" % offset for offset in offsets)

    # Fourth, the titles
    # (omitted entirely when no field has a title)
    if any(title is not None for title in titles):
        ret += "], 'titles':["
        ret += ",".join(repr(title) for title in titles)

    # Fifth, the itemsize
    ret += "], 'itemsize':%d" % dtype.itemsize

    if (includealignedflag and dtype.isalignedstruct):
        # Finally, the aligned flag
        ret += ", 'aligned':True}"
    else:
        ret += "}"

    return ret
|
||||||
|
|
||||||
|
|
||||||
|
def _is_packed(dtype):
    """
    Checks whether the structured data type in 'dtype'
    has a simple layout, where all the fields are in order,
    and follow each other with no alignment padding.

    When this returns true, the dtype can be reconstructed
    from a list of the field names and dtypes with no additional
    dtype parameters.

    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
    """
    total_offset = 0
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
        # any gap before this field means padding was inserted
        if fld_offset != total_offset:
            return False
        total_offset += fld_dtype.itemsize
    # trailing padding after the last field also disqualifies the layout
    if total_offset != dtype.itemsize:
        return False
    return True
|
||||||
|
|
||||||
|
|
||||||
|
def _struct_list_str(dtype):
    """Render a packed structured dtype as a list of field tuples.

    Each field becomes ``(name, format)`` or ``((title, name), format)``,
    with subarray fields rendered as ``(name, base, shape)``.
    """
    items = []
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        item = "("
        if title is not None:
            item += "({!r}, {!r}), ".format(title, name)
        else:
            item += "{!r}, ".format(name)
        # Special case subarray handling here
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            item += "{}, {}".format(
                _construction_repr(base, short=True),
                shape
            )
        else:
            item += _construction_repr(fld_dtype, short=True)

        item += ")"
        items.append(item)

    return "[" + ", ".join(items) + "]"
|
||||||
|
|
||||||
|
|
||||||
|
def _struct_str(dtype, include_align):
    """Render a structured dtype, preferring the compact list form."""
    # The list str representation can't include the 'align=' flag,
    # so if it is requested and the struct has the aligned flag set,
    # we must use the dict str instead.
    if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
        sub = _struct_list_str(dtype)

    else:
        sub = _struct_dict_str(dtype, include_align)

    # If the data type isn't the default, void, show it
    # (void subclasses such as np.record are preserved by dtype construction)
    if dtype.type != np.void:
        return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
    else:
        return sub
|
||||||
|
|
||||||
|
|
||||||
|
def _subarray_str(dtype):
    """Render a subarray dtype as ``(base_dtype, shape)``."""
    base, shape = dtype.subdtype
    return "({}, {})".format(
        _construction_repr(base, short=True),
        shape
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _name_includes_bit_suffix(dtype):
    """Whether ``dtype.name`` should end with a bit count, like 'int64'."""
    if dtype.type == np.object_:
        # pointer size varies by system, best to omit it
        return False
    elif dtype.type == np.bool_:
        # implied
        return False
    elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
        # unspecified
        return False
    else:
        return True
|
||||||
|
|
||||||
|
|
||||||
|
def _name_get(dtype):
    """Compute the value of the ``dtype.name`` attribute."""
    # provides dtype.name.__get__, documented as returning a "bit name"

    if dtype.isbuiltin == 2:
        # user dtypes don't promise to do anything special
        return dtype.type.__name__

    if issubclass(dtype.type, np.void):
        # historically, void subclasses preserve their name, eg `record64`
        name = dtype.type.__name__
    else:
        name = _kind_name(dtype)

    # append bit counts
    if _name_includes_bit_suffix(dtype):
        name += "{}".format(dtype.itemsize * 8)

    # append metadata to datetimes
    if dtype.type in (np.datetime64, np.timedelta64):
        name += _datetime_metadata_str(dtype)

    return name
|
@ -0,0 +1,113 @@
|
|||||||
|
"""
|
||||||
|
Conversion from ctypes to dtype.
|
||||||
|
|
||||||
|
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
|
||||||
|
something like::
|
||||||
|
|
||||||
|
def dtype_from_ctypes_type(t):
|
||||||
|
# needed to ensure that the shape of `t` is within memoryview.format
|
||||||
|
class DummyStruct(ctypes.Structure):
|
||||||
|
_fields_ = [('a', t)]
|
||||||
|
|
||||||
|
# empty to avoid memory allocation
|
||||||
|
ctype_0 = (DummyStruct * 0)()
|
||||||
|
mv = memoryview(ctype_0)
|
||||||
|
|
||||||
|
# convert the struct, and slice back out the field
|
||||||
|
return _dtype_from_pep3118(mv.format)['a']
|
||||||
|
|
||||||
|
Unfortunately, this fails because:
|
||||||
|
|
||||||
|
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
|
||||||
|
* PEP3118 cannot represent unions, but both numpy and ctypes can
|
||||||
|
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
|
||||||
|
"""
|
||||||
|
import _ctypes
|
||||||
|
import ctypes
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
|
||||||
|
def _from_ctypes_array(t):
    """Convert a ctypes array type into an equivalent subarray dtype."""
    return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
|
||||||
|
|
||||||
|
|
||||||
|
def _from_ctypes_structure(t):
    """Convert a ctypes Structure subclass into a structured dtype.

    Raises
    ------
    TypeError
        If the structure declares bitfields (3-tuples in ``_fields_``),
        which have no dtype equivalent.
    """
    for item in t._fields_:
        if len(item) > 2:
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        # packed struct: recompute each field offset under the pack limit
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default offset, this is platform dependent for some types.
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            # round the running offset up to the effective alignment
            current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)

        return np.dtype(dict(
            formats=formats,
            offsets=offsets,
            names=names,
            itemsize=ctypes.sizeof(t)))
    else:
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))

        # by default, ctypes structs are aligned
        return np.dtype(fields, align=True)
|
||||||
|
|
||||||
|
|
||||||
|
def _from_ctypes_scalar(t):
|
||||||
|
"""
|
||||||
|
Return the dtype type with endianness included if it's the case
|
||||||
|
"""
|
||||||
|
if getattr(t, '__ctype_be__', None) is t:
|
||||||
|
return np.dtype('>' + t._type_)
|
||||||
|
elif getattr(t, '__ctype_le__', None) is t:
|
||||||
|
return np.dtype('<' + t._type_)
|
||||||
|
else:
|
||||||
|
return np.dtype(t._type_)
|
||||||
|
|
||||||
|
|
||||||
|
def _from_ctypes_union(t):
    """Convert a ctypes Union subclass into an overlapping structured dtype."""
    formats = []
    offsets = []
    names = []
    for fname, ftyp in t._fields_:
        names.append(fname)
        formats.append(dtype_from_ctypes_type(ftyp))
        offsets.append(0)  # Union fields are offset to 0

    return np.dtype(dict(
        formats=formats,
        offsets=offsets,
        names=names,
        itemsize=ctypes.sizeof(t)))
|
||||||
|
|
||||||
|
|
||||||
|
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type

    Raises
    ------
    TypeError
        For pointer types, which have no dtype equivalent.
    NotImplementedError
        For any ctypes type not covered by the dispatch below.
    """
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    elif issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    elif issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    elif issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    elif isinstance(getattr(t, '_type_', None), str):
        # simple scalar types carry a one-character type code in _type_
        return _from_ctypes_scalar(t)
    else:
        raise NotImplementedError(
            "Unknown ctypes type {}".format(t.__name__))
|
@ -0,0 +1,200 @@
|
|||||||
|
"""
|
||||||
|
Various richly-typed exceptions, that also help us deal with string formatting
|
||||||
|
in python where it's easier.
|
||||||
|
|
||||||
|
By putting the formatting in `__str__`, we also avoid paying the cost for
|
||||||
|
users who silence the exceptions.
|
||||||
|
"""
|
||||||
|
from numpy.core.overrides import set_module
|
||||||
|
|
||||||
|
def _unpack_tuple(tup):
|
||||||
|
if len(tup) == 1:
|
||||||
|
return tup[0]
|
||||||
|
else:
|
||||||
|
return tup
|
||||||
|
|
||||||
|
|
||||||
|
def _display_as_base(cls):
|
||||||
|
"""
|
||||||
|
A decorator that makes an exception class look like its base.
|
||||||
|
|
||||||
|
We use this to hide subclasses that are implementation details - the user
|
||||||
|
should catch the base type, which is what the traceback will show them.
|
||||||
|
|
||||||
|
Classes decorated with this decorator are subject to removal without a
|
||||||
|
deprecation warning.
|
||||||
|
"""
|
||||||
|
assert issubclass(cls, Exception)
|
||||||
|
cls.__name__ = cls.__base__.__name__
|
||||||
|
cls.__qualname__ = cls.__base__.__qualname__
|
||||||
|
set_module(cls.__base__.__module__)(cls)
|
||||||
|
return cls
|
||||||
|
|
||||||
|
|
||||||
|
class UFuncTypeError(TypeError):
    """ Base class for all ufunc exceptions """

    def __init__(self, ufunc):
        # keep the offending ufunc around for programmatic inspection
        self.ufunc = ufunc
|
||||||
|
|
||||||
|
|
||||||
|
@_display_as_base
class _UFuncBinaryResolutionError(UFuncTypeError):
    """ Thrown when a binary resolution fails """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        # exactly the two operand dtypes that could not be resolved
        self.dtypes = tuple(dtypes)
        assert len(self.dtypes) == 2

    def __str__(self):
        return (
            "ufunc {!r} cannot use operands with types {!r} and {!r}"
        ).format(
            self.ufunc.__name__, *self.dtypes
        )
|
||||||
|
|
||||||
|
|
||||||
|
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
    """ Thrown when a ufunc loop cannot be found """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        # all input and output dtypes, inputs first
        self.dtypes = tuple(dtypes)

    def __str__(self):
        return (
            "ufunc {!r} did not contain a loop with signature matching types "
            "{!r} -> {!r}"
        ).format(
            self.ufunc.__name__,
            # split the dtype tuple back into inputs and outputs,
            # collapsing 1-tuples for readability
            _unpack_tuple(self.dtypes[:self.ufunc.nin]),
            _unpack_tuple(self.dtypes[self.ufunc.nin:])
        )
|
||||||
|
|
||||||
|
|
||||||
|
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    """Shared state for input/output casting failures; subclasses format it."""
    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        self.casting = casting
        self.from_ = from_
        self.to = to
|
||||||
|
|
||||||
|
|
||||||
|
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc input cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        # index of the offending input operand
        self.in_i = i

    def __str__(self):
        # only show the number if more than one input exists
        i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
        return (
            "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(
            self.ufunc.__name__, i_str, self.from_, self.to, self.casting
        )
|
||||||
|
|
||||||
|
|
||||||
|
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc output cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        # index of the offending output operand
        self.out_i = i

    def __str__(self):
        # only show the number if more than one output exists
        i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
        return (
            "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(
            self.ufunc.__name__, i_str, self.from_, self.to, self.casting
        )
|
||||||
|
|
||||||
|
|
||||||
|
# Exception used in shares_memory()
@set_module('numpy')
class TooHardError(RuntimeError):
    """Raised when a memory-overlap computation exceeds its work budget."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
class AxisError(ValueError, IndexError):
    """ Axis supplied was invalid. """

    def __init__(self, axis, ndim=None, msg_prefix=None):
        if ndim is None and msg_prefix is None:
            # single-argument form: `axis` is already the full message,
            # delegating straight to the base class
            msg = axis
        else:
            # do the string formatting here, to save work in the C code
            msg = ("axis {} is out of bounds for array of dimension {}"
                   .format(axis, ndim))
            if msg_prefix is not None:
                msg = "{}: {}".format(msg_prefix, msg)

        super(AxisError, self).__init__(msg)
|
||||||
|
|
||||||
|
|
||||||
|
@_display_as_base
class _ArrayMemoryError(MemoryError):
    """ Thrown when an array cannot be allocated"""
    def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype

    @property
    def _total_size(self):
        """Total number of bytes the failed allocation asked for."""
        num_bytes = self.dtype.itemsize
        for dim in self.shape:
            num_bytes *= dim
        return num_bytes

    @staticmethod
    def _size_to_string(num_bytes):
        """ Convert a number of bytes into a binary size string """
        # NOTE: the previous revision imported `math` here but never used
        # it; the computation relies only on int.bit_length and shifts.

        # https://en.wikipedia.org/wiki/Binary_prefix
        LOG2_STEP = 10
        STEP = 1024
        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']

        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
        unit_val = 1 << (unit_i * LOG2_STEP)
        n_units = num_bytes / unit_val
        del unit_val

        # ensure we pick a unit that is correct after rounding
        if round(n_units) == STEP:
            unit_i += 1
            n_units /= STEP

        # deal with sizes so large that we don't have units for them
        if unit_i >= len(units):
            new_unit_i = len(units) - 1
            n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
            unit_i = new_unit_i

        unit_name = units[unit_i]
        # format with a sensible number of digits
        if unit_i == 0:
            # no decimal point on bytes
            return '{:.0f} {}'.format(n_units, unit_name)
        elif round(n_units) < 1000:
            # 3 significant figures, if none are dropped to the left of the .
            return '{:#.3g} {}'.format(n_units, unit_name)
        else:
            # just give all the digits otherwise
            return '{:#.0f} {}'.format(n_units, unit_name)

    def __str__(self):
        size_str = self._size_to_string(self._total_size)
        return (
            "Unable to allocate {} for an array with shape {} and data type {}"
            .format(size_str, self.shape, self.dtype)
        )
|
877
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_internal.py
Normal file
877
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_internal.py
Normal file
@ -0,0 +1,877 @@
|
|||||||
|
"""
|
||||||
|
A place for internal code
|
||||||
|
|
||||||
|
Some things are more easily handled Python.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import platform
|
||||||
|
|
||||||
|
from numpy.compat import unicode
|
||||||
|
from .multiarray import dtype, array, ndarray
|
||||||
|
try:
|
||||||
|
import ctypes
|
||||||
|
except ImportError:
|
||||||
|
ctypes = None
|
||||||
|
|
||||||
|
IS_PYPY = platform.python_implementation() == 'PyPy'
|
||||||
|
|
||||||
|
if (sys.byteorder == 'little'):
|
||||||
|
_nbo = b'<'
|
||||||
|
else:
|
||||||
|
_nbo = b'>'
|
||||||
|
|
||||||
|
def _makenames_list(adict, align):
|
||||||
|
allfields = []
|
||||||
|
fnames = list(adict.keys())
|
||||||
|
for fname in fnames:
|
||||||
|
obj = adict[fname]
|
||||||
|
n = len(obj)
|
||||||
|
if not isinstance(obj, tuple) or n not in [2, 3]:
|
||||||
|
raise ValueError("entry not a 2- or 3- tuple")
|
||||||
|
if (n > 2) and (obj[2] == fname):
|
||||||
|
continue
|
||||||
|
num = int(obj[1])
|
||||||
|
if (num < 0):
|
||||||
|
raise ValueError("invalid offset.")
|
||||||
|
format = dtype(obj[0], align=align)
|
||||||
|
if (n > 2):
|
||||||
|
title = obj[2]
|
||||||
|
else:
|
||||||
|
title = None
|
||||||
|
allfields.append((fname, format, num, title))
|
||||||
|
# sort by offsets
|
||||||
|
allfields.sort(key=lambda x: x[2])
|
||||||
|
names = [x[0] for x in allfields]
|
||||||
|
formats = [x[1] for x in allfields]
|
||||||
|
offsets = [x[2] for x in allfields]
|
||||||
|
titles = [x[3] for x in allfields]
|
||||||
|
|
||||||
|
return names, formats, offsets, titles
|
||||||
|
|
||||||
|
# Called in PyArray_DescrConverter function when
|
||||||
|
# a dictionary without "names" and "formats"
|
||||||
|
# fields is used as a data-type descriptor.
|
||||||
|
def _usefields(adict, align):
|
||||||
|
try:
|
||||||
|
names = adict[-1]
|
||||||
|
except KeyError:
|
||||||
|
names = None
|
||||||
|
if names is None:
|
||||||
|
names, formats, offsets, titles = _makenames_list(adict, align)
|
||||||
|
else:
|
||||||
|
formats = []
|
||||||
|
offsets = []
|
||||||
|
titles = []
|
||||||
|
for name in names:
|
||||||
|
res = adict[name]
|
||||||
|
formats.append(res[0])
|
||||||
|
offsets.append(res[1])
|
||||||
|
if (len(res) > 2):
|
||||||
|
titles.append(res[2])
|
||||||
|
else:
|
||||||
|
titles.append(None)
|
||||||
|
|
||||||
|
return dtype({"names": names,
|
||||||
|
"formats": formats,
|
||||||
|
"offsets": offsets,
|
||||||
|
"titles": titles}, align)
|
||||||
|
|
||||||
|
|
||||||
|
# construct an array_protocol descriptor list
|
||||||
|
# from the fields attribute of a descriptor
|
||||||
|
# This calls itself recursively but should eventually hit
|
||||||
|
# a descriptor that has no fields and then return
|
||||||
|
# a simple typestring
|
||||||
|
|
||||||
|
def _array_descr(descriptor):
|
||||||
|
fields = descriptor.fields
|
||||||
|
if fields is None:
|
||||||
|
subdtype = descriptor.subdtype
|
||||||
|
if subdtype is None:
|
||||||
|
if descriptor.metadata is None:
|
||||||
|
return descriptor.str
|
||||||
|
else:
|
||||||
|
new = descriptor.metadata.copy()
|
||||||
|
if new:
|
||||||
|
return (descriptor.str, new)
|
||||||
|
else:
|
||||||
|
return descriptor.str
|
||||||
|
else:
|
||||||
|
return (_array_descr(subdtype[0]), subdtype[1])
|
||||||
|
|
||||||
|
names = descriptor.names
|
||||||
|
ordered_fields = [fields[x] + (x,) for x in names]
|
||||||
|
result = []
|
||||||
|
offset = 0
|
||||||
|
for field in ordered_fields:
|
||||||
|
if field[1] > offset:
|
||||||
|
num = field[1] - offset
|
||||||
|
result.append(('', '|V%d' % num))
|
||||||
|
offset += num
|
||||||
|
elif field[1] < offset:
|
||||||
|
raise ValueError(
|
||||||
|
"dtype.descr is not defined for types with overlapping or "
|
||||||
|
"out-of-order fields")
|
||||||
|
if len(field) > 3:
|
||||||
|
name = (field[2], field[3])
|
||||||
|
else:
|
||||||
|
name = field[2]
|
||||||
|
if field[0].subdtype:
|
||||||
|
tup = (name, _array_descr(field[0].subdtype[0]),
|
||||||
|
field[0].subdtype[1])
|
||||||
|
else:
|
||||||
|
tup = (name, _array_descr(field[0]))
|
||||||
|
offset += field[0].itemsize
|
||||||
|
result.append(tup)
|
||||||
|
|
||||||
|
if descriptor.itemsize > offset:
|
||||||
|
num = descriptor.itemsize - offset
|
||||||
|
result.append(('', '|V%d' % num))
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Build a new array from the information in a pickle.
|
||||||
|
# Note that the name numpy.core._internal._reconstruct is embedded in
|
||||||
|
# pickles of ndarrays made with NumPy before release 1.0
|
||||||
|
# so don't remove the name here, or you'll
|
||||||
|
# break backward compatibility.
|
||||||
|
def _reconstruct(subtype, shape, dtype):
|
||||||
|
return ndarray.__new__(subtype, shape, dtype)
|
||||||
|
|
||||||
|
|
||||||
|
# format_re was originally from numarray by J. Todd Miller
|
||||||
|
|
||||||
|
format_re = re.compile(br'(?P<order1>[<>|=]?)'
|
||||||
|
br'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
|
||||||
|
br'(?P<order2>[<>|=]?)'
|
||||||
|
br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
|
||||||
|
sep_re = re.compile(br'\s*,\s*')
|
||||||
|
space_re = re.compile(br'\s+$')
|
||||||
|
|
||||||
|
# astr is a string (perhaps comma separated)
|
||||||
|
|
||||||
|
_convorder = {b'=': _nbo}
|
||||||
|
|
||||||
|
def _commastring(astr):
|
||||||
|
startindex = 0
|
||||||
|
result = []
|
||||||
|
while startindex < len(astr):
|
||||||
|
mo = format_re.match(astr, pos=startindex)
|
||||||
|
try:
|
||||||
|
(order1, repeats, order2, dtype) = mo.groups()
|
||||||
|
except (TypeError, AttributeError):
|
||||||
|
raise ValueError('format number %d of "%s" is not recognized' %
|
||||||
|
(len(result)+1, astr))
|
||||||
|
startindex = mo.end()
|
||||||
|
# Separator or ending padding
|
||||||
|
if startindex < len(astr):
|
||||||
|
if space_re.match(astr, pos=startindex):
|
||||||
|
startindex = len(astr)
|
||||||
|
else:
|
||||||
|
mo = sep_re.match(astr, pos=startindex)
|
||||||
|
if not mo:
|
||||||
|
raise ValueError(
|
||||||
|
'format number %d of "%s" is not recognized' %
|
||||||
|
(len(result)+1, astr))
|
||||||
|
startindex = mo.end()
|
||||||
|
|
||||||
|
if order2 == b'':
|
||||||
|
order = order1
|
||||||
|
elif order1 == b'':
|
||||||
|
order = order2
|
||||||
|
else:
|
||||||
|
order1 = _convorder.get(order1, order1)
|
||||||
|
order2 = _convorder.get(order2, order2)
|
||||||
|
if (order1 != order2):
|
||||||
|
raise ValueError(
|
||||||
|
'inconsistent byte-order specification %s and %s' %
|
||||||
|
(order1, order2))
|
||||||
|
order = order1
|
||||||
|
|
||||||
|
if order in [b'|', b'=', _nbo]:
|
||||||
|
order = b''
|
||||||
|
dtype = order + dtype
|
||||||
|
if (repeats == b''):
|
||||||
|
newitem = dtype
|
||||||
|
else:
|
||||||
|
newitem = (dtype, eval(repeats))
|
||||||
|
result.append(newitem)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
class dummy_ctype(object):
|
||||||
|
def __init__(self, cls):
|
||||||
|
self._cls = cls
|
||||||
|
def __mul__(self, other):
|
||||||
|
return self
|
||||||
|
def __call__(self, *other):
|
||||||
|
return self._cls(other)
|
||||||
|
def __eq__(self, other):
|
||||||
|
return self._cls == other._cls
|
||||||
|
def __ne__(self, other):
|
||||||
|
return self._cls != other._cls
|
||||||
|
|
||||||
|
def _getintp_ctype():
|
||||||
|
val = _getintp_ctype.cache
|
||||||
|
if val is not None:
|
||||||
|
return val
|
||||||
|
if ctypes is None:
|
||||||
|
import numpy as np
|
||||||
|
val = dummy_ctype(np.intp)
|
||||||
|
else:
|
||||||
|
char = dtype('p').char
|
||||||
|
if (char == 'i'):
|
||||||
|
val = ctypes.c_int
|
||||||
|
elif char == 'l':
|
||||||
|
val = ctypes.c_long
|
||||||
|
elif char == 'q':
|
||||||
|
val = ctypes.c_longlong
|
||||||
|
else:
|
||||||
|
val = ctypes.c_long
|
||||||
|
_getintp_ctype.cache = val
|
||||||
|
return val
|
||||||
|
_getintp_ctype.cache = None
|
||||||
|
|
||||||
|
# Used for .ctypes attribute of ndarray
|
||||||
|
|
||||||
|
class _missing_ctypes(object):
|
||||||
|
def cast(self, num, obj):
|
||||||
|
return num.value
|
||||||
|
|
||||||
|
class c_void_p(object):
|
||||||
|
def __init__(self, ptr):
|
||||||
|
self.value = ptr
|
||||||
|
|
||||||
|
|
||||||
|
class _ctypes(object):
|
||||||
|
def __init__(self, array, ptr=None):
|
||||||
|
self._arr = array
|
||||||
|
|
||||||
|
if ctypes:
|
||||||
|
self._ctypes = ctypes
|
||||||
|
self._data = self._ctypes.c_void_p(ptr)
|
||||||
|
else:
|
||||||
|
# fake a pointer-like object that holds onto the reference
|
||||||
|
self._ctypes = _missing_ctypes()
|
||||||
|
self._data = self._ctypes.c_void_p(ptr)
|
||||||
|
self._data._objects = array
|
||||||
|
|
||||||
|
if self._arr.ndim == 0:
|
||||||
|
self._zerod = True
|
||||||
|
else:
|
||||||
|
self._zerod = False
|
||||||
|
|
||||||
|
def data_as(self, obj):
|
||||||
|
"""
|
||||||
|
Return the data pointer cast to a particular c-types object.
|
||||||
|
For example, calling ``self._as_parameter_`` is equivalent to
|
||||||
|
``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
|
||||||
|
pointer to a ctypes array of floating-point data:
|
||||||
|
``self.data_as(ctypes.POINTER(ctypes.c_double))``.
|
||||||
|
|
||||||
|
The returned pointer will keep a reference to the array.
|
||||||
|
"""
|
||||||
|
# _ctypes.cast function causes a circular reference of self._data in
|
||||||
|
# self._data._objects. Attributes of self._data cannot be released
|
||||||
|
# until gc.collect is called. Make a copy of the pointer first then let
|
||||||
|
# it hold the array reference. This is a workaround to circumvent the
|
||||||
|
# CPython bug https://bugs.python.org/issue12836
|
||||||
|
ptr = self._ctypes.cast(self._data, obj)
|
||||||
|
ptr._arr = self._arr
|
||||||
|
return ptr
|
||||||
|
|
||||||
|
def shape_as(self, obj):
|
||||||
|
"""
|
||||||
|
Return the shape tuple as an array of some other c-types
|
||||||
|
type. For example: ``self.shape_as(ctypes.c_short)``.
|
||||||
|
"""
|
||||||
|
if self._zerod:
|
||||||
|
return None
|
||||||
|
return (obj*self._arr.ndim)(*self._arr.shape)
|
||||||
|
|
||||||
|
def strides_as(self, obj):
|
||||||
|
"""
|
||||||
|
Return the strides tuple as an array of some other
|
||||||
|
c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
|
||||||
|
"""
|
||||||
|
if self._zerod:
|
||||||
|
return None
|
||||||
|
return (obj*self._arr.ndim)(*self._arr.strides)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def data(self):
|
||||||
|
"""
|
||||||
|
A pointer to the memory area of the array as a Python integer.
|
||||||
|
This memory area may contain data that is not aligned, or not in correct
|
||||||
|
byte-order. The memory area may not even be writeable. The array
|
||||||
|
flags and data-type of this array should be respected when passing this
|
||||||
|
attribute to arbitrary C-code to avoid trouble that can include Python
|
||||||
|
crashing. User Beware! The value of this attribute is exactly the same
|
||||||
|
as ``self._array_interface_['data'][0]``.
|
||||||
|
|
||||||
|
Note that unlike ``data_as``, a reference will not be kept to the array:
|
||||||
|
code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
|
||||||
|
pointer to a deallocated array, and should be spelt
|
||||||
|
``(a + b).ctypes.data_as(ctypes.c_void_p)``
|
||||||
|
"""
|
||||||
|
return self._data.value
|
||||||
|
|
||||||
|
@property
|
||||||
|
def shape(self):
|
||||||
|
"""
|
||||||
|
(c_intp*self.ndim): A ctypes array of length self.ndim where
|
||||||
|
the basetype is the C-integer corresponding to ``dtype('p')`` on this
|
||||||
|
platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
|
||||||
|
`ctypes.c_longlong` depending on the platform.
|
||||||
|
The c_intp type is defined accordingly in `numpy.ctypeslib`.
|
||||||
|
The ctypes array contains the shape of the underlying array.
|
||||||
|
"""
|
||||||
|
return self.shape_as(_getintp_ctype())
|
||||||
|
|
||||||
|
@property
|
||||||
|
def strides(self):
|
||||||
|
"""
|
||||||
|
(c_intp*self.ndim): A ctypes array of length self.ndim where
|
||||||
|
the basetype is the same as for the shape attribute. This ctypes array
|
||||||
|
contains the strides information from the underlying array. This strides
|
||||||
|
information is important for showing how many bytes must be jumped to
|
||||||
|
get to the next element in the array.
|
||||||
|
"""
|
||||||
|
return self.strides_as(_getintp_ctype())
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _as_parameter_(self):
|
||||||
|
"""
|
||||||
|
Overrides the ctypes semi-magic method
|
||||||
|
|
||||||
|
Enables `c_func(some_array.ctypes)`
|
||||||
|
"""
|
||||||
|
return self.data_as(ctypes.c_void_p)
|
||||||
|
|
||||||
|
# kept for compatibility
|
||||||
|
get_data = data.fget
|
||||||
|
get_shape = shape.fget
|
||||||
|
get_strides = strides.fget
|
||||||
|
get_as_parameter = _as_parameter_.fget
|
||||||
|
|
||||||
|
|
||||||
|
def _newnames(datatype, order):
|
||||||
|
"""
|
||||||
|
Given a datatype and an order object, return a new names tuple, with the
|
||||||
|
order indicated
|
||||||
|
"""
|
||||||
|
oldnames = datatype.names
|
||||||
|
nameslist = list(oldnames)
|
||||||
|
if isinstance(order, (str, unicode)):
|
||||||
|
order = [order]
|
||||||
|
seen = set()
|
||||||
|
if isinstance(order, (list, tuple)):
|
||||||
|
for name in order:
|
||||||
|
try:
|
||||||
|
nameslist.remove(name)
|
||||||
|
except ValueError:
|
||||||
|
if name in seen:
|
||||||
|
raise ValueError("duplicate field name: %s" % (name,))
|
||||||
|
else:
|
||||||
|
raise ValueError("unknown field name: %s" % (name,))
|
||||||
|
seen.add(name)
|
||||||
|
return tuple(list(order) + nameslist)
|
||||||
|
raise ValueError("unsupported order value: %s" % (order,))
|
||||||
|
|
||||||
|
def _copy_fields(ary):
|
||||||
|
"""Return copy of structured array with padding between fields removed.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
ary : ndarray
|
||||||
|
Structured array from which to remove padding bytes
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
ary_copy : ndarray
|
||||||
|
Copy of ary with padding bytes removed
|
||||||
|
"""
|
||||||
|
dt = ary.dtype
|
||||||
|
copy_dtype = {'names': dt.names,
|
||||||
|
'formats': [dt.fields[name][0] for name in dt.names]}
|
||||||
|
return array(ary, dtype=copy_dtype, copy=True)
|
||||||
|
|
||||||
|
def _getfield_is_safe(oldtype, newtype, offset):
|
||||||
|
""" Checks safety of getfield for object arrays.
|
||||||
|
|
||||||
|
As in _view_is_safe, we need to check that memory containing objects is not
|
||||||
|
reinterpreted as a non-object datatype and vice versa.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
oldtype : data-type
|
||||||
|
Data type of the original ndarray.
|
||||||
|
newtype : data-type
|
||||||
|
Data type of the field being accessed by ndarray.getfield
|
||||||
|
offset : int
|
||||||
|
Offset of the field being accessed by ndarray.getfield
|
||||||
|
|
||||||
|
Raises
|
||||||
|
------
|
||||||
|
TypeError
|
||||||
|
If the field access is invalid
|
||||||
|
|
||||||
|
"""
|
||||||
|
if newtype.hasobject or oldtype.hasobject:
|
||||||
|
if offset == 0 and newtype == oldtype:
|
||||||
|
return
|
||||||
|
if oldtype.names is not None:
|
||||||
|
for name in oldtype.names:
|
||||||
|
if (oldtype.fields[name][1] == offset and
|
||||||
|
oldtype.fields[name][0] == newtype):
|
||||||
|
return
|
||||||
|
raise TypeError("Cannot get/set field of an object array")
|
||||||
|
return
|
||||||
|
|
||||||
|
def _view_is_safe(oldtype, newtype):
|
||||||
|
""" Checks safety of a view involving object arrays, for example when
|
||||||
|
doing::
|
||||||
|
|
||||||
|
np.zeros(10, dtype=oldtype).view(newtype)
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
oldtype : data-type
|
||||||
|
Data type of original ndarray
|
||||||
|
newtype : data-type
|
||||||
|
Data type of the view
|
||||||
|
|
||||||
|
Raises
|
||||||
|
------
|
||||||
|
TypeError
|
||||||
|
If the new type is incompatible with the old type.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# if the types are equivalent, there is no problem.
|
||||||
|
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
|
||||||
|
if oldtype == newtype:
|
||||||
|
return
|
||||||
|
|
||||||
|
if newtype.hasobject or oldtype.hasobject:
|
||||||
|
raise TypeError("Cannot change data-type for object array.")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Given a string containing a PEP 3118 format specifier,
|
||||||
|
# construct a NumPy dtype
|
||||||
|
|
||||||
|
_pep3118_native_map = {
|
||||||
|
'?': '?',
|
||||||
|
'c': 'S1',
|
||||||
|
'b': 'b',
|
||||||
|
'B': 'B',
|
||||||
|
'h': 'h',
|
||||||
|
'H': 'H',
|
||||||
|
'i': 'i',
|
||||||
|
'I': 'I',
|
||||||
|
'l': 'l',
|
||||||
|
'L': 'L',
|
||||||
|
'q': 'q',
|
||||||
|
'Q': 'Q',
|
||||||
|
'e': 'e',
|
||||||
|
'f': 'f',
|
||||||
|
'd': 'd',
|
||||||
|
'g': 'g',
|
||||||
|
'Zf': 'F',
|
||||||
|
'Zd': 'D',
|
||||||
|
'Zg': 'G',
|
||||||
|
's': 'S',
|
||||||
|
'w': 'U',
|
||||||
|
'O': 'O',
|
||||||
|
'x': 'V', # padding
|
||||||
|
}
|
||||||
|
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
|
||||||
|
|
||||||
|
_pep3118_standard_map = {
|
||||||
|
'?': '?',
|
||||||
|
'c': 'S1',
|
||||||
|
'b': 'b',
|
||||||
|
'B': 'B',
|
||||||
|
'h': 'i2',
|
||||||
|
'H': 'u2',
|
||||||
|
'i': 'i4',
|
||||||
|
'I': 'u4',
|
||||||
|
'l': 'i4',
|
||||||
|
'L': 'u4',
|
||||||
|
'q': 'i8',
|
||||||
|
'Q': 'u8',
|
||||||
|
'e': 'f2',
|
||||||
|
'f': 'f',
|
||||||
|
'd': 'd',
|
||||||
|
'Zf': 'F',
|
||||||
|
'Zd': 'D',
|
||||||
|
's': 'S',
|
||||||
|
'w': 'U',
|
||||||
|
'O': 'O',
|
||||||
|
'x': 'V', # padding
|
||||||
|
}
|
||||||
|
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
|
||||||
|
|
||||||
|
_pep3118_unsupported_map = {
|
||||||
|
'u': 'UCS-2 strings',
|
||||||
|
'&': 'pointers',
|
||||||
|
't': 'bitfields',
|
||||||
|
'X': 'function pointers',
|
||||||
|
}
|
||||||
|
|
||||||
|
class _Stream(object):
|
||||||
|
def __init__(self, s):
|
||||||
|
self.s = s
|
||||||
|
self.byteorder = '@'
|
||||||
|
|
||||||
|
def advance(self, n):
|
||||||
|
res = self.s[:n]
|
||||||
|
self.s = self.s[n:]
|
||||||
|
return res
|
||||||
|
|
||||||
|
def consume(self, c):
|
||||||
|
if self.s[:len(c)] == c:
|
||||||
|
self.advance(len(c))
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def consume_until(self, c):
|
||||||
|
if callable(c):
|
||||||
|
i = 0
|
||||||
|
while i < len(self.s) and not c(self.s[i]):
|
||||||
|
i = i + 1
|
||||||
|
return self.advance(i)
|
||||||
|
else:
|
||||||
|
i = self.s.index(c)
|
||||||
|
res = self.advance(i)
|
||||||
|
self.advance(len(c))
|
||||||
|
return res
|
||||||
|
|
||||||
|
@property
|
||||||
|
def next(self):
|
||||||
|
return self.s[0]
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
return bool(self.s)
|
||||||
|
__nonzero__ = __bool__
|
||||||
|
|
||||||
|
|
||||||
|
def _dtype_from_pep3118(spec):
|
||||||
|
stream = _Stream(spec)
|
||||||
|
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
|
||||||
|
return dtype
|
||||||
|
|
||||||
|
def __dtype_from_pep3118(stream, is_subdtype):
|
||||||
|
field_spec = dict(
|
||||||
|
names=[],
|
||||||
|
formats=[],
|
||||||
|
offsets=[],
|
||||||
|
itemsize=0
|
||||||
|
)
|
||||||
|
offset = 0
|
||||||
|
common_alignment = 1
|
||||||
|
is_padding = False
|
||||||
|
|
||||||
|
# Parse spec
|
||||||
|
while stream:
|
||||||
|
value = None
|
||||||
|
|
||||||
|
# End of structure, bail out to upper level
|
||||||
|
if stream.consume('}'):
|
||||||
|
break
|
||||||
|
|
||||||
|
# Sub-arrays (1)
|
||||||
|
shape = None
|
||||||
|
if stream.consume('('):
|
||||||
|
shape = stream.consume_until(')')
|
||||||
|
shape = tuple(map(int, shape.split(',')))
|
||||||
|
|
||||||
|
# Byte order
|
||||||
|
if stream.next in ('@', '=', '<', '>', '^', '!'):
|
||||||
|
byteorder = stream.advance(1)
|
||||||
|
if byteorder == '!':
|
||||||
|
byteorder = '>'
|
||||||
|
stream.byteorder = byteorder
|
||||||
|
|
||||||
|
# Byte order characters also control native vs. standard type sizes
|
||||||
|
if stream.byteorder in ('@', '^'):
|
||||||
|
type_map = _pep3118_native_map
|
||||||
|
type_map_chars = _pep3118_native_typechars
|
||||||
|
else:
|
||||||
|
type_map = _pep3118_standard_map
|
||||||
|
type_map_chars = _pep3118_standard_typechars
|
||||||
|
|
||||||
|
# Item sizes
|
||||||
|
itemsize_str = stream.consume_until(lambda c: not c.isdigit())
|
||||||
|
if itemsize_str:
|
||||||
|
itemsize = int(itemsize_str)
|
||||||
|
else:
|
||||||
|
itemsize = 1
|
||||||
|
|
||||||
|
# Data types
|
||||||
|
is_padding = False
|
||||||
|
|
||||||
|
if stream.consume('T{'):
|
||||||
|
value, align = __dtype_from_pep3118(
|
||||||
|
stream, is_subdtype=True)
|
||||||
|
elif stream.next in type_map_chars:
|
||||||
|
if stream.next == 'Z':
|
||||||
|
typechar = stream.advance(2)
|
||||||
|
else:
|
||||||
|
typechar = stream.advance(1)
|
||||||
|
|
||||||
|
is_padding = (typechar == 'x')
|
||||||
|
dtypechar = type_map[typechar]
|
||||||
|
if dtypechar in 'USV':
|
||||||
|
dtypechar += '%d' % itemsize
|
||||||
|
itemsize = 1
|
||||||
|
numpy_byteorder = {'@': '=', '^': '='}.get(
|
||||||
|
stream.byteorder, stream.byteorder)
|
||||||
|
value = dtype(numpy_byteorder + dtypechar)
|
||||||
|
align = value.alignment
|
||||||
|
elif stream.next in _pep3118_unsupported_map:
|
||||||
|
desc = _pep3118_unsupported_map[stream.next]
|
||||||
|
raise NotImplementedError(
|
||||||
|
"Unrepresentable PEP 3118 data type {!r} ({})"
|
||||||
|
.format(stream.next, desc))
|
||||||
|
else:
|
||||||
|
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Native alignment may require padding
|
||||||
|
#
|
||||||
|
# Here we assume that the presence of a '@' character implicitly implies
|
||||||
|
# that the start of the array is *already* aligned.
|
||||||
|
#
|
||||||
|
extra_offset = 0
|
||||||
|
if stream.byteorder == '@':
|
||||||
|
start_padding = (-offset) % align
|
||||||
|
intra_padding = (-value.itemsize) % align
|
||||||
|
|
||||||
|
offset += start_padding
|
||||||
|
|
||||||
|
if intra_padding != 0:
|
||||||
|
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
|
||||||
|
# Inject internal padding to the end of the sub-item
|
||||||
|
value = _add_trailing_padding(value, intra_padding)
|
||||||
|
else:
|
||||||
|
# We can postpone the injection of internal padding,
|
||||||
|
# as the item appears at most once
|
||||||
|
extra_offset += intra_padding
|
||||||
|
|
||||||
|
# Update common alignment
|
||||||
|
common_alignment = _lcm(align, common_alignment)
|
||||||
|
|
||||||
|
# Convert itemsize to sub-array
|
||||||
|
if itemsize != 1:
|
||||||
|
value = dtype((value, (itemsize,)))
|
||||||
|
|
||||||
|
# Sub-arrays (2)
|
||||||
|
if shape is not None:
|
||||||
|
value = dtype((value, shape))
|
||||||
|
|
||||||
|
# Field name
|
||||||
|
if stream.consume(':'):
|
||||||
|
name = stream.consume_until(':')
|
||||||
|
else:
|
||||||
|
name = None
|
||||||
|
|
||||||
|
if not (is_padding and name is None):
|
||||||
|
if name is not None and name in field_spec['names']:
|
||||||
|
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
|
||||||
|
% name)
|
||||||
|
field_spec['names'].append(name)
|
||||||
|
field_spec['formats'].append(value)
|
||||||
|
field_spec['offsets'].append(offset)
|
||||||
|
|
||||||
|
offset += value.itemsize
|
||||||
|
offset += extra_offset
|
||||||
|
|
||||||
|
field_spec['itemsize'] = offset
|
||||||
|
|
||||||
|
# extra final padding for aligned types
|
||||||
|
if stream.byteorder == '@':
|
||||||
|
field_spec['itemsize'] += (-offset) % common_alignment
|
||||||
|
|
||||||
|
# Check if this was a simple 1-item type, and unwrap it
|
||||||
|
if (field_spec['names'] == [None]
|
||||||
|
and field_spec['offsets'][0] == 0
|
||||||
|
and field_spec['itemsize'] == field_spec['formats'][0].itemsize
|
||||||
|
and not is_subdtype):
|
||||||
|
ret = field_spec['formats'][0]
|
||||||
|
else:
|
||||||
|
_fix_names(field_spec)
|
||||||
|
ret = dtype(field_spec)
|
||||||
|
|
||||||
|
# Finished
|
||||||
|
return ret, common_alignment
|
||||||
|
|
||||||
|
def _fix_names(field_spec):
|
||||||
|
""" Replace names which are None with the next unused f%d name """
|
||||||
|
names = field_spec['names']
|
||||||
|
for i, name in enumerate(names):
|
||||||
|
if name is not None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
j = 0
|
||||||
|
while True:
|
||||||
|
name = 'f{}'.format(j)
|
||||||
|
if name not in names:
|
||||||
|
break
|
||||||
|
j = j + 1
|
||||||
|
names[i] = name
|
||||||
|
|
||||||
|
def _add_trailing_padding(value, padding):
|
||||||
|
"""Inject the specified number of padding bytes at the end of a dtype"""
|
||||||
|
if value.fields is None:
|
||||||
|
field_spec = dict(
|
||||||
|
names=['f0'],
|
||||||
|
formats=[value],
|
||||||
|
offsets=[0],
|
||||||
|
itemsize=value.itemsize
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
fields = value.fields
|
||||||
|
names = value.names
|
||||||
|
field_spec = dict(
|
||||||
|
names=names,
|
||||||
|
formats=[fields[name][0] for name in names],
|
||||||
|
offsets=[fields[name][1] for name in names],
|
||||||
|
itemsize=value.itemsize
|
||||||
|
)
|
||||||
|
|
||||||
|
field_spec['itemsize'] += padding
|
||||||
|
return dtype(field_spec)
|
||||||
|
|
||||||
|
def _prod(a):
|
||||||
|
p = 1
|
||||||
|
for x in a:
|
||||||
|
p *= x
|
||||||
|
return p
|
||||||
|
|
||||||
|
def _gcd(a, b):
|
||||||
|
"""Calculate the greatest common divisor of a and b"""
|
||||||
|
while b:
|
||||||
|
a, b = b, a % b
|
||||||
|
return a
|
||||||
|
|
||||||
|
def _lcm(a, b):
|
||||||
|
return a // _gcd(a, b) * b
|
||||||
|
|
||||||
|
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
|
||||||
|
""" Format the error message for when __array_ufunc__ gives up. """
|
||||||
|
args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
|
||||||
|
['{}={!r}'.format(k, v)
|
||||||
|
for k, v in kwargs.items()])
|
||||||
|
args = inputs + kwargs.get('out', ())
|
||||||
|
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
|
||||||
|
return ('operand type(s) all returned NotImplemented from '
|
||||||
|
'__array_ufunc__({!r}, {!r}, {}): {}'
|
||||||
|
.format(ufunc, method, args_string, types_string))
|
||||||
|
|
||||||
|
|
||||||
|
def array_function_errmsg_formatter(public_api, types):
|
||||||
|
""" Format the error message for when __array_ufunc__ gives up. """
|
||||||
|
func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
|
||||||
|
return ("no implementation found for '{}' on types that implement "
|
||||||
|
'__array_function__: {}'.format(func_name, list(types)))
|
||||||
|
|
||||||
|
|
||||||
|
def _ufunc_doc_signature_formatter(ufunc):
|
||||||
|
"""
|
||||||
|
Builds a signature string which resembles PEP 457
|
||||||
|
|
||||||
|
This is used to construct the first line of the docstring
|
||||||
|
"""
|
||||||
|
|
||||||
|
# input arguments are simple
|
||||||
|
if ufunc.nin == 1:
|
||||||
|
in_args = 'x'
|
||||||
|
else:
|
||||||
|
in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin))
|
||||||
|
|
||||||
|
# output arguments are both keyword or positional
|
||||||
|
if ufunc.nout == 0:
|
||||||
|
out_args = ', /, out=()'
|
||||||
|
elif ufunc.nout == 1:
|
||||||
|
out_args = ', /, out=None'
|
||||||
|
else:
|
||||||
|
out_args = '[, {positional}], / [, out={default}]'.format(
|
||||||
|
positional=', '.join(
|
||||||
|
'out{}'.format(i+1) for i in range(ufunc.nout)),
|
||||||
|
default=repr((None,)*ufunc.nout)
|
||||||
|
)
|
||||||
|
|
||||||
|
# keyword only args depend on whether this is a gufunc
|
||||||
|
kwargs = (
|
||||||
|
", casting='same_kind'"
|
||||||
|
", order='K'"
|
||||||
|
", dtype=None"
|
||||||
|
", subok=True"
|
||||||
|
"[, signature"
|
||||||
|
", extobj]"
|
||||||
|
)
|
||||||
|
if ufunc.signature is None:
|
||||||
|
kwargs = ", where=True" + kwargs
|
||||||
|
|
||||||
|
# join all the parts together
|
||||||
|
return '{name}({in_args}{out_args}, *{kwargs})'.format(
|
||||||
|
name=ufunc.__name__,
|
||||||
|
in_args=in_args,
|
||||||
|
out_args=out_args,
|
||||||
|
kwargs=kwargs
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def npy_ctypes_check(cls):
|
||||||
|
# determine if a class comes from ctypes, in order to work around
|
||||||
|
# a bug in the buffer protocol for those objects, bpo-10746
|
||||||
|
try:
|
||||||
|
# ctypes class are new-style, so have an __mro__. This probably fails
|
||||||
|
# for ctypes classes with multiple inheritance.
|
||||||
|
if IS_PYPY:
|
||||||
|
# (..., _ctypes.basics._CData, Bufferable, object)
|
||||||
|
ctype_base = cls.__mro__[-3]
|
||||||
|
else:
|
||||||
|
# # (..., _ctypes._CData, object)
|
||||||
|
ctype_base = cls.__mro__[-2]
|
||||||
|
# right now, they're part of the _ctypes module
|
||||||
|
return 'ctypes' in ctype_base.__module__
|
||||||
|
except Exception:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
class recursive(object):
|
||||||
|
'''
|
||||||
|
A decorator class for recursive nested functions.
|
||||||
|
Naive recursive nested functions hold a reference to themselves:
|
||||||
|
|
||||||
|
def outer(*args):
|
||||||
|
def stringify_leaky(arg0, *arg1):
|
||||||
|
if len(arg1) > 0:
|
||||||
|
return stringify_leaky(*arg1) # <- HERE
|
||||||
|
return str(arg0)
|
||||||
|
stringify_leaky(*args)
|
||||||
|
|
||||||
|
This design pattern creates a reference cycle that is difficult for a
|
||||||
|
garbage collector to resolve. The decorator class prevents the
|
||||||
|
cycle by passing the nested function in as an argument `self`:
|
||||||
|
|
||||||
|
def outer(*args):
|
||||||
|
@recursive
|
||||||
|
def stringify(self, arg0, *arg1):
|
||||||
|
if len(arg1) > 0:
|
||||||
|
return self(*arg1)
|
||||||
|
return str(arg0)
|
||||||
|
stringify(*args)
|
||||||
|
|
||||||
|
'''
|
||||||
|
def __init__(self, func):
|
||||||
|
self.func = func
|
||||||
|
def __call__(self, *args, **kwargs):
|
||||||
|
return self.func(self, *args, **kwargs)
|
||||||
|
|
244
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_methods.py
Normal file
244
Restaurant/Marta/venv/Lib/site-packages/numpy/core/_methods.py
Normal file
@ -0,0 +1,244 @@
|
|||||||
|
"""
|
||||||
|
Array methods which are called by both the C-code for the method
|
||||||
|
and the Python code for the NumPy-namespace function
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from numpy.core import multiarray as mu
|
||||||
|
from numpy.core import umath as um
|
||||||
|
from numpy.core._asarray import asanyarray
|
||||||
|
from numpy.core import numerictypes as nt
|
||||||
|
from numpy.core import _exceptions
|
||||||
|
from numpy._globals import _NoValue
|
||||||
|
from numpy.compat import pickle, os_fspath, contextlib_nullcontext
|
||||||
|
|
||||||
|
# save those O(100) nanoseconds!
|
||||||
|
umr_maximum = um.maximum.reduce
|
||||||
|
umr_minimum = um.minimum.reduce
|
||||||
|
umr_sum = um.add.reduce
|
||||||
|
umr_prod = um.multiply.reduce
|
||||||
|
umr_any = um.logical_or.reduce
|
||||||
|
umr_all = um.logical_and.reduce
|
||||||
|
|
||||||
|
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
|
||||||
|
# small reductions
|
||||||
|
def _amax(a, axis=None, out=None, keepdims=False,
|
||||||
|
initial=_NoValue, where=True):
|
||||||
|
return umr_maximum(a, axis, None, out, keepdims, initial, where)
|
||||||
|
|
||||||
|
def _amin(a, axis=None, out=None, keepdims=False,
|
||||||
|
initial=_NoValue, where=True):
|
||||||
|
return umr_minimum(a, axis, None, out, keepdims, initial, where)
|
||||||
|
|
||||||
|
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
|
||||||
|
initial=_NoValue, where=True):
|
||||||
|
return umr_sum(a, axis, dtype, out, keepdims, initial, where)
|
||||||
|
|
||||||
|
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
|
||||||
|
initial=_NoValue, where=True):
|
||||||
|
return umr_prod(a, axis, dtype, out, keepdims, initial, where)
|
||||||
|
|
||||||
|
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
|
||||||
|
return umr_any(a, axis, dtype, out, keepdims)
|
||||||
|
|
||||||
|
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
|
||||||
|
return umr_all(a, axis, dtype, out, keepdims)
|
||||||
|
|
||||||
|
def _count_reduce_items(arr, axis):
|
||||||
|
if axis is None:
|
||||||
|
axis = tuple(range(arr.ndim))
|
||||||
|
if not isinstance(axis, tuple):
|
||||||
|
axis = (axis,)
|
||||||
|
items = 1
|
||||||
|
for ax in axis:
|
||||||
|
items *= arr.shape[ax]
|
||||||
|
return items
|
||||||
|
|
||||||
|
# Numpy 1.17.0, 2019-02-24
|
||||||
|
# Various clip behavior deprecations, marked with _clip_dep as a prefix.
|
||||||
|
|
||||||
|
def _clip_dep_is_scalar_nan(a):
|
||||||
|
# guarded to protect circular imports
|
||||||
|
from numpy.core.fromnumeric import ndim
|
||||||
|
if ndim(a) != 0:
|
||||||
|
return False
|
||||||
|
try:
|
||||||
|
return um.isnan(a)
|
||||||
|
except TypeError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _clip_dep_is_byte_swapped(a):
|
||||||
|
if isinstance(a, mu.ndarray):
|
||||||
|
return not a.dtype.isnative
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
|
||||||
|
# normal path
|
||||||
|
if casting is not None:
|
||||||
|
return ufunc(*args, out=out, casting=casting, **kwargs)
|
||||||
|
|
||||||
|
# try to deal with broken casting rules
|
||||||
|
try:
|
||||||
|
return ufunc(*args, out=out, **kwargs)
|
||||||
|
except _exceptions._UFuncOutputCastingError as e:
|
||||||
|
# Numpy 1.17.0, 2019-02-24
|
||||||
|
warnings.warn(
|
||||||
|
"Converting the output of clip from {!r} to {!r} is deprecated. "
|
||||||
|
"Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
|
||||||
|
"correct the type of the variables.".format(e.from_, e.to),
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2
|
||||||
|
)
|
||||||
|
return ufunc(*args, out=out, casting="unsafe", **kwargs)
|
||||||
|
|
||||||
|
def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
|
||||||
|
if min is None and max is None:
|
||||||
|
raise ValueError("One of max or min must be given")
|
||||||
|
|
||||||
|
# Numpy 1.17.0, 2019-02-24
|
||||||
|
# This deprecation probably incurs a substantial slowdown for small arrays,
|
||||||
|
# it will be good to get rid of it.
|
||||||
|
if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
|
||||||
|
using_deprecated_nan = False
|
||||||
|
if _clip_dep_is_scalar_nan(min):
|
||||||
|
min = -float('inf')
|
||||||
|
using_deprecated_nan = True
|
||||||
|
if _clip_dep_is_scalar_nan(max):
|
||||||
|
max = float('inf')
|
||||||
|
using_deprecated_nan = True
|
||||||
|
if using_deprecated_nan:
|
||||||
|
warnings.warn(
|
||||||
|
"Passing `np.nan` to mean no clipping in np.clip has always "
|
||||||
|
"been unreliable, and is now deprecated. "
|
||||||
|
"In future, this will always return nan, like it already does "
|
||||||
|
"when min or max are arrays that contain nan. "
|
||||||
|
"To skip a bound, pass either None or an np.inf of an "
|
||||||
|
"appropriate sign.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2
|
||||||
|
)
|
||||||
|
|
||||||
|
if min is None:
|
||||||
|
return _clip_dep_invoke_with_casting(
|
||||||
|
um.minimum, a, max, out=out, casting=casting, **kwargs)
|
||||||
|
elif max is None:
|
||||||
|
return _clip_dep_invoke_with_casting(
|
||||||
|
um.maximum, a, min, out=out, casting=casting, **kwargs)
|
||||||
|
else:
|
||||||
|
return _clip_dep_invoke_with_casting(
|
||||||
|
um.clip, a, min, max, out=out, casting=casting, **kwargs)
|
||||||
|
|
||||||
|
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
|
||||||
|
arr = asanyarray(a)
|
||||||
|
|
||||||
|
is_float16_result = False
|
||||||
|
rcount = _count_reduce_items(arr, axis)
|
||||||
|
# Make this warning show up first
|
||||||
|
if rcount == 0:
|
||||||
|
warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
|
||||||
|
|
||||||
|
# Cast bool, unsigned int, and int to float64 by default
|
||||||
|
if dtype is None:
|
||||||
|
if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
|
||||||
|
dtype = mu.dtype('f8')
|
||||||
|
elif issubclass(arr.dtype.type, nt.float16):
|
||||||
|
dtype = mu.dtype('f4')
|
||||||
|
is_float16_result = True
|
||||||
|
|
||||||
|
ret = umr_sum(arr, axis, dtype, out, keepdims)
|
||||||
|
if isinstance(ret, mu.ndarray):
|
||||||
|
ret = um.true_divide(
|
||||||
|
ret, rcount, out=ret, casting='unsafe', subok=False)
|
||||||
|
if is_float16_result and out is None:
|
||||||
|
ret = arr.dtype.type(ret)
|
||||||
|
elif hasattr(ret, 'dtype'):
|
||||||
|
if is_float16_result:
|
||||||
|
ret = arr.dtype.type(ret / rcount)
|
||||||
|
else:
|
||||||
|
ret = ret.dtype.type(ret / rcount)
|
||||||
|
else:
|
||||||
|
ret = ret / rcount
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
|
||||||
|
arr = asanyarray(a)
|
||||||
|
|
||||||
|
rcount = _count_reduce_items(arr, axis)
|
||||||
|
# Make this warning show up on top.
|
||||||
|
if ddof >= rcount:
|
||||||
|
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
|
||||||
|
stacklevel=2)
|
||||||
|
|
||||||
|
# Cast bool, unsigned int, and int to float64 by default
|
||||||
|
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
|
||||||
|
dtype = mu.dtype('f8')
|
||||||
|
|
||||||
|
# Compute the mean.
|
||||||
|
# Note that if dtype is not of inexact type then arraymean will
|
||||||
|
# not be either.
|
||||||
|
arrmean = umr_sum(arr, axis, dtype, keepdims=True)
|
||||||
|
if isinstance(arrmean, mu.ndarray):
|
||||||
|
arrmean = um.true_divide(
|
||||||
|
arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
|
||||||
|
else:
|
||||||
|
arrmean = arrmean.dtype.type(arrmean / rcount)
|
||||||
|
|
||||||
|
# Compute sum of squared deviations from mean
|
||||||
|
# Note that x may not be inexact and that we need it to be an array,
|
||||||
|
# not a scalar.
|
||||||
|
x = asanyarray(arr - arrmean)
|
||||||
|
if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
|
||||||
|
x = um.multiply(x, x, out=x)
|
||||||
|
else:
|
||||||
|
x = um.multiply(x, um.conjugate(x), out=x).real
|
||||||
|
|
||||||
|
ret = umr_sum(x, axis, dtype, out, keepdims)
|
||||||
|
|
||||||
|
# Compute degrees of freedom and make sure it is not negative.
|
||||||
|
rcount = max([rcount - ddof, 0])
|
||||||
|
|
||||||
|
# divide by degrees of freedom
|
||||||
|
if isinstance(ret, mu.ndarray):
|
||||||
|
ret = um.true_divide(
|
||||||
|
ret, rcount, out=ret, casting='unsafe', subok=False)
|
||||||
|
elif hasattr(ret, 'dtype'):
|
||||||
|
ret = ret.dtype.type(ret / rcount)
|
||||||
|
else:
|
||||||
|
ret = ret / rcount
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
|
||||||
|
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
|
||||||
|
keepdims=keepdims)
|
||||||
|
|
||||||
|
if isinstance(ret, mu.ndarray):
|
||||||
|
ret = um.sqrt(ret, out=ret)
|
||||||
|
elif hasattr(ret, 'dtype'):
|
||||||
|
ret = ret.dtype.type(um.sqrt(ret))
|
||||||
|
else:
|
||||||
|
ret = um.sqrt(ret)
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def _ptp(a, axis=None, out=None, keepdims=False):
|
||||||
|
return um.subtract(
|
||||||
|
umr_maximum(a, axis, None, out, keepdims),
|
||||||
|
umr_minimum(a, axis, None, None, keepdims),
|
||||||
|
out
|
||||||
|
)
|
||||||
|
|
||||||
|
def _dump(self, file, protocol=2):
|
||||||
|
if hasattr(file, 'write'):
|
||||||
|
ctx = contextlib_nullcontext(file)
|
||||||
|
else:
|
||||||
|
ctx = open(os_fspath(file), "wb")
|
||||||
|
with ctx as f:
|
||||||
|
pickle.dump(self, f, protocol=protocol)
|
||||||
|
|
||||||
|
def _dumps(self, protocol=2):
|
||||||
|
return pickle.dumps(self, protocol=protocol)
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,100 @@
|
|||||||
|
"""
|
||||||
|
String-handling utilities to avoid locale-dependence.
|
||||||
|
|
||||||
|
Used primarily to generate type name aliases.
|
||||||
|
"""
|
||||||
|
# "import string" is costly to import!
|
||||||
|
# Construct the translation tables directly
|
||||||
|
# "A" = chr(65), "a" = chr(97)
|
||||||
|
_all_chars = [chr(_m) for _m in range(256)]
|
||||||
|
_ascii_upper = _all_chars[65:65+26]
|
||||||
|
_ascii_lower = _all_chars[97:97+26]
|
||||||
|
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
|
||||||
|
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
|
||||||
|
|
||||||
|
|
||||||
|
def english_lower(s):
|
||||||
|
""" Apply English case rules to convert ASCII strings to all lower case.
|
||||||
|
|
||||||
|
This is an internal utility function to replace calls to str.lower() such
|
||||||
|
that we can avoid changing behavior with changing locales. In particular,
|
||||||
|
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
|
||||||
|
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
s : str
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
lowered : str
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> from numpy.core.numerictypes import english_lower
|
||||||
|
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
|
||||||
|
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
|
||||||
|
>>> english_lower('')
|
||||||
|
''
|
||||||
|
"""
|
||||||
|
lowered = s.translate(LOWER_TABLE)
|
||||||
|
return lowered
|
||||||
|
|
||||||
|
|
||||||
|
def english_upper(s):
|
||||||
|
""" Apply English case rules to convert ASCII strings to all upper case.
|
||||||
|
|
||||||
|
This is an internal utility function to replace calls to str.upper() such
|
||||||
|
that we can avoid changing behavior with changing locales. In particular,
|
||||||
|
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
|
||||||
|
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
s : str
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
uppered : str
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> from numpy.core.numerictypes import english_upper
|
||||||
|
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
|
||||||
|
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
|
||||||
|
>>> english_upper('')
|
||||||
|
''
|
||||||
|
"""
|
||||||
|
uppered = s.translate(UPPER_TABLE)
|
||||||
|
return uppered
|
||||||
|
|
||||||
|
|
||||||
|
def english_capitalize(s):
|
||||||
|
""" Apply English case rules to convert the first character of an ASCII
|
||||||
|
string to upper case.
|
||||||
|
|
||||||
|
This is an internal utility function to replace calls to str.capitalize()
|
||||||
|
such that we can avoid changing behavior with changing locales.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
s : str
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
capitalized : str
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> from numpy.core.numerictypes import english_capitalize
|
||||||
|
>>> english_capitalize('int8')
|
||||||
|
'Int8'
|
||||||
|
>>> english_capitalize('Int8')
|
||||||
|
'Int8'
|
||||||
|
>>> english_capitalize('')
|
||||||
|
''
|
||||||
|
"""
|
||||||
|
if s:
|
||||||
|
return english_upper(s[0]) + s[1:]
|
||||||
|
else:
|
||||||
|
return s
|
Binary file not shown.
@ -0,0 +1,282 @@
|
|||||||
|
"""
|
||||||
|
Due to compatibility, numpy has a very large number of different naming
|
||||||
|
conventions for the scalar types (those subclassing from `numpy.generic`).
|
||||||
|
This file produces a convoluted set of dictionaries mapping names to types,
|
||||||
|
and sometimes other mappings too.
|
||||||
|
|
||||||
|
.. data:: allTypes
|
||||||
|
A dictionary of names to types that will be exposed as attributes through
|
||||||
|
``np.core.numerictypes.*``
|
||||||
|
|
||||||
|
.. data:: sctypeDict
|
||||||
|
Similar to `allTypes`, but maps a broader set of aliases to their types.
|
||||||
|
|
||||||
|
.. data:: sctypeNA
|
||||||
|
NumArray-compatible names for the scalar types. Contains not only
|
||||||
|
``name: type`` mappings, but ``char: name`` mappings too.
|
||||||
|
|
||||||
|
.. deprecated:: 1.16
|
||||||
|
|
||||||
|
.. data:: sctypes
|
||||||
|
A dictionary keyed by a "type group" string, providing a list of types
|
||||||
|
under that group.
|
||||||
|
|
||||||
|
"""
|
||||||
|
import warnings
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from numpy.compat import unicode
|
||||||
|
from numpy._globals import VisibleDeprecationWarning
|
||||||
|
from numpy.core._string_helpers import english_lower, english_capitalize
|
||||||
|
from numpy.core.multiarray import typeinfo, dtype
|
||||||
|
from numpy.core._dtype import _kind_name
|
||||||
|
|
||||||
|
|
||||||
|
sctypeDict = {} # Contains all leaf-node scalar types with aliases
|
||||||
|
class TypeNADict(dict):
|
||||||
|
def __getitem__(self, key):
|
||||||
|
# 2018-06-24, 1.16
|
||||||
|
warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
|
||||||
|
'of numpy', VisibleDeprecationWarning, stacklevel=2)
|
||||||
|
return dict.__getitem__(self, key)
|
||||||
|
def get(self, key, default=None):
|
||||||
|
# 2018-06-24, 1.16
|
||||||
|
warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
|
||||||
|
'of numpy', VisibleDeprecationWarning, stacklevel=2)
|
||||||
|
return dict.get(self, key, default)
|
||||||
|
|
||||||
|
sctypeNA = TypeNADict() # Contails all leaf-node types -> numarray type equivalences
|
||||||
|
allTypes = {} # Collect the types we will add to the module
|
||||||
|
|
||||||
|
|
||||||
|
# separate the actual type info from the abstract base classes
|
||||||
|
_abstract_types = {}
|
||||||
|
_concrete_typeinfo = {}
|
||||||
|
for k, v in typeinfo.items():
|
||||||
|
# make all the keys lowercase too
|
||||||
|
k = english_lower(k)
|
||||||
|
if isinstance(v, type):
|
||||||
|
_abstract_types[k] = v
|
||||||
|
else:
|
||||||
|
_concrete_typeinfo[k] = v
|
||||||
|
|
||||||
|
_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
|
||||||
|
|
||||||
|
|
||||||
|
def _bits_of(obj):
|
||||||
|
try:
|
||||||
|
info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
|
||||||
|
except StopIteration:
|
||||||
|
if obj in _abstract_types.values():
|
||||||
|
raise ValueError("Cannot count the bits of an abstract type")
|
||||||
|
|
||||||
|
# some third-party type - make a best-guess
|
||||||
|
return dtype(obj).itemsize * 8
|
||||||
|
else:
|
||||||
|
return info.bits
|
||||||
|
|
||||||
|
|
||||||
|
def bitname(obj):
|
||||||
|
"""Return a bit-width name for a given type object"""
|
||||||
|
bits = _bits_of(obj)
|
||||||
|
dt = dtype(obj)
|
||||||
|
char = dt.kind
|
||||||
|
base = _kind_name(dt)
|
||||||
|
|
||||||
|
if base == 'object':
|
||||||
|
bits = 0
|
||||||
|
|
||||||
|
if bits != 0:
|
||||||
|
char = "%s%d" % (char, bits // 8)
|
||||||
|
|
||||||
|
return base, bits, char
|
||||||
|
|
||||||
|
|
||||||
|
def _add_types():
|
||||||
|
for name, info in _concrete_typeinfo.items():
|
||||||
|
# define C-name and insert typenum and typechar references also
|
||||||
|
allTypes[name] = info.type
|
||||||
|
sctypeDict[name] = info.type
|
||||||
|
sctypeDict[info.char] = info.type
|
||||||
|
sctypeDict[info.num] = info.type
|
||||||
|
|
||||||
|
for name, cls in _abstract_types.items():
|
||||||
|
allTypes[name] = cls
|
||||||
|
_add_types()
|
||||||
|
|
||||||
|
# This is the priority order used to assign the bit-sized NPY_INTxx names, which
|
||||||
|
# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
|
||||||
|
# consistent.
|
||||||
|
# If two C types have the same size, then the earliest one in this list is used
|
||||||
|
# as the sized name.
|
||||||
|
_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
|
||||||
|
_uint_ctypes = list('u' + t for t in _int_ctypes)
|
||||||
|
|
||||||
|
def _add_aliases():
|
||||||
|
for name, info in _concrete_typeinfo.items():
|
||||||
|
# these are handled by _add_integer_aliases
|
||||||
|
if name in _int_ctypes or name in _uint_ctypes:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# insert bit-width version for this class (if relevant)
|
||||||
|
base, bit, char = bitname(info.type)
|
||||||
|
|
||||||
|
myname = "%s%d" % (base, bit)
|
||||||
|
|
||||||
|
# ensure that (c)longdouble does not overwrite the aliases assigned to
|
||||||
|
# (c)double
|
||||||
|
if name in ('longdouble', 'clongdouble') and myname in allTypes:
|
||||||
|
continue
|
||||||
|
|
||||||
|
base_capitalize = english_capitalize(base)
|
||||||
|
if base == 'complex':
|
||||||
|
na_name = '%s%d' % (base_capitalize, bit//2)
|
||||||
|
elif base == 'bool':
|
||||||
|
na_name = base_capitalize
|
||||||
|
else:
|
||||||
|
na_name = "%s%d" % (base_capitalize, bit)
|
||||||
|
|
||||||
|
allTypes[myname] = info.type
|
||||||
|
|
||||||
|
# add mapping for both the bit name and the numarray name
|
||||||
|
sctypeDict[myname] = info.type
|
||||||
|
sctypeDict[na_name] = info.type
|
||||||
|
|
||||||
|
# add forward, reverse, and string mapping to numarray
|
||||||
|
sctypeNA[na_name] = info.type
|
||||||
|
sctypeNA[info.type] = na_name
|
||||||
|
sctypeNA[info.char] = na_name
|
||||||
|
|
||||||
|
sctypeDict[char] = info.type
|
||||||
|
sctypeNA[char] = na_name
|
||||||
|
_add_aliases()
|
||||||
|
|
||||||
|
def _add_integer_aliases():
|
||||||
|
seen_bits = set()
|
||||||
|
for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
|
||||||
|
i_info = _concrete_typeinfo[i_ctype]
|
||||||
|
u_info = _concrete_typeinfo[u_ctype]
|
||||||
|
bits = i_info.bits # same for both
|
||||||
|
|
||||||
|
for info, charname, intname, Intname in [
|
||||||
|
(i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits),
|
||||||
|
(u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]:
|
||||||
|
if bits not in seen_bits:
|
||||||
|
# sometimes two different types have the same number of bits
|
||||||
|
# if so, the one iterated over first takes precedence
|
||||||
|
allTypes[intname] = info.type
|
||||||
|
sctypeDict[intname] = info.type
|
||||||
|
sctypeDict[Intname] = info.type
|
||||||
|
sctypeDict[charname] = info.type
|
||||||
|
sctypeNA[Intname] = info.type
|
||||||
|
sctypeNA[charname] = info.type
|
||||||
|
sctypeNA[info.type] = Intname
|
||||||
|
sctypeNA[info.char] = Intname
|
||||||
|
|
||||||
|
seen_bits.add(bits)
|
||||||
|
|
||||||
|
_add_integer_aliases()
|
||||||
|
|
||||||
|
# We use these later
|
||||||
|
void = allTypes['void']
|
||||||
|
|
||||||
|
#
|
||||||
|
# Rework the Python names (so that float and complex and int are consistent
|
||||||
|
# with Python usage)
|
||||||
|
#
|
||||||
|
def _set_up_aliases():
|
||||||
|
type_pairs = [('complex_', 'cdouble'),
|
||||||
|
('int0', 'intp'),
|
||||||
|
('uint0', 'uintp'),
|
||||||
|
('single', 'float'),
|
||||||
|
('csingle', 'cfloat'),
|
||||||
|
('singlecomplex', 'cfloat'),
|
||||||
|
('float_', 'double'),
|
||||||
|
('intc', 'int'),
|
||||||
|
('uintc', 'uint'),
|
||||||
|
('int_', 'long'),
|
||||||
|
('uint', 'ulong'),
|
||||||
|
('cfloat', 'cdouble'),
|
||||||
|
('longfloat', 'longdouble'),
|
||||||
|
('clongfloat', 'clongdouble'),
|
||||||
|
('longcomplex', 'clongdouble'),
|
||||||
|
('bool_', 'bool'),
|
||||||
|
('bytes_', 'string'),
|
||||||
|
('string_', 'string'),
|
||||||
|
('unicode_', 'unicode'),
|
||||||
|
('object_', 'object')]
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
type_pairs.extend([('str_', 'unicode')])
|
||||||
|
else:
|
||||||
|
type_pairs.extend([('str_', 'string')])
|
||||||
|
for alias, t in type_pairs:
|
||||||
|
allTypes[alias] = allTypes[t]
|
||||||
|
sctypeDict[alias] = sctypeDict[t]
|
||||||
|
# Remove aliases overriding python types and modules
|
||||||
|
to_remove = ['ulong', 'object', 'int', 'float',
|
||||||
|
'complex', 'bool', 'string', 'datetime', 'timedelta']
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
to_remove.extend(['bytes', 'str'])
|
||||||
|
else:
|
||||||
|
to_remove.extend(['unicode', 'long'])
|
||||||
|
|
||||||
|
for t in to_remove:
|
||||||
|
try:
|
||||||
|
del allTypes[t]
|
||||||
|
del sctypeDict[t]
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
_set_up_aliases()
|
||||||
|
|
||||||
|
|
||||||
|
sctypes = {'int': [],
|
||||||
|
'uint':[],
|
||||||
|
'float':[],
|
||||||
|
'complex':[],
|
||||||
|
'others':[bool, object, bytes, unicode, void]}
|
||||||
|
|
||||||
|
def _add_array_type(typename, bits):
|
||||||
|
try:
|
||||||
|
t = allTypes['%s%d' % (typename, bits)]
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
sctypes[typename].append(t)
|
||||||
|
|
||||||
|
def _set_array_types():
|
||||||
|
ibytes = [1, 2, 4, 8, 16, 32, 64]
|
||||||
|
fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
|
||||||
|
for bytes in ibytes:
|
||||||
|
bits = 8*bytes
|
||||||
|
_add_array_type('int', bits)
|
||||||
|
_add_array_type('uint', bits)
|
||||||
|
for bytes in fbytes:
|
||||||
|
bits = 8*bytes
|
||||||
|
_add_array_type('float', bits)
|
||||||
|
_add_array_type('complex', 2*bits)
|
||||||
|
_gi = dtype('p')
|
||||||
|
if _gi.type not in sctypes['int']:
|
||||||
|
indx = 0
|
||||||
|
sz = _gi.itemsize
|
||||||
|
_lst = sctypes['int']
|
||||||
|
while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
|
||||||
|
indx += 1
|
||||||
|
sctypes['int'].insert(indx, _gi.type)
|
||||||
|
sctypes['uint'].insert(indx, dtype('P').type)
|
||||||
|
_set_array_types()
|
||||||
|
|
||||||
|
|
||||||
|
# Add additional strings to the sctypeDict
|
||||||
|
_toadd = ['int', 'float', 'complex', 'bool', 'object']
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
_toadd.extend(['str', 'bytes', ('a', 'bytes_')])
|
||||||
|
else:
|
||||||
|
_toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')])
|
||||||
|
|
||||||
|
for name in _toadd:
|
||||||
|
if isinstance(name, tuple):
|
||||||
|
sctypeDict[name[0]] = allTypes[name[1]]
|
||||||
|
else:
|
||||||
|
sctypeDict[name] = allTypes['%s_' % name]
|
||||||
|
|
||||||
|
del _toadd, name
|
@ -0,0 +1,458 @@
|
|||||||
|
"""
|
||||||
|
Functions for changing global ufunc configuration
|
||||||
|
|
||||||
|
This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Accessing collections abstract classes from collections
|
||||||
|
# has been deprecated since Python 3.3
|
||||||
|
import collections.abc as collections_abc
|
||||||
|
except ImportError:
|
||||||
|
import collections as collections_abc
|
||||||
|
import contextlib
|
||||||
|
|
||||||
|
from .overrides import set_module
|
||||||
|
from .umath import (
|
||||||
|
UFUNC_BUFSIZE_DEFAULT,
|
||||||
|
ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
|
||||||
|
SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
|
||||||
|
)
|
||||||
|
from . import umath
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
|
||||||
|
"errstate",
|
||||||
|
]
|
||||||
|
|
||||||
|
_errdict = {"ignore": ERR_IGNORE,
|
||||||
|
"warn": ERR_WARN,
|
||||||
|
"raise": ERR_RAISE,
|
||||||
|
"call": ERR_CALL,
|
||||||
|
"print": ERR_PRINT,
|
||||||
|
"log": ERR_LOG}
|
||||||
|
|
||||||
|
_errdict_rev = {value: key for key, value in _errdict.items()}
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
|
||||||
|
"""
|
||||||
|
Set how floating-point errors are handled.
|
||||||
|
|
||||||
|
Note that operations on integer scalar types (such as `int16`) are
|
||||||
|
handled like floating point, and are affected by these settings.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||||
|
Set treatment for all types of floating-point errors at once:
|
||||||
|
|
||||||
|
- ignore: Take no action when the exception occurs.
|
||||||
|
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
|
||||||
|
- raise: Raise a `FloatingPointError`.
|
||||||
|
- call: Call a function specified using the `seterrcall` function.
|
||||||
|
- print: Print a warning directly to ``stdout``.
|
||||||
|
- log: Record error in a Log object specified by `seterrcall`.
|
||||||
|
|
||||||
|
The default is not to change the current behavior.
|
||||||
|
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||||
|
Treatment for division by zero.
|
||||||
|
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||||
|
Treatment for floating-point overflow.
|
||||||
|
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||||
|
Treatment for floating-point underflow.
|
||||||
|
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
|
||||||
|
Treatment for invalid floating-point operation.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
old_settings : dict
|
||||||
|
Dictionary containing the old settings.
|
||||||
|
|
||||||
|
See also
|
||||||
|
--------
|
||||||
|
seterrcall : Set a callback function for the 'call' mode.
|
||||||
|
geterr, geterrcall, errstate
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
|
||||||
|
|
||||||
|
- Division by zero: infinite result obtained from finite numbers.
|
||||||
|
- Overflow: result too large to be expressed.
|
||||||
|
- Underflow: result so close to zero that some precision
|
||||||
|
was lost.
|
||||||
|
- Invalid operation: result is not an expressible number, typically
|
||||||
|
indicates that a NaN was produced.
|
||||||
|
|
||||||
|
.. [1] https://en.wikipedia.org/wiki/IEEE_754
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> old_settings = np.seterr(all='ignore') #seterr to known value
|
||||||
|
>>> np.seterr(over='raise')
|
||||||
|
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
|
||||||
|
>>> np.seterr(**old_settings) # reset to default
|
||||||
|
{'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
|
||||||
|
|
||||||
|
>>> np.int16(32000) * np.int16(3)
|
||||||
|
30464
|
||||||
|
>>> old_settings = np.seterr(all='warn', over='raise')
|
||||||
|
>>> np.int16(32000) * np.int16(3)
|
||||||
|
Traceback (most recent call last):
|
||||||
|
File "<stdin>", line 1, in <module>
|
||||||
|
FloatingPointError: overflow encountered in short_scalars
|
||||||
|
|
||||||
|
>>> from collections import OrderedDict
|
||||||
|
>>> old_settings = np.seterr(all='print')
|
||||||
|
>>> OrderedDict(np.geterr())
|
||||||
|
OrderedDict([('divide', 'print'), ('over', 'print'), ('under', 'print'), ('invalid', 'print')])
|
||||||
|
>>> np.int16(32000) * np.int16(3)
|
||||||
|
30464
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
pyvals = umath.geterrobj()
|
||||||
|
old = geterr()
|
||||||
|
|
||||||
|
if divide is None:
|
||||||
|
divide = all or old['divide']
|
||||||
|
if over is None:
|
||||||
|
over = all or old['over']
|
||||||
|
if under is None:
|
||||||
|
under = all or old['under']
|
||||||
|
if invalid is None:
|
||||||
|
invalid = all or old['invalid']
|
||||||
|
|
||||||
|
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
|
||||||
|
(_errdict[over] << SHIFT_OVERFLOW) +
|
||||||
|
(_errdict[under] << SHIFT_UNDERFLOW) +
|
||||||
|
(_errdict[invalid] << SHIFT_INVALID))
|
||||||
|
|
||||||
|
pyvals[1] = maskvalue
|
||||||
|
umath.seterrobj(pyvals)
|
||||||
|
return old
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def geterr():
|
||||||
|
"""
|
||||||
|
Get the current way of handling floating-point errors.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
res : dict
|
||||||
|
A dictionary with keys "divide", "over", "under", and "invalid",
|
||||||
|
whose values are from the strings "ignore", "print", "log", "warn",
|
||||||
|
"raise", and "call". The keys represent possible floating-point
|
||||||
|
exceptions, and the values define how these exceptions are handled.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
geterrcall, seterr, seterrcall
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
For complete documentation of the types of floating-point exceptions and
|
||||||
|
treatment options, see `seterr`.
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> from collections import OrderedDict
|
||||||
|
>>> sorted(np.geterr().items())
|
||||||
|
[('divide', 'warn'), ('invalid', 'warn'), ('over', 'warn'), ('under', 'ignore')]
|
||||||
|
>>> np.arange(3.) / np.arange(3.)
|
||||||
|
array([nan, 1., 1.])
|
||||||
|
|
||||||
|
>>> oldsettings = np.seterr(all='warn', over='raise')
|
||||||
|
>>> OrderedDict(sorted(np.geterr().items()))
|
||||||
|
OrderedDict([('divide', 'warn'), ('invalid', 'warn'), ('over', 'raise'), ('under', 'warn')])
|
||||||
|
>>> np.arange(3.) / np.arange(3.)
|
||||||
|
array([nan, 1., 1.])
|
||||||
|
|
||||||
|
"""
|
||||||
|
maskvalue = umath.geterrobj()[1]
|
||||||
|
mask = 7
|
||||||
|
res = {}
|
||||||
|
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
|
||||||
|
res['divide'] = _errdict_rev[val]
|
||||||
|
val = (maskvalue >> SHIFT_OVERFLOW) & mask
|
||||||
|
res['over'] = _errdict_rev[val]
|
||||||
|
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
|
||||||
|
res['under'] = _errdict_rev[val]
|
||||||
|
val = (maskvalue >> SHIFT_INVALID) & mask
|
||||||
|
res['invalid'] = _errdict_rev[val]
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def setbufsize(size):
|
||||||
|
"""
|
||||||
|
Set the size of the buffer used in ufuncs.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
size : int
|
||||||
|
Size of buffer.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if size > 10e6:
|
||||||
|
raise ValueError("Buffer size, %s, is too big." % size)
|
||||||
|
if size < 5:
|
||||||
|
raise ValueError("Buffer size, %s, is too small." % size)
|
||||||
|
if size % 16 != 0:
|
||||||
|
raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
|
||||||
|
|
||||||
|
pyvals = umath.geterrobj()
|
||||||
|
old = getbufsize()
|
||||||
|
pyvals[0] = size
|
||||||
|
umath.seterrobj(pyvals)
|
||||||
|
return old
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def getbufsize():
|
||||||
|
"""
|
||||||
|
Return the size of the buffer used in ufuncs.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
getbufsize : int
|
||||||
|
Size of ufunc buffer in bytes.
|
||||||
|
|
||||||
|
"""
|
||||||
|
return umath.geterrobj()[0]
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages. The first
    is to set the error-handler to 'call', using `seterr`.  Then, set
    the function to call using this function.

    The second is to set the error-handler to 'log', using `seterr`.
    Floating-point errors then trigger a call to the 'write' method of
    the provided object.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message ('log'-mode).

        The call function takes two arguments. The first is a string describing
        the type of error (such as "divide by zero", "overflow", "underflow",
        or "invalid value"), and the second is the status flag.  The flag is a
        byte, whose four least-significant bits indicate the type of error, one
        of "divide", "over", "under", "invalid"::

          [0 0 0 0 divide over under invalid]

        In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.

        If an object is provided, its write method should take one argument,
        a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    See Also
    --------
    seterr, geterr, geterrcall

    Examples
    --------
    Callback upon error:

    >>> def err_handler(type, flag):
    ...     print("Floating point error (%s), with flag %s" % (type, flag))
    ...

    >>> saved_handler = np.seterrcall(err_handler)
    >>> save_err = np.seterr(all='call')
    >>> from collections import OrderedDict

    >>> np.array([1, 2, 3]) / 0.0
    Floating point error (divide by zero), with flag 1
    array([inf, inf, inf])

    >>> np.seterrcall(saved_handler)
    <function err_handler at 0x...>
    >>> OrderedDict(sorted(np.seterr(**save_err).items()))
    OrderedDict([('divide', 'call'), ('invalid', 'call'), ('over', 'call'), ('under', 'call')])

    Log error message:

    >>> class Log(object):
    ...     def write(self, msg):
    ...         print("LOG: %s" % msg)
    ...

    >>> log = Log()
    >>> saved_handler = np.seterrcall(log)
    >>> save_err = np.seterr(all='log')

    >>> np.array([1, 2, 3]) / 0.0
    LOG: Warning: divide by zero encountered in true_divide
    array([inf, inf, inf])

    >>> np.seterrcall(saved_handler)
    <numpy.core.numeric.Log object at 0x...>
    >>> OrderedDict(sorted(np.seterr(**save_err).items()))
    OrderedDict([('divide', 'log'), ('invalid', 'log'), ('over', 'log'), ('under', 'log')])

    """
    # Accept None (clears the handler), any callable, or a log-like object
    # exposing a callable 'write' method.  The built-in callable() is the
    # canonical check and is equivalent to the previous
    # isinstance(..., collections_abc.Callable) test.
    if func is not None and not callable(func):
        if not hasattr(func, 'write') or not callable(func.write):
            raise ValueError("Only callable can be used as callback")
    # The callback lives in slot 2 of the interpreter-level error object;
    # install the new one and return the previous handler for restoration.
    pyvals = umath.geterrobj()
    old = geterrcall()
    pyvals[2] = func
    umath.seterrobj(pyvals)
    return old
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of "divide",
    "over", "under", or "invalid") is set to 'call' or 'log', the function
    that is called or the log instance that is written to is returned by
    `geterrcall`. This function or log instance has been set with
    `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler. If no handler was set through `seterrcall`,
        ``None`` is returned.

    See Also
    --------
    seterrcall, seterr, geterr

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterrcall()  # we did not yet set a handler, returns None

    >>> oldsettings = np.seterr(all='call')
    >>> def err_handler(type, flag):
    ...     print("Floating point error (%s), with flag %s" % (type, flag))
    >>> oldhandler = np.seterrcall(err_handler)
    >>> np.array([1, 2, 3]) / 0.0
    Floating point error (divide by zero), with flag 1
    array([inf, inf, inf])

    >>> cur_handler = np.geterrcall()
    >>> cur_handler is err_handler
    True

    """
    # The callback (or log object) is stored in slot 2 of the
    # interpreter-level error object maintained by umath.
    return umath.geterrobj()[2]
|
||||||
|
|
||||||
|
|
||||||
|
class _unspecified(object):
    # Sentinel type used to distinguish "argument not given" from an
    # explicit None, since None is a valid value for errstate's 'call'
    # keyword (it clears the error callback).
    pass


# Shared singleton sentinel instance; compared with 'is' in errstate.
_Unspecified = _unspecified()
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
class errstate(contextlib.ContextDecorator):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Using an instance of `errstate` as a context manager allows statements in
    that context to execute with a known error handling behavior. Upon entering
    the context the error handling is set with `seterr` and `seterrcall`, and
    upon exiting it is reset to what it was before.

    .. versionchanged:: 1.17.0
        `errstate` is also usable as a function decorator, saving
        a level of indentation if an entire function is wrapped.
        See :py:class:`contextlib.ContextDecorator` for more information.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Keyword arguments. The valid keywords are the possible floating-point
        exceptions. Each keyword should have a string value that defines the
        treatment for the particular error. Possible values are
        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> from collections import OrderedDict
    >>> olderr = np.seterr(all='ignore')  # Set error handling to known state.

    >>> np.arange(3) / 0.
    array([nan, inf, inf])
    >>> with np.errstate(divide='warn'):
    ...     np.arange(3) / 0.
    array([nan, inf, inf])

    >>> np.sqrt(-1)
    nan
    >>> with np.errstate(invalid='raise'):
    ...     np.sqrt(-1)
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    FloatingPointError: invalid value encountered in sqrt

    Outside the context the error handling behavior has not changed:

    >>> OrderedDict(sorted(np.geterr().items()))
    OrderedDict([('divide', 'ignore'), ('invalid', 'ignore'), ('over', 'ignore'), ('under', 'ignore')])

    """
    # Note that we don't want to run the above doctests because they will fail
    # without a from __future__ import with_statement

    def __init__(self, **kwargs):
        # 'call' is handled separately via seterrcall.  _Unspecified (not
        # None) marks "keyword absent", because None is itself a valid
        # callback value (it clears the handler).
        self.call = kwargs.pop('call', _Unspecified)
        self.kwargs = kwargs

    def __enter__(self):
        # Install the requested state, remembering what it replaced.
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)

    def __exit__(self, *exc_info):
        # Restore the saved state unconditionally, even when the body of
        # the 'with' block raised.
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
|
||||||
|
|
||||||
|
|
||||||
|
def _setdef():
    # Reset the interpreter-level error object to its defaults:
    # [buffer size, error mask, callback].
    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
    umath.seterrobj(defval)


# set the default values at import time
_setdef()
|
Binary file not shown.
1622
Restaurant/Marta/venv/Lib/site-packages/numpy/core/arrayprint.py
Normal file
1622
Restaurant/Marta/venv/Lib/site-packages/numpy/core/arrayprint.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,15 @@
|
|||||||
|
"""Simple script to compute the api hash of the current API.
|
||||||
|
|
||||||
|
The API has is defined by numpy_api_order and ufunc_api_order.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
from os.path import dirname
|
||||||
|
|
||||||
|
from code_generators.genapi import fullapi_hash
|
||||||
|
from code_generators.numpy_api import full_api
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
curdir = dirname(__file__)
|
||||||
|
print(fullapi_hash(full_api))
|
2819
Restaurant/Marta/venv/Lib/site-packages/numpy/core/defchararray.py
Normal file
2819
Restaurant/Marta/venv/Lib/site-packages/numpy/core/defchararray.py
Normal file
File diff suppressed because it is too large
Load Diff
1432
Restaurant/Marta/venv/Lib/site-packages/numpy/core/einsumfunc.py
Normal file
1432
Restaurant/Marta/venv/Lib/site-packages/numpy/core/einsumfunc.py
Normal file
File diff suppressed because it is too large
Load Diff
3649
Restaurant/Marta/venv/Lib/site-packages/numpy/core/fromnumeric.py
Normal file
3649
Restaurant/Marta/venv/Lib/site-packages/numpy/core/fromnumeric.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,514 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import warnings
|
||||||
|
import operator
|
||||||
|
import types
|
||||||
|
|
||||||
|
from . import numeric as _nx
|
||||||
|
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
|
||||||
|
TooHardError, asanyarray, ndim)
|
||||||
|
from numpy.core.multiarray import add_docstring
|
||||||
|
from numpy.core import overrides
|
||||||
|
|
||||||
|
# Public API of this module.
__all__ = ['logspace', 'linspace', 'geomspace']


# All public functions here participate in __array_function__ dispatch
# under the 'numpy' module name.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
|
||||||
|
|
||||||
|
|
||||||
|
def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
|
||||||
|
dtype=None, axis=None):
|
||||||
|
return (start, stop)
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_linspace_dispatcher)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
             axis=0):
    """
    Return evenly spaced numbers over a specified interval.

    Returns `num` evenly spaced samples, calculated over the
    interval [`start`, `stop`].

    The endpoint of the interval can optionally be excluded.

    .. versionchanged:: 1.16.0
        Non-scalar `start` and `stop` are now supported.

    Parameters
    ----------
    start : array_like
        The starting value of the sequence.
    stop : array_like
        The end value of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced samples, so that `stop` is excluded.  Note that the step
        size changes when `endpoint` is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (`samples`, `step`), where `step` is the spacing
        between samples.
    dtype : dtype, optional
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.

        .. versionadded:: 1.9.0

    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start
        or stop are array-like.  By default (0), the samples will be along a
        new axis inserted at the beginning. Use -1 to get an axis at the end.

        .. versionadded:: 1.16.0

    Returns
    -------
    samples : ndarray
        There are `num` equally spaced samples in the closed interval
        ``[start, stop]`` or the half-open interval ``[start, stop)``
        (depending on whether `endpoint` is True or False).
    step : float, optional
        Only returned if `retstep` is True

        Size of spacing between samples.


    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).
    geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
                scale (a geometric progression).
    logspace : Similar to `geomspace`, but with the end points specified as
               logarithms.

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([2.  , 2.25, 2.5 , 2.75, 3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([2. ,  2.2,  2.4,  2.6,  2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 8
    >>> y = np.zeros(N)
    >>> x1 = np.linspace(0, 10, N, endpoint=True)
    >>> x2 = np.linspace(0, 10, N, endpoint=False)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    """
    # Reject non-integral 'num' values (e.g. floats) up front.
    try:
        num = operator.index(num)
    except TypeError:
        raise TypeError(
            "object of type {} cannot be safely interpreted as an integer."
            .format(type(num)))

    if num < 0:
        raise ValueError("Number of samples, %s, must be non-negative." % num)
    # 'div' is the number of intervals between samples (num-1 when the
    # endpoint is included).
    div = (num - 1) if endpoint else num

    # Convert float/complex array scalars to float, gh-3504
    # and make sure one can use variables that have an __array_interface__, gh-6634
    start = asanyarray(start) * 1.0
    stop = asanyarray(stop) * 1.0

    dt = result_type(start, stop, float(num))
    if dtype is None:
        dtype = dt

    delta = stop - start
    # Index ramp 0..num-1, reshaped so it broadcasts against delta along
    # the (new) leading axis.
    y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
    # In-place multiplication y *= delta/div is faster, but prevents the multiplicant
    # from overriding what class is produced, and thus prevents, e.g. use of Quantities,
    # see gh-7142. Hence, we multiply in place only for standard scalar types.
    _mult_inplace = _nx.isscalar(delta)
    if div > 0:
        step = delta / div
        if _nx.any(step == 0):
            # Special handling for denormal numbers, gh-5437
            y /= div
            if _mult_inplace:
                y *= delta
            else:
                y = y * delta
        else:
            if _mult_inplace:
                y *= step
            else:
                y = y * step
    else:
        # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
        # have an undefined step
        step = NaN
        # Multiply with delta to allow possible override of output class.
        y = y * delta

    y += start

    # Force the final sample to be exactly 'stop', avoiding rounding drift.
    if endpoint and num > 1:
        y[-1] = stop

    if axis != 0:
        y = _nx.moveaxis(y, 0, axis)

    if retstep:
        return y.astype(dtype, copy=False), step
    else:
        return y.astype(dtype, copy=False)
|
||||||
|
|
||||||
|
|
||||||
|
def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
|
||||||
|
dtype=None, axis=None):
|
||||||
|
return (start, stop)
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_logspace_dispatcher)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
             axis=0):
    """
    Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start``
    (`base` to the power of `start`) and ends with ``base ** stop``
    (see `endpoint` below).

    .. versionchanged:: 1.16.0
        Non-scalar `start` and `stop` are now supported.

    Parameters
    ----------
    start : array_like
        ``base ** start`` is the starting value of the sequence.
    stop : array_like
        ``base ** stop`` is the final value of the sequence, unless `endpoint`
        is False.  In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : float, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start
        or stop are array-like.  By default (0), the samples will be along a
        new axis inserted at the beginning. Use -1 to get an axis at the end.

        .. versionadded:: 1.16.0


    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of the
             number of samples. Note that, when used with a float endpoint, the
             endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly distributed
               in linear space, instead of log space.
    geomspace : Similar to logspace, but with endpoints specified directly.

    Notes
    -----
    Logspace is equivalent to the code

    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ... # doctest: +SKIP
    >>> power(base, y).astype(dtype)
    ... # doctest: +SKIP

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([ 100.        ,  215.443469  ,  464.15888336, 1000.        ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([100.        ,  177.827941  ,  316.22776602,  562.34132519])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([4.        ,  5.0396842 ,  6.34960421,  8.        ])

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
    >>> y = np.zeros(N)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    """
    # Build the exponents linearly, then raise 'base' to them.  The dtype
    # conversion is deferred to the end so that intermediate computation
    # keeps full precision.
    exponents = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
    samples = _nx.power(base, exponents)
    if dtype is not None:
        samples = samples.astype(dtype, copy=False)
    return samples
|
||||||
|
|
||||||
|
|
||||||
|
def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
|
||||||
|
axis=None):
|
||||||
|
return (start, stop)
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_geomspace_dispatcher)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
    """
    Return numbers spaced evenly on a log scale (a geometric progression).

    This is similar to `logspace`, but with endpoints specified directly.
    Each output sample is a constant multiple of the previous.

    .. versionchanged:: 1.16.0
        Non-scalar `start` and `stop` are now supported.

    Parameters
    ----------
    start : array_like
        The starting value of the sequence.
    stop : array_like
        The final value of the sequence, unless `endpoint` is False.
        In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length `num`) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples.  Relevant only if start
        or stop are array-like.  By default (0), the samples will be along a
        new axis inserted at the beginning. Use -1 to get an axis at the end.

        .. versionadded:: 1.16.0

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    logspace : Similar to geomspace, but with endpoints specified using log
               and base.
    linspace : Similar to geomspace, but with arithmetic instead of geometric
               progression.
    arange : Similar to linspace, with the step size specified instead of the
             number of samples.

    Notes
    -----
    If the inputs or dtype are complex, the output will follow a logarithmic
    spiral in the complex plane.  (There are an infinite number of spirals
    passing through two points; the output will follow the shortest such path.)

    Examples
    --------
    >>> np.geomspace(1, 1000, num=4)
    array([    1.,    10.,   100.,  1000.])
    >>> np.geomspace(1, 1000, num=3, endpoint=False)
    array([  1.,  10., 100.])
    >>> np.geomspace(1, 1000, num=4, endpoint=False)
    array([  1.        ,   5.62341325,  31.6227766 , 177.827941  ])
    >>> np.geomspace(1, 256, num=9)
    array([  1.,   2.,   4.,   8.,  16.,  32.,  64., 128., 256.])

    Note that the above may not produce exact integers:

    >>> np.geomspace(1, 256, num=9, dtype=int)
    array([  1,   2,   4,   7,  16,  32,  63, 127, 256])
    >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
    array([  1,   2,   4,   8,  16,  32,  64, 128, 256])

    Negative, decreasing, and complex inputs are allowed:

    >>> np.geomspace(1000, 1, num=4)
    array([1000.,  100.,   10.,    1.])
    >>> np.geomspace(-1000, -1, num=4)
    array([-1000.,  -100.,   -10.,    -1.])
    >>> np.geomspace(1j, 1000j, num=4)  # Straight line
    array([0.   +1.j, 0.  +10.j, 0. +100.j, 0.+1000.j])
    >>> np.geomspace(-1+0j, 1+0j, num=5)  # Circle
    array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
            6.12323400e-17+1.00000000e+00j,  7.07106781e-01+7.07106781e-01j,
            1.00000000e+00+0.00000000e+00j])

    Graphical illustration of ``endpoint`` parameter:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> y = np.zeros(N)
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.axis([0.5, 2000, 0, 3])
    [0.5, 2000, 0, 3]
    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
    >>> plt.show()

    """
    start = asanyarray(start)
    stop = asanyarray(stop)
    # A geometric progression through zero would require a zero ratio.
    if _nx.any(start == 0) or _nx.any(stop == 0):
        raise ValueError('Geometric sequence cannot include zero')

    dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
    if dtype is None:
        dtype = dt
    else:
        # complex to dtype('complex128'), for instance
        dtype = _nx.dtype(dtype)

    # Promote both arguments to the same dtype in case, for instance, one is
    # complex and another is negative and log would produce NaN otherwise.
    # Copy since we may change things in-place further down.
    start = start.astype(dt, copy=True)
    stop = stop.astype(dt, copy=True)

    # out_sign records the rotation/sign factor that is divided out below
    # and re-applied to the final result.
    out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
    # Avoid negligible real or imaginary parts in output by rotating to
    # positive real, calculating, then undoing rotation
    if _nx.issubdtype(dt, _nx.complexfloating):
        all_imag = (start.real == 0.) & (stop.real == 0.)
        if _nx.any(all_imag):
            start[all_imag] = start[all_imag].imag
            stop[all_imag] = stop[all_imag].imag
            out_sign[all_imag] = 1j

    both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
    if _nx.any(both_negative):
        # Flip both endpoints positive in place, remembering the sign in
        # out_sign so it can be restored after the log-space computation.
        _nx.negative(start, out=start, where=both_negative)
        _nx.negative(stop, out=stop, where=both_negative)
        _nx.negative(out_sign, out=out_sign, where=both_negative)

    # Delegate the even spacing in log-space to logspace (base 10).
    log_start = _nx.log10(start)
    log_stop = _nx.log10(stop)
    result = out_sign * logspace(log_start, log_stop, num=num,
                                 endpoint=endpoint, base=10.0, dtype=dtype)
    if axis != 0:
        result = _nx.moveaxis(result, 0, axis)

    return result.astype(dtype, copy=False)
|
||||||
|
|
||||||
|
|
||||||
|
def _needs_add_docstring(obj):
|
||||||
|
"""
|
||||||
|
Returns true if the only way to set the docstring of `obj` from python is
|
||||||
|
via add_docstring.
|
||||||
|
|
||||||
|
This function errs on the side of being overly conservative.
|
||||||
|
"""
|
||||||
|
Py_TPFLAGS_HEAPTYPE = 1 << 9
|
||||||
|
|
||||||
|
if isinstance(obj, (types.FunctionType, types.MethodType, property)):
|
||||||
|
return False
|
||||||
|
|
||||||
|
if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _add_docstring(obj, doc, warn_on_python):
    """Attach *doc* to *obj* via the C-level add_docstring, best-effort."""
    if warn_on_python and not _needs_add_docstring(obj):
        message = (
            "add_newdoc was used on a pure-python object {}. "
            "Prefer to attach it directly to the source."
            .format(obj))
        warnings.warn(message, UserWarning, stacklevel=3)
    # add_docstring can fail (e.g. read-only docstrings); callers treat
    # this as best-effort, so any failure is deliberately swallowed.
    try:
        add_docstring(obj, doc)
    except Exception:
        pass
|
||||||
|
|
||||||
|
|
||||||
|
def add_newdoc(place, obj, doc, warn_on_python=True):
    """
    Add documentation to an existing object, typically one defined in C

    The purpose is to allow easier editing of the docstrings without requiring
    a re-compile. This exists primarily for internal use within numpy itself.

    Parameters
    ----------
    place : str
        The absolute name of the module to import from
    obj : str
        The name of the object to add documentation to, typically a class or
        function name
    doc : {str, Tuple[str, str], List[Tuple[str, str]]}
        If a string, the documentation to apply to `obj`

        If a tuple, then the first element is interpreted as an attribute of
        `obj` and the second as the docstring to apply - ``(method, docstring)``

        If a list, then each element of the list should be a tuple of length
        two - ``[(method1, docstring1), (method2, docstring2), ...]``
    warn_on_python : bool
        If True, the default, emit `UserWarning` if this is used to attach
        documentation to a pure-python object.

    Notes
    -----
    This routine never raises an error if the docstring can't be written, but
    will raise an error if the object being documented does not exist.

    This routine cannot modify read-only docstrings, as appear
    in new-style classes or built-in functions. Because this
    routine never raises an error the caller must check manually
    that the docstrings were changed.

    Since this function grabs the ``char *`` from a c-level str object and puts
    it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
    C-API best-practices, by:

    - modifying a `PyTypeObject` after calling `PyType_Ready`
    - calling `Py_INCREF` on the str and losing the reference, so the str
      will never be released

    If possible it should be avoided.
    """
    # Import the target module and look up the named object; this raises
    # if the object does not exist (the one error this function allows).
    new = getattr(__import__(place, globals(), {}, [obj]), obj)
    if isinstance(doc, str):
        _add_docstring(new, doc.strip(), warn_on_python)
    elif isinstance(doc, tuple):
        # (attribute-name, docstring) form.
        attr, docstring = doc
        _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
    elif isinstance(doc, list):
        # List of (attribute-name, docstring) pairs.
        for attr, docstring in doc:
            _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
|
@ -0,0 +1,254 @@
|
|||||||
|
from __future__ import division, print_function
|
||||||
|
|
||||||
|
import os
|
||||||
|
import genapi
|
||||||
|
|
||||||
|
from genapi import \
|
||||||
|
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
|
||||||
|
|
||||||
|
import numpy_api
|
||||||
|
|
||||||
|
# use annotated api when running under cpychecker
|
||||||
|
h_template = r"""
|
||||||
|
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_bool obval;
|
||||||
|
} PyBoolScalarObject;
|
||||||
|
|
||||||
|
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
|
||||||
|
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
|
||||||
|
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
|
||||||
|
|
||||||
|
%s
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
|
||||||
|
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
|
||||||
|
extern void **PyArray_API;
|
||||||
|
#else
|
||||||
|
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
|
||||||
|
void **PyArray_API;
|
||||||
|
#else
|
||||||
|
static void **PyArray_API=NULL;
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
%s
|
||||||
|
|
||||||
|
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
|
||||||
|
static int
|
||||||
|
_import_array(void)
|
||||||
|
{
|
||||||
|
int st;
|
||||||
|
PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
|
||||||
|
PyObject *c_api = NULL;
|
||||||
|
|
||||||
|
if (numpy == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
|
||||||
|
Py_DECREF(numpy);
|
||||||
|
if (c_api == NULL) {
|
||||||
|
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if PY_VERSION_HEX >= 0x03000000
|
||||||
|
if (!PyCapsule_CheckExact(c_api)) {
|
||||||
|
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
|
||||||
|
Py_DECREF(c_api);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
|
||||||
|
#else
|
||||||
|
if (!PyCObject_Check(c_api)) {
|
||||||
|
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
|
||||||
|
Py_DECREF(c_api);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
|
||||||
|
#endif
|
||||||
|
Py_DECREF(c_api);
|
||||||
|
if (PyArray_API == NULL) {
|
||||||
|
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Perform runtime check of C API version */
|
||||||
|
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
|
||||||
|
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
|
||||||
|
"ABI version 0x%%x but this version of numpy is 0x%%x", \
|
||||||
|
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
|
||||||
|
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
|
||||||
|
"API version 0x%%x but this version of numpy is 0x%%x", \
|
||||||
|
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Perform runtime check of endianness and check it matches the one set by
|
||||||
|
* the headers (npy_endian.h) as a safeguard
|
||||||
|
*/
|
||||||
|
st = PyArray_GetEndianness();
|
||||||
|
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
|
||||||
|
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
|
||||||
|
if (st != NPY_CPU_BIG) {
|
||||||
|
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
|
||||||
|
"big endian, but detected different endianness at runtime");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
|
||||||
|
if (st != NPY_CPU_LITTLE) {
|
||||||
|
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
|
||||||
|
"little endian, but detected different endianness at runtime");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if PY_VERSION_HEX >= 0x03000000
|
||||||
|
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
|
||||||
|
#else
|
||||||
|
#define NUMPY_IMPORT_ARRAY_RETVAL
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
|
||||||
|
|
||||||
|
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
|
||||||
|
|
||||||
|
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
c_template = r"""
|
||||||
|
/* These pointers will be stored in the C-object for use in other
|
||||||
|
extension modules
|
||||||
|
*/
|
||||||
|
|
||||||
|
void *PyArray_API[] = {
|
||||||
|
%s
|
||||||
|
};
|
||||||
|
"""
|
||||||
|
|
||||||
|
c_api_header = """
|
||||||
|
===========
|
||||||
|
NumPy C-API
|
||||||
|
===========
|
||||||
|
"""
|
||||||
|
|
||||||
|
def generate_api(output_dir, force=False):
|
||||||
|
basename = 'multiarray_api'
|
||||||
|
|
||||||
|
h_file = os.path.join(output_dir, '__%s.h' % basename)
|
||||||
|
c_file = os.path.join(output_dir, '__%s.c' % basename)
|
||||||
|
d_file = os.path.join(output_dir, '%s.txt' % basename)
|
||||||
|
targets = (h_file, c_file, d_file)
|
||||||
|
|
||||||
|
sources = numpy_api.multiarray_api
|
||||||
|
|
||||||
|
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
|
||||||
|
return targets
|
||||||
|
else:
|
||||||
|
do_generate_api(targets, sources)
|
||||||
|
|
||||||
|
return targets
|
||||||
|
|
||||||
|
def do_generate_api(targets, sources):
|
||||||
|
header_file = targets[0]
|
||||||
|
c_file = targets[1]
|
||||||
|
doc_file = targets[2]
|
||||||
|
|
||||||
|
global_vars = sources[0]
|
||||||
|
scalar_bool_values = sources[1]
|
||||||
|
types_api = sources[2]
|
||||||
|
multiarray_funcs = sources[3]
|
||||||
|
|
||||||
|
multiarray_api = sources[:]
|
||||||
|
|
||||||
|
module_list = []
|
||||||
|
extension_list = []
|
||||||
|
init_list = []
|
||||||
|
|
||||||
|
# Check multiarray api indexes
|
||||||
|
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
|
||||||
|
genapi.check_api_dict(multiarray_api_index)
|
||||||
|
|
||||||
|
numpyapi_list = genapi.get_api_functions('NUMPY_API',
|
||||||
|
multiarray_funcs)
|
||||||
|
|
||||||
|
# FIXME: ordered_funcs_api is unused
|
||||||
|
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
|
||||||
|
|
||||||
|
# Create dict name -> *Api instance
|
||||||
|
api_name = 'PyArray_API'
|
||||||
|
multiarray_api_dict = {}
|
||||||
|
for f in numpyapi_list:
|
||||||
|
name = f.name
|
||||||
|
index = multiarray_funcs[name][0]
|
||||||
|
annotations = multiarray_funcs[name][1:]
|
||||||
|
multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
|
||||||
|
f.return_type,
|
||||||
|
f.args, api_name)
|
||||||
|
|
||||||
|
for name, val in global_vars.items():
|
||||||
|
index, type = val
|
||||||
|
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
|
||||||
|
|
||||||
|
for name, val in scalar_bool_values.items():
|
||||||
|
index = val[0]
|
||||||
|
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
|
||||||
|
|
||||||
|
for name, val in types_api.items():
|
||||||
|
index = val[0]
|
||||||
|
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
|
||||||
|
|
||||||
|
if len(multiarray_api_dict) != len(multiarray_api_index):
|
||||||
|
keys_dict = set(multiarray_api_dict.keys())
|
||||||
|
keys_index = set(multiarray_api_index.keys())
|
||||||
|
raise AssertionError(
|
||||||
|
"Multiarray API size mismatch - "
|
||||||
|
"index has extra keys {}, dict has extra keys {}"
|
||||||
|
.format(keys_index - keys_dict, keys_dict - keys_index)
|
||||||
|
)
|
||||||
|
|
||||||
|
extension_list = []
|
||||||
|
for name, index in genapi.order_dict(multiarray_api_index):
|
||||||
|
api_item = multiarray_api_dict[name]
|
||||||
|
extension_list.append(api_item.define_from_array_api_string())
|
||||||
|
init_list.append(api_item.array_api_define())
|
||||||
|
module_list.append(api_item.internal_define())
|
||||||
|
|
||||||
|
# Write to header
|
||||||
|
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
|
||||||
|
genapi.write_file(header_file, s)
|
||||||
|
|
||||||
|
# Write to c-code
|
||||||
|
s = c_template % ',\n'.join(init_list)
|
||||||
|
genapi.write_file(c_file, s)
|
||||||
|
|
||||||
|
# write to documentation
|
||||||
|
s = c_api_header
|
||||||
|
for func in numpyapi_list:
|
||||||
|
s += func.to_ReST()
|
||||||
|
s += '\n\n'
|
||||||
|
genapi.write_file(doc_file, s)
|
||||||
|
|
||||||
|
return targets
|
548
Restaurant/Marta/venv/Lib/site-packages/numpy/core/getlimits.py
Normal file
548
Restaurant/Marta/venv/Lib/site-packages/numpy/core/getlimits.py
Normal file
@ -0,0 +1,548 @@
|
|||||||
|
"""Machine limits for Float32 and Float64 and (long double) if available...
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
__all__ = ['finfo', 'iinfo']
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from .machar import MachAr
|
||||||
|
from .overrides import set_module
|
||||||
|
from . import numeric
|
||||||
|
from . import numerictypes as ntypes
|
||||||
|
from .numeric import array, inf
|
||||||
|
from .umath import log10, exp2
|
||||||
|
from . import umath
|
||||||
|
|
||||||
|
|
||||||
|
def _fr0(a):
|
||||||
|
"""fix rank-0 --> rank-1"""
|
||||||
|
if a.ndim == 0:
|
||||||
|
a = a.copy()
|
||||||
|
a.shape = (1,)
|
||||||
|
return a
|
||||||
|
|
||||||
|
|
||||||
|
def _fr1(a):
|
||||||
|
"""fix rank > 0 --> rank-0"""
|
||||||
|
if a.size == 1:
|
||||||
|
a = a.copy()
|
||||||
|
a.shape = ()
|
||||||
|
return a
|
||||||
|
|
||||||
|
class MachArLike(object):
|
||||||
|
""" Object to simulate MachAr instance """
|
||||||
|
|
||||||
|
def __init__(self,
|
||||||
|
ftype,
|
||||||
|
**kwargs):
|
||||||
|
params = _MACHAR_PARAMS[ftype]
|
||||||
|
float_conv = lambda v: array([v], ftype)
|
||||||
|
float_to_float = lambda v : _fr1(float_conv(v))
|
||||||
|
float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype))
|
||||||
|
|
||||||
|
self.title = params['title']
|
||||||
|
# Parameter types same as for discovered MachAr object.
|
||||||
|
self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
|
||||||
|
self.epsneg = float_to_float(kwargs.pop('epsneg'))
|
||||||
|
self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
|
||||||
|
self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
|
||||||
|
self.ibeta = params['itype'](kwargs.pop('ibeta'))
|
||||||
|
self.__dict__.update(kwargs)
|
||||||
|
self.precision = int(-log10(self.eps))
|
||||||
|
self.resolution = float_to_float(float_conv(10) ** (-self.precision))
|
||||||
|
self._str_eps = float_to_str(self.eps)
|
||||||
|
self._str_epsneg = float_to_str(self.epsneg)
|
||||||
|
self._str_xmin = float_to_str(self.xmin)
|
||||||
|
self._str_xmax = float_to_str(self.xmax)
|
||||||
|
self._str_resolution = float_to_str(self.resolution)
|
||||||
|
|
||||||
|
_convert_to_float = {
|
||||||
|
ntypes.csingle: ntypes.single,
|
||||||
|
ntypes.complex_: ntypes.float_,
|
||||||
|
ntypes.clongfloat: ntypes.longfloat
|
||||||
|
}
|
||||||
|
|
||||||
|
# Parameters for creating MachAr / MachAr-like objects
|
||||||
|
_title_fmt = 'numpy {} precision floating point number'
|
||||||
|
_MACHAR_PARAMS = {
|
||||||
|
ntypes.double: dict(
|
||||||
|
itype = ntypes.int64,
|
||||||
|
fmt = '%24.16e',
|
||||||
|
title = _title_fmt.format('double')),
|
||||||
|
ntypes.single: dict(
|
||||||
|
itype = ntypes.int32,
|
||||||
|
fmt = '%15.7e',
|
||||||
|
title = _title_fmt.format('single')),
|
||||||
|
ntypes.longdouble: dict(
|
||||||
|
itype = ntypes.longlong,
|
||||||
|
fmt = '%s',
|
||||||
|
title = _title_fmt.format('long double')),
|
||||||
|
ntypes.half: dict(
|
||||||
|
itype = ntypes.int16,
|
||||||
|
fmt = '%12.5e',
|
||||||
|
title = _title_fmt.format('half'))}
|
||||||
|
|
||||||
|
# Key to identify the floating point type. Key is result of
|
||||||
|
# ftype('-0.1').newbyteorder('<').tobytes()
|
||||||
|
# See:
|
||||||
|
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
|
||||||
|
_KNOWN_TYPES = {}
|
||||||
|
def _register_type(machar, bytepat):
|
||||||
|
_KNOWN_TYPES[bytepat] = machar
|
||||||
|
_float_ma = {}
|
||||||
|
|
||||||
|
def _register_known_types():
|
||||||
|
# Known parameters for float16
|
||||||
|
# See docstring of MachAr class for description of parameters.
|
||||||
|
f16 = ntypes.float16
|
||||||
|
float16_ma = MachArLike(f16,
|
||||||
|
machep=-10,
|
||||||
|
negep=-11,
|
||||||
|
minexp=-14,
|
||||||
|
maxexp=16,
|
||||||
|
it=10,
|
||||||
|
iexp=5,
|
||||||
|
ibeta=2,
|
||||||
|
irnd=5,
|
||||||
|
ngrd=0,
|
||||||
|
eps=exp2(f16(-10)),
|
||||||
|
epsneg=exp2(f16(-11)),
|
||||||
|
huge=f16(65504),
|
||||||
|
tiny=f16(2 ** -14))
|
||||||
|
_register_type(float16_ma, b'f\xae')
|
||||||
|
_float_ma[16] = float16_ma
|
||||||
|
|
||||||
|
# Known parameters for float32
|
||||||
|
f32 = ntypes.float32
|
||||||
|
float32_ma = MachArLike(f32,
|
||||||
|
machep=-23,
|
||||||
|
negep=-24,
|
||||||
|
minexp=-126,
|
||||||
|
maxexp=128,
|
||||||
|
it=23,
|
||||||
|
iexp=8,
|
||||||
|
ibeta=2,
|
||||||
|
irnd=5,
|
||||||
|
ngrd=0,
|
||||||
|
eps=exp2(f32(-23)),
|
||||||
|
epsneg=exp2(f32(-24)),
|
||||||
|
huge=f32((1 - 2 ** -24) * 2**128),
|
||||||
|
tiny=exp2(f32(-126)))
|
||||||
|
_register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
|
||||||
|
_float_ma[32] = float32_ma
|
||||||
|
|
||||||
|
# Known parameters for float64
|
||||||
|
f64 = ntypes.float64
|
||||||
|
epsneg_f64 = 2.0 ** -53.0
|
||||||
|
tiny_f64 = 2.0 ** -1022.0
|
||||||
|
float64_ma = MachArLike(f64,
|
||||||
|
machep=-52,
|
||||||
|
negep=-53,
|
||||||
|
minexp=-1022,
|
||||||
|
maxexp=1024,
|
||||||
|
it=52,
|
||||||
|
iexp=11,
|
||||||
|
ibeta=2,
|
||||||
|
irnd=5,
|
||||||
|
ngrd=0,
|
||||||
|
eps=2.0 ** -52.0,
|
||||||
|
epsneg=epsneg_f64,
|
||||||
|
huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
|
||||||
|
tiny=tiny_f64)
|
||||||
|
_register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
||||||
|
_float_ma[64] = float64_ma
|
||||||
|
|
||||||
|
# Known parameters for IEEE 754 128-bit binary float
|
||||||
|
ld = ntypes.longdouble
|
||||||
|
epsneg_f128 = exp2(ld(-113))
|
||||||
|
tiny_f128 = exp2(ld(-16382))
|
||||||
|
# Ignore runtime error when this is not f128
|
||||||
|
with numeric.errstate(all='ignore'):
|
||||||
|
huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
|
||||||
|
float128_ma = MachArLike(ld,
|
||||||
|
machep=-112,
|
||||||
|
negep=-113,
|
||||||
|
minexp=-16382,
|
||||||
|
maxexp=16384,
|
||||||
|
it=112,
|
||||||
|
iexp=15,
|
||||||
|
ibeta=2,
|
||||||
|
irnd=5,
|
||||||
|
ngrd=0,
|
||||||
|
eps=exp2(ld(-112)),
|
||||||
|
epsneg=epsneg_f128,
|
||||||
|
huge=huge_f128,
|
||||||
|
tiny=tiny_f128)
|
||||||
|
# IEEE 754 128-bit binary float
|
||||||
|
_register_type(float128_ma,
|
||||||
|
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
|
||||||
|
_register_type(float128_ma,
|
||||||
|
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
|
||||||
|
_float_ma[128] = float128_ma
|
||||||
|
|
||||||
|
# Known parameters for float80 (Intel 80-bit extended precision)
|
||||||
|
epsneg_f80 = exp2(ld(-64))
|
||||||
|
tiny_f80 = exp2(ld(-16382))
|
||||||
|
# Ignore runtime error when this is not f80
|
||||||
|
with numeric.errstate(all='ignore'):
|
||||||
|
huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
|
||||||
|
float80_ma = MachArLike(ld,
|
||||||
|
machep=-63,
|
||||||
|
negep=-64,
|
||||||
|
minexp=-16382,
|
||||||
|
maxexp=16384,
|
||||||
|
it=63,
|
||||||
|
iexp=15,
|
||||||
|
ibeta=2,
|
||||||
|
irnd=5,
|
||||||
|
ngrd=0,
|
||||||
|
eps=exp2(ld(-63)),
|
||||||
|
epsneg=epsneg_f80,
|
||||||
|
huge=huge_f80,
|
||||||
|
tiny=tiny_f80)
|
||||||
|
# float80, first 10 bytes containing actual storage
|
||||||
|
_register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
|
||||||
|
_float_ma[80] = float80_ma
|
||||||
|
|
||||||
|
# Guessed / known parameters for double double; see:
|
||||||
|
# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
|
||||||
|
# These numbers have the same exponent range as float64, but extended number of
|
||||||
|
# digits in the significand.
|
||||||
|
huge_dd = (umath.nextafter(ld(inf), ld(0))
|
||||||
|
if hasattr(umath, 'nextafter') # Missing on some platforms?
|
||||||
|
else float64_ma.huge)
|
||||||
|
float_dd_ma = MachArLike(ld,
|
||||||
|
machep=-105,
|
||||||
|
negep=-106,
|
||||||
|
minexp=-1022,
|
||||||
|
maxexp=1024,
|
||||||
|
it=105,
|
||||||
|
iexp=11,
|
||||||
|
ibeta=2,
|
||||||
|
irnd=5,
|
||||||
|
ngrd=0,
|
||||||
|
eps=exp2(ld(-105)),
|
||||||
|
epsneg= exp2(ld(-106)),
|
||||||
|
huge=huge_dd,
|
||||||
|
tiny=exp2(ld(-1022)))
|
||||||
|
# double double; low, high order (e.g. PPC 64)
|
||||||
|
_register_type(float_dd_ma,
|
||||||
|
b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
||||||
|
# double double; high, low order (e.g. PPC 64 le)
|
||||||
|
_register_type(float_dd_ma,
|
||||||
|
b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
|
||||||
|
_float_ma['dd'] = float_dd_ma
|
||||||
|
|
||||||
|
|
||||||
|
def _get_machar(ftype):
|
||||||
|
""" Get MachAr instance or MachAr-like instance
|
||||||
|
|
||||||
|
Get parameters for floating point type, by first trying signatures of
|
||||||
|
various known floating point types, then, if none match, attempting to
|
||||||
|
identify parameters by analysis.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
ftype : class
|
||||||
|
Numpy floating point type class (e.g. ``np.float64``)
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
ma_like : instance of :class:`MachAr` or :class:`MachArLike`
|
||||||
|
Object giving floating point parameters for `ftype`.
|
||||||
|
|
||||||
|
Warns
|
||||||
|
-----
|
||||||
|
UserWarning
|
||||||
|
If the binary signature of the float type is not in the dictionary of
|
||||||
|
known float types.
|
||||||
|
"""
|
||||||
|
params = _MACHAR_PARAMS.get(ftype)
|
||||||
|
if params is None:
|
||||||
|
raise ValueError(repr(ftype))
|
||||||
|
# Detect known / suspected types
|
||||||
|
key = ftype('-0.1').newbyteorder('<').tobytes()
|
||||||
|
ma_like = _KNOWN_TYPES.get(key)
|
||||||
|
# Could be 80 bit == 10 byte extended precision, where last bytes can be
|
||||||
|
# random garbage. Try comparing first 10 bytes to pattern.
|
||||||
|
if ma_like is None and ftype == ntypes.longdouble:
|
||||||
|
ma_like = _KNOWN_TYPES.get(key[:10])
|
||||||
|
if ma_like is not None:
|
||||||
|
return ma_like
|
||||||
|
# Fall back to parameter discovery
|
||||||
|
warnings.warn(
|
||||||
|
'Signature {} for {} does not match any known type: '
|
||||||
|
'falling back to type probe function'.format(key, ftype),
|
||||||
|
UserWarning, stacklevel=2)
|
||||||
|
return _discovered_machar(ftype)
|
||||||
|
|
||||||
|
|
||||||
|
def _discovered_machar(ftype):
|
||||||
|
""" Create MachAr instance with found information on float types
|
||||||
|
"""
|
||||||
|
params = _MACHAR_PARAMS[ftype]
|
||||||
|
return MachAr(lambda v: array([v], ftype),
|
||||||
|
lambda v:_fr0(v.astype(params['itype']))[0],
|
||||||
|
lambda v:array(_fr0(v)[0], ftype),
|
||||||
|
lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
|
||||||
|
params['title'])
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
class finfo(object):
|
||||||
|
"""
|
||||||
|
finfo(dtype)
|
||||||
|
|
||||||
|
Machine limits for floating point types.
|
||||||
|
|
||||||
|
Attributes
|
||||||
|
----------
|
||||||
|
bits : int
|
||||||
|
The number of bits occupied by the type.
|
||||||
|
eps : float
|
||||||
|
The smallest representable positive number such that
|
||||||
|
``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating
|
||||||
|
point type.
|
||||||
|
epsneg : floating point number of the appropriate type
|
||||||
|
The smallest representable positive number such that
|
||||||
|
``1.0 - epsneg != 1.0``.
|
||||||
|
iexp : int
|
||||||
|
The number of bits in the exponent portion of the floating point
|
||||||
|
representation.
|
||||||
|
machar : MachAr
|
||||||
|
The object which calculated these parameters and holds more
|
||||||
|
detailed information.
|
||||||
|
machep : int
|
||||||
|
The exponent that yields `eps`.
|
||||||
|
max : floating point number of the appropriate type
|
||||||
|
The largest representable number.
|
||||||
|
maxexp : int
|
||||||
|
The smallest positive power of the base (2) that causes overflow.
|
||||||
|
min : floating point number of the appropriate type
|
||||||
|
The smallest representable number, typically ``-max``.
|
||||||
|
minexp : int
|
||||||
|
The most negative power of the base (2) consistent with there
|
||||||
|
being no leading 0's in the mantissa.
|
||||||
|
negep : int
|
||||||
|
The exponent that yields `epsneg`.
|
||||||
|
nexp : int
|
||||||
|
The number of bits in the exponent including its sign and bias.
|
||||||
|
nmant : int
|
||||||
|
The number of bits in the mantissa.
|
||||||
|
precision : int
|
||||||
|
The approximate number of decimal digits to which this kind of
|
||||||
|
float is precise.
|
||||||
|
resolution : floating point number of the appropriate type
|
||||||
|
The approximate decimal resolution of this type, i.e.,
|
||||||
|
``10**-precision``.
|
||||||
|
tiny : float
|
||||||
|
The smallest positive usable number. Type of `tiny` is an
|
||||||
|
appropriate floating point type.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
dtype : float, dtype, or instance
|
||||||
|
Kind of floating point data-type about which to get information.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
MachAr : The implementation of the tests that produce this information.
|
||||||
|
iinfo : The equivalent for integer data types.
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
For developers of NumPy: do not instantiate this at the module level.
|
||||||
|
The initial calculation of these parameters is expensive and negatively
|
||||||
|
impacts import times. These objects are cached, so calling ``finfo()``
|
||||||
|
repeatedly inside your functions is not a problem.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
_finfo_cache = {}
|
||||||
|
|
||||||
|
def __new__(cls, dtype):
|
||||||
|
try:
|
||||||
|
dtype = numeric.dtype(dtype)
|
||||||
|
except TypeError:
|
||||||
|
# In case a float instance was given
|
||||||
|
dtype = numeric.dtype(type(dtype))
|
||||||
|
|
||||||
|
obj = cls._finfo_cache.get(dtype, None)
|
||||||
|
if obj is not None:
|
||||||
|
return obj
|
||||||
|
dtypes = [dtype]
|
||||||
|
newdtype = numeric.obj2sctype(dtype)
|
||||||
|
if newdtype is not dtype:
|
||||||
|
dtypes.append(newdtype)
|
||||||
|
dtype = newdtype
|
||||||
|
if not issubclass(dtype, numeric.inexact):
|
||||||
|
raise ValueError("data type %r not inexact" % (dtype))
|
||||||
|
obj = cls._finfo_cache.get(dtype, None)
|
||||||
|
if obj is not None:
|
||||||
|
return obj
|
||||||
|
if not issubclass(dtype, numeric.floating):
|
||||||
|
newdtype = _convert_to_float[dtype]
|
||||||
|
if newdtype is not dtype:
|
||||||
|
dtypes.append(newdtype)
|
||||||
|
dtype = newdtype
|
||||||
|
obj = cls._finfo_cache.get(dtype, None)
|
||||||
|
if obj is not None:
|
||||||
|
return obj
|
||||||
|
obj = object.__new__(cls)._init(dtype)
|
||||||
|
for dt in dtypes:
|
||||||
|
cls._finfo_cache[dt] = obj
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def _init(self, dtype):
|
||||||
|
self.dtype = numeric.dtype(dtype)
|
||||||
|
machar = _get_machar(dtype)
|
||||||
|
|
||||||
|
for word in ['precision', 'iexp',
|
||||||
|
'maxexp', 'minexp', 'negep',
|
||||||
|
'machep']:
|
||||||
|
setattr(self, word, getattr(machar, word))
|
||||||
|
for word in ['tiny', 'resolution', 'epsneg']:
|
||||||
|
setattr(self, word, getattr(machar, word).flat[0])
|
||||||
|
self.bits = self.dtype.itemsize * 8
|
||||||
|
self.max = machar.huge.flat[0]
|
||||||
|
self.min = -self.max
|
||||||
|
self.eps = machar.eps.flat[0]
|
||||||
|
self.nexp = machar.iexp
|
||||||
|
self.nmant = machar.it
|
||||||
|
self.machar = machar
|
||||||
|
self._str_tiny = machar._str_xmin.strip()
|
||||||
|
self._str_max = machar._str_xmax.strip()
|
||||||
|
self._str_epsneg = machar._str_epsneg.strip()
|
||||||
|
self._str_eps = machar._str_eps.strip()
|
||||||
|
self._str_resolution = machar._str_resolution.strip()
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
fmt = (
|
||||||
|
'Machine parameters for %(dtype)s\n'
|
||||||
|
'---------------------------------------------------------------\n'
|
||||||
|
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
|
||||||
|
'machep = %(machep)6s eps = %(_str_eps)s\n'
|
||||||
|
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
|
||||||
|
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
|
||||||
|
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
|
||||||
|
'nexp = %(nexp)6s min = -max\n'
|
||||||
|
'---------------------------------------------------------------\n'
|
||||||
|
)
|
||||||
|
return fmt % self.__dict__
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
c = self.__class__.__name__
|
||||||
|
d = self.__dict__.copy()
|
||||||
|
d['klass'] = c
|
||||||
|
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
|
||||||
|
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
class iinfo(object):
|
||||||
|
"""
|
||||||
|
iinfo(type)
|
||||||
|
|
||||||
|
Machine limits for integer types.
|
||||||
|
|
||||||
|
Attributes
|
||||||
|
----------
|
||||||
|
bits : int
|
||||||
|
The number of bits occupied by the type.
|
||||||
|
min : int
|
||||||
|
The smallest integer expressible by the type.
|
||||||
|
max : int
|
||||||
|
The largest integer expressible by the type.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
int_type : integer type, dtype, or instance
|
||||||
|
The kind of integer data type to get information about.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
finfo : The equivalent for floating point data types.
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
With types:
|
||||||
|
|
||||||
|
>>> ii16 = np.iinfo(np.int16)
|
||||||
|
>>> ii16.min
|
||||||
|
-32768
|
||||||
|
>>> ii16.max
|
||||||
|
32767
|
||||||
|
>>> ii32 = np.iinfo(np.int32)
|
||||||
|
>>> ii32.min
|
||||||
|
-2147483648
|
||||||
|
>>> ii32.max
|
||||||
|
2147483647
|
||||||
|
|
||||||
|
With instances:
|
||||||
|
|
||||||
|
>>> ii32 = np.iinfo(np.int32(10))
|
||||||
|
>>> ii32.min
|
||||||
|
-2147483648
|
||||||
|
>>> ii32.max
|
||||||
|
2147483647
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
_min_vals = {}
|
||||||
|
_max_vals = {}
|
||||||
|
|
||||||
|
def __init__(self, int_type):
|
||||||
|
try:
|
||||||
|
self.dtype = numeric.dtype(int_type)
|
||||||
|
except TypeError:
|
||||||
|
self.dtype = numeric.dtype(type(int_type))
|
||||||
|
self.kind = self.dtype.kind
|
||||||
|
self.bits = self.dtype.itemsize * 8
|
||||||
|
self.key = "%s%d" % (self.kind, self.bits)
|
||||||
|
if self.kind not in 'iu':
|
||||||
|
raise ValueError("Invalid integer data type %r." % (self.kind,))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def min(self):
|
||||||
|
"""Minimum value of given dtype."""
|
||||||
|
if self.kind == 'u':
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
val = iinfo._min_vals[self.key]
|
||||||
|
except KeyError:
|
||||||
|
val = int(-(1 << (self.bits-1)))
|
||||||
|
iinfo._min_vals[self.key] = val
|
||||||
|
return val
|
||||||
|
|
||||||
|
@property
|
||||||
|
def max(self):
|
||||||
|
"""Maximum value of given dtype."""
|
||||||
|
try:
|
||||||
|
val = iinfo._max_vals[self.key]
|
||||||
|
except KeyError:
|
||||||
|
if self.kind == 'u':
|
||||||
|
val = int((1 << self.bits) - 1)
|
||||||
|
else:
|
||||||
|
val = int((1 << (self.bits-1)) - 1)
|
||||||
|
iinfo._max_vals[self.key] = val
|
||||||
|
return val
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
"""String representation."""
|
||||||
|
fmt = (
|
||||||
|
'Machine parameters for %(dtype)s\n'
|
||||||
|
'---------------------------------------------------------------\n'
|
||||||
|
'min = %(min)s\n'
|
||||||
|
'max = %(max)s\n'
|
||||||
|
'---------------------------------------------------------------\n'
|
||||||
|
)
|
||||||
|
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
|
||||||
|
self.min, self.max, self.dtype)
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,326 @@
|
|||||||
|
|
||||||
|
#ifdef _UMATHMODULE
|
||||||
|
|
||||||
|
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
|
||||||
|
|
||||||
|
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
|
||||||
|
|
||||||
|
NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \
|
||||||
|
(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \
|
||||||
|
(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_GenericFunction \
|
||||||
|
(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_d_d \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_f_f \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_g_g \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_F_F \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_D_D \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_G_G \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_O_O \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_ff_f \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_dd_d \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_gg_g \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_DD_D \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_FF_F \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_GG_G \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_OO_O \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_O_O_method \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_OO_O_method \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_On_Om \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_GetPyValues \
|
||||||
|
(char *, int *, int *, PyObject **);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_checkfperr \
|
||||||
|
(int, PyObject *, int *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_clearfperr \
|
||||||
|
(void);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_getfperr \
|
||||||
|
(void);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_handlefperr \
|
||||||
|
(int, PyObject *, int, int *);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \
|
||||||
|
(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *);
|
||||||
|
NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \
|
||||||
|
(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \
|
||||||
|
(void **, size_t);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_e_e \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_ee_e \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \
|
||||||
|
(char **, npy_intp *, npy_intp *, void *);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \
|
||||||
|
(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_ValidateCasting \
|
||||||
|
(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **);
|
||||||
|
NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \
|
||||||
|
(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *);
|
||||||
|
NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
|
||||||
|
(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *);
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
|
||||||
|
#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
|
||||||
|
extern void **PyUFunc_API;
|
||||||
|
#else
|
||||||
|
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
|
||||||
|
void **PyUFunc_API;
|
||||||
|
#else
|
||||||
|
static void **PyUFunc_API=NULL;
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
|
||||||
|
#define PyUFunc_FromFuncAndData \
|
||||||
|
(*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \
|
||||||
|
PyUFunc_API[1])
|
||||||
|
#define PyUFunc_RegisterLoopForType \
|
||||||
|
(*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \
|
||||||
|
PyUFunc_API[2])
|
||||||
|
#define PyUFunc_GenericFunction \
|
||||||
|
(*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \
|
||||||
|
PyUFunc_API[3])
|
||||||
|
#define PyUFunc_f_f_As_d_d \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[4])
|
||||||
|
#define PyUFunc_d_d \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[5])
|
||||||
|
#define PyUFunc_f_f \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[6])
|
||||||
|
#define PyUFunc_g_g \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[7])
|
||||||
|
#define PyUFunc_F_F_As_D_D \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[8])
|
||||||
|
#define PyUFunc_F_F \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[9])
|
||||||
|
#define PyUFunc_D_D \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[10])
|
||||||
|
#define PyUFunc_G_G \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[11])
|
||||||
|
#define PyUFunc_O_O \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[12])
|
||||||
|
#define PyUFunc_ff_f_As_dd_d \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[13])
|
||||||
|
#define PyUFunc_ff_f \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[14])
|
||||||
|
#define PyUFunc_dd_d \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[15])
|
||||||
|
#define PyUFunc_gg_g \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[16])
|
||||||
|
#define PyUFunc_FF_F_As_DD_D \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[17])
|
||||||
|
#define PyUFunc_DD_D \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[18])
|
||||||
|
#define PyUFunc_FF_F \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[19])
|
||||||
|
#define PyUFunc_GG_G \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[20])
|
||||||
|
#define PyUFunc_OO_O \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[21])
|
||||||
|
#define PyUFunc_O_O_method \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[22])
|
||||||
|
#define PyUFunc_OO_O_method \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[23])
|
||||||
|
#define PyUFunc_On_Om \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[24])
|
||||||
|
#define PyUFunc_GetPyValues \
|
||||||
|
(*(int (*)(char *, int *, int *, PyObject **)) \
|
||||||
|
PyUFunc_API[25])
|
||||||
|
#define PyUFunc_checkfperr \
|
||||||
|
(*(int (*)(int, PyObject *, int *)) \
|
||||||
|
PyUFunc_API[26])
|
||||||
|
#define PyUFunc_clearfperr \
|
||||||
|
(*(void (*)(void)) \
|
||||||
|
PyUFunc_API[27])
|
||||||
|
#define PyUFunc_getfperr \
|
||||||
|
(*(int (*)(void)) \
|
||||||
|
PyUFunc_API[28])
|
||||||
|
#define PyUFunc_handlefperr \
|
||||||
|
(*(int (*)(int, PyObject *, int, int *)) \
|
||||||
|
PyUFunc_API[29])
|
||||||
|
#define PyUFunc_ReplaceLoopBySignature \
|
||||||
|
(*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \
|
||||||
|
PyUFunc_API[30])
|
||||||
|
#define PyUFunc_FromFuncAndDataAndSignature \
|
||||||
|
(*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \
|
||||||
|
PyUFunc_API[31])
|
||||||
|
#define PyUFunc_SetUsesArraysAsData \
|
||||||
|
(*(int (*)(void **, size_t)) \
|
||||||
|
PyUFunc_API[32])
|
||||||
|
#define PyUFunc_e_e \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[33])
|
||||||
|
#define PyUFunc_e_e_As_f_f \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[34])
|
||||||
|
#define PyUFunc_e_e_As_d_d \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[35])
|
||||||
|
#define PyUFunc_ee_e \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[36])
|
||||||
|
#define PyUFunc_ee_e_As_ff_f \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[37])
|
||||||
|
#define PyUFunc_ee_e_As_dd_d \
|
||||||
|
(*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
|
||||||
|
PyUFunc_API[38])
|
||||||
|
#define PyUFunc_DefaultTypeResolver \
|
||||||
|
(*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \
|
||||||
|
PyUFunc_API[39])
|
||||||
|
#define PyUFunc_ValidateCasting \
|
||||||
|
(*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \
|
||||||
|
PyUFunc_API[40])
|
||||||
|
#define PyUFunc_RegisterLoopForDescr \
|
||||||
|
(*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \
|
||||||
|
PyUFunc_API[41])
|
||||||
|
#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
|
||||||
|
(*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \
|
||||||
|
PyUFunc_API[42])
|
||||||
|
|
||||||
|
static NPY_INLINE int
|
||||||
|
_import_umath(void)
|
||||||
|
{
|
||||||
|
PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
|
||||||
|
PyObject *c_api = NULL;
|
||||||
|
|
||||||
|
if (numpy == NULL) {
|
||||||
|
PyErr_SetString(PyExc_ImportError,
|
||||||
|
"numpy.core._multiarray_umath failed to import");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
|
||||||
|
Py_DECREF(numpy);
|
||||||
|
if (c_api == NULL) {
|
||||||
|
PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if PY_VERSION_HEX >= 0x03000000
|
||||||
|
if (!PyCapsule_CheckExact(c_api)) {
|
||||||
|
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
|
||||||
|
Py_DECREF(c_api);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
|
||||||
|
#else
|
||||||
|
if (!PyCObject_Check(c_api)) {
|
||||||
|
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object");
|
||||||
|
Py_DECREF(c_api);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api);
|
||||||
|
#endif
|
||||||
|
Py_DECREF(c_api);
|
||||||
|
if (PyUFunc_API == NULL) {
|
||||||
|
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if PY_VERSION_HEX >= 0x03000000
|
||||||
|
#define NUMPY_IMPORT_UMATH_RETVAL NULL
|
||||||
|
#else
|
||||||
|
#define NUMPY_IMPORT_UMATH_RETVAL
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define import_umath() \
|
||||||
|
do {\
|
||||||
|
UFUNC_NOFPE\
|
||||||
|
if (_import_umath() < 0) {\
|
||||||
|
PyErr_Print();\
|
||||||
|
PyErr_SetString(PyExc_ImportError,\
|
||||||
|
"numpy.core.umath failed to import");\
|
||||||
|
return NUMPY_IMPORT_UMATH_RETVAL;\
|
||||||
|
}\
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define import_umath1(ret) \
|
||||||
|
do {\
|
||||||
|
UFUNC_NOFPE\
|
||||||
|
if (_import_umath() < 0) {\
|
||||||
|
PyErr_Print();\
|
||||||
|
PyErr_SetString(PyExc_ImportError,\
|
||||||
|
"numpy.core.umath failed to import");\
|
||||||
|
return ret;\
|
||||||
|
}\
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define import_umath2(ret, msg) \
|
||||||
|
do {\
|
||||||
|
UFUNC_NOFPE\
|
||||||
|
if (_import_umath() < 0) {\
|
||||||
|
PyErr_Print();\
|
||||||
|
PyErr_SetString(PyExc_ImportError, msg);\
|
||||||
|
return ret;\
|
||||||
|
}\
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define import_ufunc() \
|
||||||
|
do {\
|
||||||
|
UFUNC_NOFPE\
|
||||||
|
if (_import_umath() < 0) {\
|
||||||
|
PyErr_Print();\
|
||||||
|
PyErr_SetString(PyExc_ImportError,\
|
||||||
|
"numpy.core.umath failed to import");\
|
||||||
|
}\
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,90 @@
|
|||||||
|
#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP
|
||||||
|
#error You should not include this header directly
|
||||||
|
#endif
|
||||||
|
/*
|
||||||
|
* Private API (here for inline)
|
||||||
|
*/
|
||||||
|
static NPY_INLINE int
|
||||||
|
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update to next item of the iterator
|
||||||
|
*
|
||||||
|
* Note: this simply increment the coordinates vector, last dimension
|
||||||
|
* incremented first , i.e, for dimension 3
|
||||||
|
* ...
|
||||||
|
* -1, -1, -1
|
||||||
|
* -1, -1, 0
|
||||||
|
* -1, -1, 1
|
||||||
|
* ....
|
||||||
|
* -1, 0, -1
|
||||||
|
* -1, 0, 0
|
||||||
|
* ....
|
||||||
|
* 0, -1, -1
|
||||||
|
* 0, -1, 0
|
||||||
|
* ....
|
||||||
|
*/
|
||||||
|
#define _UPDATE_COORD_ITER(c) \
|
||||||
|
wb = iter->coordinates[c] < iter->bounds[c][1]; \
|
||||||
|
if (wb) { \
|
||||||
|
iter->coordinates[c] += 1; \
|
||||||
|
return 0; \
|
||||||
|
} \
|
||||||
|
else { \
|
||||||
|
iter->coordinates[c] = iter->bounds[c][0]; \
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE int
|
||||||
|
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
|
||||||
|
{
|
||||||
|
npy_intp i, wb;
|
||||||
|
|
||||||
|
for (i = iter->nd - 1; i >= 0; --i) {
|
||||||
|
_UPDATE_COORD_ITER(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Version optimized for 2d arrays, manual loop unrolling
|
||||||
|
*/
|
||||||
|
static NPY_INLINE int
|
||||||
|
_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
|
||||||
|
{
|
||||||
|
npy_intp wb;
|
||||||
|
|
||||||
|
_UPDATE_COORD_ITER(1)
|
||||||
|
_UPDATE_COORD_ITER(0)
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#undef _UPDATE_COORD_ITER
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Advance to the next neighbour
|
||||||
|
*/
|
||||||
|
static NPY_INLINE int
|
||||||
|
PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
|
||||||
|
{
|
||||||
|
_PyArrayNeighborhoodIter_IncrCoord (iter);
|
||||||
|
iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Reset functions
|
||||||
|
*/
|
||||||
|
static NPY_INLINE int
|
||||||
|
PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
|
||||||
|
{
|
||||||
|
npy_intp i;
|
||||||
|
|
||||||
|
for (i = 0; i < iter->nd; ++i) {
|
||||||
|
iter->coordinates[i] = iter->bounds[i][0];
|
||||||
|
}
|
||||||
|
iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
@ -0,0 +1,29 @@
|
|||||||
|
#define NPY_SIZEOF_SHORT SIZEOF_SHORT
|
||||||
|
#define NPY_SIZEOF_INT SIZEOF_INT
|
||||||
|
#define NPY_SIZEOF_LONG SIZEOF_LONG
|
||||||
|
#define NPY_SIZEOF_FLOAT 4
|
||||||
|
#define NPY_SIZEOF_COMPLEX_FLOAT 8
|
||||||
|
#define NPY_SIZEOF_DOUBLE 8
|
||||||
|
#define NPY_SIZEOF_COMPLEX_DOUBLE 16
|
||||||
|
#define NPY_SIZEOF_LONGDOUBLE 8
|
||||||
|
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
|
||||||
|
#define NPY_SIZEOF_PY_INTPTR_T 8
|
||||||
|
#define NPY_SIZEOF_OFF_T 4
|
||||||
|
#define NPY_SIZEOF_PY_LONG_LONG 8
|
||||||
|
#define NPY_SIZEOF_LONGLONG 8
|
||||||
|
#define NPY_NO_SIGNAL 1
|
||||||
|
#define NPY_NO_SMP 0
|
||||||
|
#define NPY_HAVE_DECL_ISNAN
|
||||||
|
#define NPY_HAVE_DECL_ISINF
|
||||||
|
#define NPY_HAVE_DECL_SIGNBIT
|
||||||
|
#define NPY_HAVE_DECL_ISFINITE
|
||||||
|
#define NPY_USE_C99_COMPLEX 1
|
||||||
|
#define NPY_RELAXED_STRIDES_CHECKING 1
|
||||||
|
#define NPY_USE_C99_FORMATS 1
|
||||||
|
#define NPY_VISIBILITY_HIDDEN
|
||||||
|
#define NPY_ABI_VERSION 0x01000009
|
||||||
|
#define NPY_API_VERSION 0x0000000D
|
||||||
|
|
||||||
|
#ifndef __STDC_FORMAT_MACROS
|
||||||
|
#define __STDC_FORMAT_MACROS 1
|
||||||
|
#endif
|
@ -0,0 +1,11 @@
|
|||||||
|
#ifndef Py_ARRAYOBJECT_H
|
||||||
|
#define Py_ARRAYOBJECT_H
|
||||||
|
|
||||||
|
#include "ndarrayobject.h"
|
||||||
|
#include "npy_interrupt.h"
|
||||||
|
|
||||||
|
#ifdef NPY_NO_PREFIX
|
||||||
|
#include "noprefix.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,175 @@
|
|||||||
|
#ifndef _NPY_ARRAYSCALARS_H_
|
||||||
|
#define _NPY_ARRAYSCALARS_H_
|
||||||
|
|
||||||
|
#ifndef _MULTIARRAYMODULE
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_bool obval;
|
||||||
|
} PyBoolScalarObject;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
signed char obval;
|
||||||
|
} PyByteScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
short obval;
|
||||||
|
} PyShortScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
int obval;
|
||||||
|
} PyIntScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
long obval;
|
||||||
|
} PyLongScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_longlong obval;
|
||||||
|
} PyLongLongScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
unsigned char obval;
|
||||||
|
} PyUByteScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
unsigned short obval;
|
||||||
|
} PyUShortScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
unsigned int obval;
|
||||||
|
} PyUIntScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
unsigned long obval;
|
||||||
|
} PyULongScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_ulonglong obval;
|
||||||
|
} PyULongLongScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_half obval;
|
||||||
|
} PyHalfScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
float obval;
|
||||||
|
} PyFloatScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
double obval;
|
||||||
|
} PyDoubleScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_longdouble obval;
|
||||||
|
} PyLongDoubleScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_cfloat obval;
|
||||||
|
} PyCFloatScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_cdouble obval;
|
||||||
|
} PyCDoubleScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_clongdouble obval;
|
||||||
|
} PyCLongDoubleScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
PyObject * obval;
|
||||||
|
} PyObjectScalarObject;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_datetime obval;
|
||||||
|
PyArray_DatetimeMetaData obmeta;
|
||||||
|
} PyDatetimeScalarObject;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
npy_timedelta obval;
|
||||||
|
PyArray_DatetimeMetaData obmeta;
|
||||||
|
} PyTimedeltaScalarObject;
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
char obval;
|
||||||
|
} PyScalarObject;
|
||||||
|
|
||||||
|
#define PyStringScalarObject PyStringObject
|
||||||
|
#define PyUnicodeScalarObject PyUnicodeObject
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_VAR_HEAD
|
||||||
|
char *obval;
|
||||||
|
PyArray_Descr *descr;
|
||||||
|
int flags;
|
||||||
|
PyObject *base;
|
||||||
|
} PyVoidScalarObject;
|
||||||
|
|
||||||
|
/* Macros
|
||||||
|
Py<Cls><bitsize>ScalarObject
|
||||||
|
Py<Cls><bitsize>ArrType_Type
|
||||||
|
are defined in ndarrayobject.h
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))
|
||||||
|
#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
|
||||||
|
#define PyArrayScalar_FromLong(i) \
|
||||||
|
((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
|
||||||
|
#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
|
||||||
|
return Py_INCREF(PyArrayScalar_FromLong(i)), \
|
||||||
|
PyArrayScalar_FromLong(i)
|
||||||
|
#define PyArrayScalar_RETURN_FALSE \
|
||||||
|
return Py_INCREF(PyArrayScalar_False), \
|
||||||
|
PyArrayScalar_False
|
||||||
|
#define PyArrayScalar_RETURN_TRUE \
|
||||||
|
return Py_INCREF(PyArrayScalar_True), \
|
||||||
|
PyArrayScalar_True
|
||||||
|
|
||||||
|
#define PyArrayScalar_New(cls) \
|
||||||
|
Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)
|
||||||
|
#define PyArrayScalar_VAL(obj, cls) \
|
||||||
|
((Py##cls##ScalarObject *)obj)->obval
|
||||||
|
#define PyArrayScalar_ASSIGN(obj, cls, val) \
|
||||||
|
PyArrayScalar_VAL(obj, cls) = val
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,70 @@
|
|||||||
|
#ifndef __NPY_HALFFLOAT_H__
|
||||||
|
#define __NPY_HALFFLOAT_H__
|
||||||
|
|
||||||
|
#include <Python.h>
|
||||||
|
#include <numpy/npy_math.h>
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Half-precision routines
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Conversions */
|
||||||
|
float npy_half_to_float(npy_half h);
|
||||||
|
double npy_half_to_double(npy_half h);
|
||||||
|
npy_half npy_float_to_half(float f);
|
||||||
|
npy_half npy_double_to_half(double d);
|
||||||
|
/* Comparisons */
|
||||||
|
int npy_half_eq(npy_half h1, npy_half h2);
|
||||||
|
int npy_half_ne(npy_half h1, npy_half h2);
|
||||||
|
int npy_half_le(npy_half h1, npy_half h2);
|
||||||
|
int npy_half_lt(npy_half h1, npy_half h2);
|
||||||
|
int npy_half_ge(npy_half h1, npy_half h2);
|
||||||
|
int npy_half_gt(npy_half h1, npy_half h2);
|
||||||
|
/* faster *_nonan variants for when you know h1 and h2 are not NaN */
|
||||||
|
int npy_half_eq_nonan(npy_half h1, npy_half h2);
|
||||||
|
int npy_half_lt_nonan(npy_half h1, npy_half h2);
|
||||||
|
int npy_half_le_nonan(npy_half h1, npy_half h2);
|
||||||
|
/* Miscellaneous functions */
|
||||||
|
int npy_half_iszero(npy_half h);
|
||||||
|
int npy_half_isnan(npy_half h);
|
||||||
|
int npy_half_isinf(npy_half h);
|
||||||
|
int npy_half_isfinite(npy_half h);
|
||||||
|
int npy_half_signbit(npy_half h);
|
||||||
|
npy_half npy_half_copysign(npy_half x, npy_half y);
|
||||||
|
npy_half npy_half_spacing(npy_half h);
|
||||||
|
npy_half npy_half_nextafter(npy_half x, npy_half y);
|
||||||
|
npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Half-precision constants
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define NPY_HALF_ZERO (0x0000u)
|
||||||
|
#define NPY_HALF_PZERO (0x0000u)
|
||||||
|
#define NPY_HALF_NZERO (0x8000u)
|
||||||
|
#define NPY_HALF_ONE (0x3c00u)
|
||||||
|
#define NPY_HALF_NEGONE (0xbc00u)
|
||||||
|
#define NPY_HALF_PINF (0x7c00u)
|
||||||
|
#define NPY_HALF_NINF (0xfc00u)
|
||||||
|
#define NPY_HALF_NAN (0x7e00u)
|
||||||
|
|
||||||
|
#define NPY_MAX_HALF (0x7bffu)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Bit-level conversions
|
||||||
|
*/
|
||||||
|
|
||||||
|
npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
|
||||||
|
npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
|
||||||
|
npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
|
||||||
|
npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,285 @@
|
|||||||
|
/*
|
||||||
|
* DON'T INCLUDE THIS DIRECTLY.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef NPY_NDARRAYOBJECT_H
|
||||||
|
#define NPY_NDARRAYOBJECT_H
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <Python.h>
|
||||||
|
#include "ndarraytypes.h"
|
||||||
|
|
||||||
|
/* Includes the "function" C-API -- these are all stored in a
|
||||||
|
list of pointers --- one for each file
|
||||||
|
The two lists are concatenated into one in multiarray.
|
||||||
|
|
||||||
|
They are available as import_array()
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "__multiarray_api.h"
|
||||||
|
|
||||||
|
|
||||||
|
/* C-API that requires previous API to be defined */
|
||||||
|
|
||||||
|
#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
|
||||||
|
|
||||||
|
#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
|
||||||
|
#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
|
||||||
|
|
||||||
|
#define PyArray_HasArrayInterfaceType(op, type, context, out) \
|
||||||
|
((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \
|
||||||
|
(((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \
|
||||||
|
(((out)=PyArray_FromArrayAttr(op, type, context)) != \
|
||||||
|
Py_NotImplemented))
|
||||||
|
|
||||||
|
#define PyArray_HasArrayInterface(op, out) \
|
||||||
|
PyArray_HasArrayInterfaceType(op, NULL, NULL, out)
|
||||||
|
|
||||||
|
#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \
|
||||||
|
(PyArray_NDIM((PyArrayObject *)op) == 0))
|
||||||
|
|
||||||
|
#define PyArray_IsScalar(obj, cls) \
|
||||||
|
(PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))
|
||||||
|
|
||||||
|
#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
|
||||||
|
PyArray_IsZeroDim(m))
|
||||||
|
#if PY_MAJOR_VERSION >= 3
|
||||||
|
#define PyArray_IsPythonNumber(obj) \
|
||||||
|
(PyFloat_Check(obj) || PyComplex_Check(obj) || \
|
||||||
|
PyLong_Check(obj) || PyBool_Check(obj))
|
||||||
|
#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \
|
||||||
|
|| PyArray_IsScalar((obj), Integer))
|
||||||
|
#define PyArray_IsPythonScalar(obj) \
|
||||||
|
(PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \
|
||||||
|
PyUnicode_Check(obj))
|
||||||
|
#else
|
||||||
|
#define PyArray_IsPythonNumber(obj) \
|
||||||
|
(PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \
|
||||||
|
PyLong_Check(obj) || PyBool_Check(obj))
|
||||||
|
#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \
|
||||||
|
|| PyLong_Check(obj) \
|
||||||
|
|| PyArray_IsScalar((obj), Integer))
|
||||||
|
#define PyArray_IsPythonScalar(obj) \
|
||||||
|
(PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \
|
||||||
|
PyUnicode_Check(obj))
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define PyArray_IsAnyScalar(obj) \
|
||||||
|
(PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
|
||||||
|
|
||||||
|
#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \
|
||||||
|
PyArray_CheckScalar(obj))
|
||||||
|
|
||||||
|
|
||||||
|
#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \
|
||||||
|
Py_INCREF(m), (m) : \
|
||||||
|
(PyArrayObject *)(PyArray_Copy(m)))
|
||||||
|
|
||||||
|
#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \
|
||||||
|
PyArray_CompareLists(PyArray_DIMS(a1), \
|
||||||
|
PyArray_DIMS(a2), \
|
||||||
|
PyArray_NDIM(a1)))
|
||||||
|
|
||||||
|
#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
|
||||||
|
#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
|
||||||
|
#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)
|
||||||
|
|
||||||
|
#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \
|
||||||
|
NULL)
|
||||||
|
|
||||||
|
#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
|
||||||
|
PyArray_DescrFromType(type), 0, 0, 0, NULL)
|
||||||
|
|
||||||
|
#define PyArray_FROM_OTF(m, type, flags) \
|
||||||
|
PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
|
||||||
|
(((flags) & NPY_ARRAY_ENSURECOPY) ? \
|
||||||
|
((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)
|
||||||
|
|
||||||
|
#define PyArray_FROMANY(m, type, min, max, flags) \
|
||||||
|
PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \
|
||||||
|
(((flags) & NPY_ARRAY_ENSURECOPY) ? \
|
||||||
|
(flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)
|
||||||
|
|
||||||
|
#define PyArray_ZEROS(m, dims, type, is_f_order) \
|
||||||
|
PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)
|
||||||
|
|
||||||
|
#define PyArray_EMPTY(m, dims, type, is_f_order) \
|
||||||
|
PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)
|
||||||
|
|
||||||
|
#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \
|
||||||
|
PyArray_NBYTES(obj))
|
||||||
|
#ifndef PYPY_VERSION
|
||||||
|
#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt)
|
||||||
|
#define NPY_REFCOUNT PyArray_REFCOUNT
|
||||||
|
#endif
|
||||||
|
#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE)
|
||||||
|
|
||||||
|
#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
|
||||||
|
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||||
|
max_depth, NPY_ARRAY_DEFAULT, NULL)
|
||||||
|
|
||||||
|
#define PyArray_EquivArrTypes(a1, a2) \
|
||||||
|
PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))
|
||||||
|
|
||||||
|
#define PyArray_EquivByteorders(b1, b2) \
|
||||||
|
(((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))
|
||||||
|
|
||||||
|
#define PyArray_SimpleNew(nd, dims, typenum) \
|
||||||
|
PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)
|
||||||
|
|
||||||
|
#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
|
||||||
|
PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \
|
||||||
|
data, 0, NPY_ARRAY_CARRAY, NULL)
|
||||||
|
|
||||||
|
#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
|
||||||
|
PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \
|
||||||
|
NULL, NULL, 0, NULL)
|
||||||
|
|
||||||
|
#define PyArray_ToScalar(data, arr) \
|
||||||
|
PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)
|
||||||
|
|
||||||
|
|
||||||
|
/* These might be faster without the dereferencing of obj
|
||||||
|
going on inside -- of course an optimizing compiler should
|
||||||
|
inline the constants inside a for loop making it a moot point
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
|
||||||
|
(i)*PyArray_STRIDES(obj)[0]))
|
||||||
|
|
||||||
|
#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
|
||||||
|
(i)*PyArray_STRIDES(obj)[0] + \
|
||||||
|
(j)*PyArray_STRIDES(obj)[1]))
|
||||||
|
|
||||||
|
#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
|
||||||
|
(i)*PyArray_STRIDES(obj)[0] + \
|
||||||
|
(j)*PyArray_STRIDES(obj)[1] + \
|
||||||
|
(k)*PyArray_STRIDES(obj)[2]))
|
||||||
|
|
||||||
|
#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
|
||||||
|
(i)*PyArray_STRIDES(obj)[0] + \
|
||||||
|
(j)*PyArray_STRIDES(obj)[1] + \
|
||||||
|
(k)*PyArray_STRIDES(obj)[2] + \
|
||||||
|
(l)*PyArray_STRIDES(obj)[3]))
|
||||||
|
|
||||||
|
/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */
|
||||||
|
static NPY_INLINE void
|
||||||
|
PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
|
||||||
|
{
|
||||||
|
PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
|
||||||
|
if (fa && fa->base) {
|
||||||
|
if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) ||
|
||||||
|
(fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) {
|
||||||
|
PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
|
||||||
|
Py_DECREF(fa->base);
|
||||||
|
fa->base = NULL;
|
||||||
|
PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
|
||||||
|
PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#define PyArray_DESCR_REPLACE(descr) do { \
|
||||||
|
PyArray_Descr *_new_; \
|
||||||
|
_new_ = PyArray_DescrNew(descr); \
|
||||||
|
Py_XDECREF(descr); \
|
||||||
|
descr = _new_; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
/* Copy should always return contiguous array */
|
||||||
|
#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)
|
||||||
|
|
||||||
|
#define PyArray_FromObject(op, type, min_depth, max_depth) \
|
||||||
|
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||||
|
max_depth, NPY_ARRAY_BEHAVED | \
|
||||||
|
NPY_ARRAY_ENSUREARRAY, NULL)
|
||||||
|
|
||||||
|
#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
|
||||||
|
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||||
|
max_depth, NPY_ARRAY_DEFAULT | \
|
||||||
|
NPY_ARRAY_ENSUREARRAY, NULL)
|
||||||
|
|
||||||
|
#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
|
||||||
|
PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
|
||||||
|
max_depth, NPY_ARRAY_ENSURECOPY | \
|
||||||
|
NPY_ARRAY_DEFAULT | \
|
||||||
|
NPY_ARRAY_ENSUREARRAY, NULL)
|
||||||
|
|
||||||
|
#define PyArray_Cast(mp, type_num) \
|
||||||
|
PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)
|
||||||
|
|
||||||
|
#define PyArray_Take(ap, items, axis) \
|
||||||
|
PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)
|
||||||
|
|
||||||
|
#define PyArray_Put(ap, items, values) \
|
||||||
|
PyArray_PutTo(ap, items, values, NPY_RAISE)
|
||||||
|
|
||||||
|
/* Compatibility with old Numeric stuff -- don't use in new code */
|
||||||
|
|
||||||
|
#define PyArray_FromDimsAndData(nd, d, type, data) \
|
||||||
|
PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \
|
||||||
|
data)
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
Check to see if this key in the dictionary is the "title"
|
||||||
|
entry of the tuple (i.e. a duplicate dictionary entry in the fields
|
||||||
|
dict.
|
||||||
|
*/
|
||||||
|
|
||||||
|
static NPY_INLINE int
|
||||||
|
NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
|
||||||
|
{
|
||||||
|
PyObject *title;
|
||||||
|
if (PyTuple_Size(value) != 3) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
title = PyTuple_GetItem(value, 2);
|
||||||
|
if (key == title) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
#ifdef PYPY_VERSION
|
||||||
|
/*
|
||||||
|
* On PyPy, dictionary keys do not always preserve object identity.
|
||||||
|
* Fall back to comparison by value.
|
||||||
|
*/
|
||||||
|
if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
|
||||||
|
return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
|
||||||
|
}
|
||||||
|
#if PY_VERSION_HEX < 0x03000000
|
||||||
|
if (PyString_Check(title) && PyString_Check(key)) {
|
||||||
|
return PyObject_Compare(title, key) == 0 ? 1 : 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
|
||||||
|
#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
|
||||||
|
|
||||||
|
#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
|
||||||
|
#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
|
||||||
|
|
||||||
|
#if !defined(NPY_NO_DEPRECATED_API) || \
|
||||||
|
(NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION)
|
||||||
|
static NPY_INLINE void
|
||||||
|
PyArray_XDECREF_ERR(PyArrayObject *arr)
|
||||||
|
{
|
||||||
|
/* 2017-Nov-10 1.14 */
|
||||||
|
DEPRECATE("PyArray_XDECREF_ERR is deprecated, call "
|
||||||
|
"PyArray_DiscardWritebackIfCopy then Py_XDECREF instead");
|
||||||
|
PyArray_DiscardWritebackIfCopy(arr);
|
||||||
|
Py_XDECREF(arr);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
#endif /* NPY_NDARRAYOBJECT_H */
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,212 @@
|
|||||||
|
#ifndef NPY_NOPREFIX_H
|
||||||
|
#define NPY_NOPREFIX_H
|
||||||
|
|
||||||
|
/*
|
||||||
|
* You can directly include noprefix.h as a backward
|
||||||
|
* compatibility measure
|
||||||
|
*/
|
||||||
|
#ifndef NPY_NO_PREFIX
|
||||||
|
#include "ndarrayobject.h"
|
||||||
|
#include "npy_interrupt.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define SIGSETJMP NPY_SIGSETJMP
|
||||||
|
#define SIGLONGJMP NPY_SIGLONGJMP
|
||||||
|
#define SIGJMP_BUF NPY_SIGJMP_BUF
|
||||||
|
|
||||||
|
#define MAX_DIMS NPY_MAXDIMS
|
||||||
|
|
||||||
|
#define longlong npy_longlong
|
||||||
|
#define ulonglong npy_ulonglong
|
||||||
|
#define Bool npy_bool
|
||||||
|
#define longdouble npy_longdouble
|
||||||
|
#define byte npy_byte
|
||||||
|
|
||||||
|
#ifndef _BSD_SOURCE
|
||||||
|
#define ushort npy_ushort
|
||||||
|
#define uint npy_uint
|
||||||
|
#define ulong npy_ulong
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define ubyte npy_ubyte
|
||||||
|
#define ushort npy_ushort
|
||||||
|
#define uint npy_uint
|
||||||
|
#define ulong npy_ulong
|
||||||
|
#define cfloat npy_cfloat
|
||||||
|
#define cdouble npy_cdouble
|
||||||
|
#define clongdouble npy_clongdouble
|
||||||
|
#define Int8 npy_int8
|
||||||
|
#define UInt8 npy_uint8
|
||||||
|
#define Int16 npy_int16
|
||||||
|
#define UInt16 npy_uint16
|
||||||
|
#define Int32 npy_int32
|
||||||
|
#define UInt32 npy_uint32
|
||||||
|
#define Int64 npy_int64
|
||||||
|
#define UInt64 npy_uint64
|
||||||
|
#define Int128 npy_int128
|
||||||
|
#define UInt128 npy_uint128
|
||||||
|
#define Int256 npy_int256
|
||||||
|
#define UInt256 npy_uint256
|
||||||
|
#define Float16 npy_float16
|
||||||
|
#define Complex32 npy_complex32
|
||||||
|
#define Float32 npy_float32
|
||||||
|
#define Complex64 npy_complex64
|
||||||
|
#define Float64 npy_float64
|
||||||
|
#define Complex128 npy_complex128
|
||||||
|
#define Float80 npy_float80
|
||||||
|
#define Complex160 npy_complex160
|
||||||
|
#define Float96 npy_float96
|
||||||
|
#define Complex192 npy_complex192
|
||||||
|
#define Float128 npy_float128
|
||||||
|
#define Complex256 npy_complex256
|
||||||
|
#define intp npy_intp
|
||||||
|
#define uintp npy_uintp
|
||||||
|
#define datetime npy_datetime
|
||||||
|
#define timedelta npy_timedelta
|
||||||
|
|
||||||
|
#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG
|
||||||
|
#define SIZEOF_INTP NPY_SIZEOF_INTP
|
||||||
|
#define SIZEOF_UINTP NPY_SIZEOF_UINTP
|
||||||
|
#define SIZEOF_HALF NPY_SIZEOF_HALF
|
||||||
|
#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE
|
||||||
|
#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME
|
||||||
|
#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA
|
||||||
|
|
||||||
|
#define LONGLONG_FMT NPY_LONGLONG_FMT
|
||||||
|
#define ULONGLONG_FMT NPY_ULONGLONG_FMT
|
||||||
|
#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX
|
||||||
|
#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX
|
||||||
|
|
||||||
|
#define MAX_INT8 127
|
||||||
|
#define MIN_INT8 -128
|
||||||
|
#define MAX_UINT8 255
|
||||||
|
#define MAX_INT16 32767
|
||||||
|
#define MIN_INT16 -32768
|
||||||
|
#define MAX_UINT16 65535
|
||||||
|
#define MAX_INT32 2147483647
|
||||||
|
#define MIN_INT32 (-MAX_INT32 - 1)
|
||||||
|
#define MAX_UINT32 4294967295U
|
||||||
|
#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807)
|
||||||
|
#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1))
|
||||||
|
#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615)
|
||||||
|
#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864)
|
||||||
|
#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1))
|
||||||
|
#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
|
||||||
|
#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
|
||||||
|
#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1))
|
||||||
|
#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
|
||||||
|
|
||||||
|
#define MAX_BYTE NPY_MAX_BYTE
|
||||||
|
#define MIN_BYTE NPY_MIN_BYTE
|
||||||
|
#define MAX_UBYTE NPY_MAX_UBYTE
|
||||||
|
#define MAX_SHORT NPY_MAX_SHORT
|
||||||
|
#define MIN_SHORT NPY_MIN_SHORT
|
||||||
|
#define MAX_USHORT NPY_MAX_USHORT
|
||||||
|
#define MAX_INT NPY_MAX_INT
|
||||||
|
#define MIN_INT NPY_MIN_INT
|
||||||
|
#define MAX_UINT NPY_MAX_UINT
|
||||||
|
#define MAX_LONG NPY_MAX_LONG
|
||||||
|
#define MIN_LONG NPY_MIN_LONG
|
||||||
|
#define MAX_ULONG NPY_MAX_ULONG
|
||||||
|
#define MAX_LONGLONG NPY_MAX_LONGLONG
|
||||||
|
#define MIN_LONGLONG NPY_MIN_LONGLONG
|
||||||
|
#define MAX_ULONGLONG NPY_MAX_ULONGLONG
|
||||||
|
#define MIN_DATETIME NPY_MIN_DATETIME
|
||||||
|
#define MAX_DATETIME NPY_MAX_DATETIME
|
||||||
|
#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA
|
||||||
|
#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA
|
||||||
|
|
||||||
|
#define BITSOF_BOOL NPY_BITSOF_BOOL
|
||||||
|
#define BITSOF_CHAR NPY_BITSOF_CHAR
|
||||||
|
#define BITSOF_SHORT NPY_BITSOF_SHORT
|
||||||
|
#define BITSOF_INT NPY_BITSOF_INT
|
||||||
|
#define BITSOF_LONG NPY_BITSOF_LONG
|
||||||
|
#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG
|
||||||
|
#define BITSOF_HALF NPY_BITSOF_HALF
|
||||||
|
#define BITSOF_FLOAT NPY_BITSOF_FLOAT
|
||||||
|
#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE
|
||||||
|
#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE
|
||||||
|
#define BITSOF_DATETIME NPY_BITSOF_DATETIME
|
||||||
|
#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA
|
||||||
|
|
||||||
|
#define _pya_malloc PyArray_malloc
|
||||||
|
#define _pya_free PyArray_free
|
||||||
|
#define _pya_realloc PyArray_realloc
|
||||||
|
|
||||||
|
#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF
|
||||||
|
#define BEGIN_THREADS NPY_BEGIN_THREADS
|
||||||
|
#define END_THREADS NPY_END_THREADS
|
||||||
|
#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF
|
||||||
|
#define ALLOW_C_API NPY_ALLOW_C_API
|
||||||
|
#define DISABLE_C_API NPY_DISABLE_C_API
|
||||||
|
|
||||||
|
#define PY_FAIL NPY_FAIL
|
||||||
|
#define PY_SUCCEED NPY_SUCCEED
|
||||||
|
|
||||||
|
#ifndef TRUE
|
||||||
|
#define TRUE NPY_TRUE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef FALSE
|
||||||
|
#define FALSE NPY_FALSE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT
|
||||||
|
|
||||||
|
#define CONTIGUOUS NPY_CONTIGUOUS
|
||||||
|
#define C_CONTIGUOUS NPY_C_CONTIGUOUS
|
||||||
|
#define FORTRAN NPY_FORTRAN
|
||||||
|
#define F_CONTIGUOUS NPY_F_CONTIGUOUS
|
||||||
|
#define OWNDATA NPY_OWNDATA
|
||||||
|
#define FORCECAST NPY_FORCECAST
|
||||||
|
#define ENSURECOPY NPY_ENSURECOPY
|
||||||
|
#define ENSUREARRAY NPY_ENSUREARRAY
|
||||||
|
#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES
|
||||||
|
#define ALIGNED NPY_ALIGNED
|
||||||
|
#define NOTSWAPPED NPY_NOTSWAPPED
|
||||||
|
#define WRITEABLE NPY_WRITEABLE
|
||||||
|
#define UPDATEIFCOPY NPY_UPDATEIFCOPY
|
||||||
|
#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY
|
||||||
|
#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR
|
||||||
|
#define BEHAVED NPY_BEHAVED
|
||||||
|
#define BEHAVED_NS NPY_BEHAVED_NS
|
||||||
|
#define CARRAY NPY_CARRAY
|
||||||
|
#define CARRAY_RO NPY_CARRAY_RO
|
||||||
|
#define FARRAY NPY_FARRAY
|
||||||
|
#define FARRAY_RO NPY_FARRAY_RO
|
||||||
|
#define DEFAULT NPY_DEFAULT
|
||||||
|
#define IN_ARRAY NPY_IN_ARRAY
|
||||||
|
#define OUT_ARRAY NPY_OUT_ARRAY
|
||||||
|
#define INOUT_ARRAY NPY_INOUT_ARRAY
|
||||||
|
#define IN_FARRAY NPY_IN_FARRAY
|
||||||
|
#define OUT_FARRAY NPY_OUT_FARRAY
|
||||||
|
#define INOUT_FARRAY NPY_INOUT_FARRAY
|
||||||
|
#define UPDATE_ALL NPY_UPDATE_ALL
|
||||||
|
|
||||||
|
#define OWN_DATA NPY_OWNDATA
|
||||||
|
#define BEHAVED_FLAGS NPY_BEHAVED
|
||||||
|
#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS
|
||||||
|
#define CARRAY_FLAGS_RO NPY_CARRAY_RO
|
||||||
|
#define CARRAY_FLAGS NPY_CARRAY
|
||||||
|
#define FARRAY_FLAGS NPY_FARRAY
|
||||||
|
#define FARRAY_FLAGS_RO NPY_FARRAY_RO
|
||||||
|
#define DEFAULT_FLAGS NPY_DEFAULT
|
||||||
|
#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS
|
||||||
|
|
||||||
|
#ifndef MIN
|
||||||
|
#define MIN PyArray_MIN
|
||||||
|
#endif
|
||||||
|
#ifndef MAX
|
||||||
|
#define MAX PyArray_MAX
|
||||||
|
#endif
|
||||||
|
#define MAX_INTP NPY_MAX_INTP
|
||||||
|
#define MIN_INTP NPY_MIN_INTP
|
||||||
|
#define MAX_UINTP NPY_MAX_UINTP
|
||||||
|
#define INTP_FMT NPY_INTP_FMT
|
||||||
|
|
||||||
|
#ifndef PYPY_VERSION
|
||||||
|
#define REFCOUNT PyArray_REFCOUNT
|
||||||
|
#define MAX_ELSIZE NPY_MAX_ELSIZE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,133 @@
|
|||||||
|
#ifndef _NPY_1_7_DEPRECATED_API_H
|
||||||
|
#define _NPY_1_7_DEPRECATED_API_H
|
||||||
|
|
||||||
|
#ifndef NPY_DEPRECATED_INCLUDES
|
||||||
|
#error "Should never include npy_*_*_deprecated_api directly."
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Emit a warning if the user did not specifically request the old API */
|
||||||
|
#ifndef NPY_NO_DEPRECATED_API
|
||||||
|
#if defined(_WIN32)
|
||||||
|
#define _WARN___STR2__(x) #x
|
||||||
|
#define _WARN___STR1__(x) _WARN___STR2__(x)
|
||||||
|
#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
|
||||||
|
#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \
|
||||||
|
"#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
|
||||||
|
#elif defined(__GNUC__)
|
||||||
|
#warning "Using deprecated NumPy API, disable it with " \
|
||||||
|
"#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
|
||||||
|
#endif
|
||||||
|
/* TODO: How to do this warning message for other compilers? */
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This header exists to collect all dangerous/deprecated NumPy API
|
||||||
|
* as of NumPy 1.7.
|
||||||
|
*
|
||||||
|
* This is an attempt to remove bad API, the proliferation of macros,
|
||||||
|
* and namespace pollution currently produced by the NumPy headers.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* These array flags are deprecated as of NumPy 1.7 */
|
||||||
|
#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
|
||||||
|
#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The consistent NPY_ARRAY_* names which don't pollute the NPY_*
|
||||||
|
* namespace were added in NumPy 1.7.
|
||||||
|
*
|
||||||
|
* These versions of the carray flags are deprecated, but
|
||||||
|
* probably should only be removed after two releases instead of one.
|
||||||
|
*/
|
||||||
|
#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
|
||||||
|
#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS
|
||||||
|
#define NPY_OWNDATA NPY_ARRAY_OWNDATA
|
||||||
|
#define NPY_FORCECAST NPY_ARRAY_FORCECAST
|
||||||
|
#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY
|
||||||
|
#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY
|
||||||
|
#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES
|
||||||
|
#define NPY_ALIGNED NPY_ARRAY_ALIGNED
|
||||||
|
#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED
|
||||||
|
#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE
|
||||||
|
#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY
|
||||||
|
#define NPY_BEHAVED NPY_ARRAY_BEHAVED
|
||||||
|
#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS
|
||||||
|
#define NPY_CARRAY NPY_ARRAY_CARRAY
|
||||||
|
#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO
|
||||||
|
#define NPY_FARRAY NPY_ARRAY_FARRAY
|
||||||
|
#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO
|
||||||
|
#define NPY_DEFAULT NPY_ARRAY_DEFAULT
|
||||||
|
#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY
|
||||||
|
#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY
|
||||||
|
#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY
|
||||||
|
#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY
|
||||||
|
#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY
|
||||||
|
#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY
|
||||||
|
#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL
|
||||||
|
|
||||||
|
/* This way of accessing the default type is deprecated as of NumPy 1.7 */
|
||||||
|
#define PyArray_DEFAULT NPY_DEFAULT_TYPE
|
||||||
|
|
||||||
|
/* These DATETIME bits aren't used internally */
|
||||||
|
#if PY_VERSION_HEX >= 0x03000000
|
||||||
|
#define PyDataType_GetDatetimeMetaData(descr) \
|
||||||
|
((descr->metadata == NULL) ? NULL : \
|
||||||
|
((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \
|
||||||
|
PyDict_GetItemString( \
|
||||||
|
descr->metadata, NPY_METADATA_DTSTR), NULL))))
|
||||||
|
#else
|
||||||
|
#define PyDataType_GetDatetimeMetaData(descr) \
|
||||||
|
((descr->metadata == NULL) ? NULL : \
|
||||||
|
((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \
|
||||||
|
PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR)))))
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Deprecated as of NumPy 1.7, this kind of shortcut doesn't
|
||||||
|
* belong in the public API.
|
||||||
|
*/
|
||||||
|
#define NPY_AO PyArrayObject
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Deprecated as of NumPy 1.7, an all-lowercase macro doesn't
|
||||||
|
* belong in the public API.
|
||||||
|
*/
|
||||||
|
#define fortran fortran_
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Deprecated as of NumPy 1.7, as it is a namespace-polluting
|
||||||
|
* macro.
|
||||||
|
*/
|
||||||
|
#define FORTRAN_IF PyArray_FORTRAN_IF
|
||||||
|
|
||||||
|
/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */
|
||||||
|
#define NPY_METADATA_DTSTR "__timeunit__"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Deprecated as of NumPy 1.7.
|
||||||
|
* The reasoning:
|
||||||
|
* - These are for datetime, but there's no datetime "namespace".
|
||||||
|
* - They just turn NPY_STR_<x> into "<x>", which is just
|
||||||
|
* making something simple be indirected.
|
||||||
|
*/
|
||||||
|
#define NPY_STR_Y "Y"
|
||||||
|
#define NPY_STR_M "M"
|
||||||
|
#define NPY_STR_W "W"
|
||||||
|
#define NPY_STR_D "D"
|
||||||
|
#define NPY_STR_h "h"
|
||||||
|
#define NPY_STR_m "m"
|
||||||
|
#define NPY_STR_s "s"
|
||||||
|
#define NPY_STR_ms "ms"
|
||||||
|
#define NPY_STR_us "us"
|
||||||
|
#define NPY_STR_ns "ns"
|
||||||
|
#define NPY_STR_ps "ps"
|
||||||
|
#define NPY_STR_fs "fs"
|
||||||
|
#define NPY_STR_as "as"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be
|
||||||
|
* removed in the next major release.
|
||||||
|
*/
|
||||||
|
#include "old_defines.h"
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,577 @@
|
|||||||
|
/*
|
||||||
|
* This is a convenience header file providing compatibility utilities
|
||||||
|
* for supporting Python 2 and Python 3 in the same code base.
|
||||||
|
*
|
||||||
|
* If you want to use this for your own projects, it's recommended to make a
|
||||||
|
* copy of it. Although the stuff below is unlikely to change, we don't provide
|
||||||
|
* strong backwards compatibility guarantees at the moment.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _NPY_3KCOMPAT_H_
|
||||||
|
#define _NPY_3KCOMPAT_H_
|
||||||
|
|
||||||
|
#include <Python.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
|
||||||
|
#if PY_VERSION_HEX >= 0x03000000
|
||||||
|
#ifndef NPY_PY3K
|
||||||
|
#define NPY_PY3K 1
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "numpy/npy_common.h"
|
||||||
|
#include "numpy/ndarrayobject.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* PyInt -> PyLong
|
||||||
|
*/
|
||||||
|
|
||||||
|
#if defined(NPY_PY3K)
|
||||||
|
/* Return True only if the long fits in a C long */
|
||||||
|
static NPY_INLINE int PyInt_Check(PyObject *op) {
|
||||||
|
int overflow = 0;
|
||||||
|
if (!PyLong_Check(op)) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
PyLong_AsLongAndOverflow(op, &overflow);
|
||||||
|
return (overflow == 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#define PyInt_FromLong PyLong_FromLong
|
||||||
|
#define PyInt_AsLong PyLong_AsLong
|
||||||
|
#define PyInt_AS_LONG PyLong_AsLong
|
||||||
|
#define PyInt_AsSsize_t PyLong_AsSsize_t
|
||||||
|
|
||||||
|
/* NOTE:
|
||||||
|
*
|
||||||
|
* Since the PyLong type is very different from the fixed-range PyInt,
|
||||||
|
* we don't define PyInt_Type -> PyLong_Type.
|
||||||
|
*/
|
||||||
|
#endif /* NPY_PY3K */
|
||||||
|
|
||||||
|
/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */
|
||||||
|
#ifdef NPY_PY3K
|
||||||
|
# define NpySlice_GetIndicesEx PySlice_GetIndicesEx
|
||||||
|
#else
|
||||||
|
# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \
|
||||||
|
PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */
|
||||||
|
#if (PY_VERSION_HEX < 0x02070B00) || \
|
||||||
|
((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400))
|
||||||
|
#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x))
|
||||||
|
#else
|
||||||
|
#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
|
||||||
|
#if PY_VERSION_HEX < 0x03050200
|
||||||
|
#define Py_SETREF(op, op2) \
|
||||||
|
do { \
|
||||||
|
PyObject *_py_tmp = (PyObject *)(op); \
|
||||||
|
(op) = (op2); \
|
||||||
|
Py_DECREF(_py_tmp); \
|
||||||
|
} while (0)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* PyString -> PyBytes
|
||||||
|
*/
|
||||||
|
|
||||||
|
#if defined(NPY_PY3K)
|
||||||
|
|
||||||
|
#define PyString_Type PyBytes_Type
|
||||||
|
#define PyString_Check PyBytes_Check
|
||||||
|
#define PyStringObject PyBytesObject
|
||||||
|
#define PyString_FromString PyBytes_FromString
|
||||||
|
#define PyString_FromStringAndSize PyBytes_FromStringAndSize
|
||||||
|
#define PyString_AS_STRING PyBytes_AS_STRING
|
||||||
|
#define PyString_AsStringAndSize PyBytes_AsStringAndSize
|
||||||
|
#define PyString_FromFormat PyBytes_FromFormat
|
||||||
|
#define PyString_Concat PyBytes_Concat
|
||||||
|
#define PyString_ConcatAndDel PyBytes_ConcatAndDel
|
||||||
|
#define PyString_AsString PyBytes_AsString
|
||||||
|
#define PyString_GET_SIZE PyBytes_GET_SIZE
|
||||||
|
#define PyString_Size PyBytes_Size
|
||||||
|
|
||||||
|
#define PyUString_Type PyUnicode_Type
|
||||||
|
#define PyUString_Check PyUnicode_Check
|
||||||
|
#define PyUStringObject PyUnicodeObject
|
||||||
|
#define PyUString_FromString PyUnicode_FromString
|
||||||
|
#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
|
||||||
|
#define PyUString_FromFormat PyUnicode_FromFormat
|
||||||
|
#define PyUString_Concat PyUnicode_Concat2
|
||||||
|
#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel
|
||||||
|
#define PyUString_GET_SIZE PyUnicode_GET_SIZE
|
||||||
|
#define PyUString_Size PyUnicode_Size
|
||||||
|
#define PyUString_InternFromString PyUnicode_InternFromString
|
||||||
|
#define PyUString_Format PyUnicode_Format
|
||||||
|
|
||||||
|
#define PyBaseString_Check(obj) (PyUnicode_Check(obj))
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define PyBytes_Type PyString_Type
|
||||||
|
#define PyBytes_Check PyString_Check
|
||||||
|
#define PyBytesObject PyStringObject
|
||||||
|
#define PyBytes_FromString PyString_FromString
|
||||||
|
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
|
||||||
|
#define PyBytes_AS_STRING PyString_AS_STRING
|
||||||
|
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
|
||||||
|
#define PyBytes_FromFormat PyString_FromFormat
|
||||||
|
#define PyBytes_Concat PyString_Concat
|
||||||
|
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
|
||||||
|
#define PyBytes_AsString PyString_AsString
|
||||||
|
#define PyBytes_GET_SIZE PyString_GET_SIZE
|
||||||
|
#define PyBytes_Size PyString_Size
|
||||||
|
|
||||||
|
#define PyUString_Type PyString_Type
|
||||||
|
#define PyUString_Check PyString_Check
|
||||||
|
#define PyUStringObject PyStringObject
|
||||||
|
#define PyUString_FromString PyString_FromString
|
||||||
|
#define PyUString_FromStringAndSize PyString_FromStringAndSize
|
||||||
|
#define PyUString_FromFormat PyString_FromFormat
|
||||||
|
#define PyUString_Concat PyString_Concat
|
||||||
|
#define PyUString_ConcatAndDel PyString_ConcatAndDel
|
||||||
|
#define PyUString_GET_SIZE PyString_GET_SIZE
|
||||||
|
#define PyUString_Size PyString_Size
|
||||||
|
#define PyUString_InternFromString PyString_InternFromString
|
||||||
|
#define PyUString_Format PyString_Format
|
||||||
|
|
||||||
|
#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj))
|
||||||
|
|
||||||
|
#endif /* NPY_PY3K */
|
||||||
|
|
||||||
|
|
||||||
|
static NPY_INLINE void
|
||||||
|
PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
|
||||||
|
{
|
||||||
|
Py_SETREF(*left, PyUnicode_Concat(*left, right));
|
||||||
|
Py_DECREF(right);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE void
|
||||||
|
PyUnicode_Concat2(PyObject **left, PyObject *right)
|
||||||
|
{
|
||||||
|
Py_SETREF(*left, PyUnicode_Concat(*left, right));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* PyFile_* compatibility
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Get a FILE* handle to the file represented by the Python object
|
||||||
|
*/
|
||||||
|
static NPY_INLINE FILE*
|
||||||
|
npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
|
||||||
|
{
|
||||||
|
int fd, fd2, unbuf;
|
||||||
|
PyObject *ret, *os, *io, *io_raw;
|
||||||
|
npy_off_t pos;
|
||||||
|
FILE *handle;
|
||||||
|
|
||||||
|
/* For Python 2 PyFileObject, use PyFile_AsFile */
|
||||||
|
#if !defined(NPY_PY3K)
|
||||||
|
if (PyFile_Check(file)) {
|
||||||
|
return PyFile_AsFile(file);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Flush first to ensure things end up in the file in the correct order */
|
||||||
|
ret = PyObject_CallMethod(file, "flush", "");
|
||||||
|
if (ret == NULL) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
Py_DECREF(ret);
|
||||||
|
fd = PyObject_AsFileDescriptor(file);
|
||||||
|
if (fd == -1) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The handle needs to be dup'd because we have to call fclose
|
||||||
|
* at the end
|
||||||
|
*/
|
||||||
|
os = PyImport_ImportModule("os");
|
||||||
|
if (os == NULL) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
ret = PyObject_CallMethod(os, "dup", "i", fd);
|
||||||
|
Py_DECREF(os);
|
||||||
|
if (ret == NULL) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
fd2 = PyNumber_AsSsize_t(ret, NULL);
|
||||||
|
Py_DECREF(ret);
|
||||||
|
|
||||||
|
/* Convert to FILE* handle */
|
||||||
|
#ifdef _WIN32
|
||||||
|
handle = _fdopen(fd2, mode);
|
||||||
|
#else
|
||||||
|
handle = fdopen(fd2, mode);
|
||||||
|
#endif
|
||||||
|
if (handle == NULL) {
|
||||||
|
PyErr_SetString(PyExc_IOError,
|
||||||
|
"Getting a FILE* from a Python file object failed");
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Record the original raw file handle position */
|
||||||
|
*orig_pos = npy_ftell(handle);
|
||||||
|
if (*orig_pos == -1) {
|
||||||
|
/* The io module is needed to determine if buffering is used */
|
||||||
|
io = PyImport_ImportModule("io");
|
||||||
|
if (io == NULL) {
|
||||||
|
fclose(handle);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
/* File object instances of RawIOBase are unbuffered */
|
||||||
|
io_raw = PyObject_GetAttrString(io, "RawIOBase");
|
||||||
|
Py_DECREF(io);
|
||||||
|
if (io_raw == NULL) {
|
||||||
|
fclose(handle);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
unbuf = PyObject_IsInstance(file, io_raw);
|
||||||
|
Py_DECREF(io_raw);
|
||||||
|
if (unbuf == 1) {
|
||||||
|
/* Succeed if the IO is unbuffered */
|
||||||
|
return handle;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
PyErr_SetString(PyExc_IOError, "obtaining file position failed");
|
||||||
|
fclose(handle);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Seek raw handle to the Python-side position */
|
||||||
|
ret = PyObject_CallMethod(file, "tell", "");
|
||||||
|
if (ret == NULL) {
|
||||||
|
fclose(handle);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
pos = PyLong_AsLongLong(ret);
|
||||||
|
Py_DECREF(ret);
|
||||||
|
if (PyErr_Occurred()) {
|
||||||
|
fclose(handle);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
if (npy_fseek(handle, pos, SEEK_SET) == -1) {
|
||||||
|
PyErr_SetString(PyExc_IOError, "seeking file failed");
|
||||||
|
fclose(handle);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
return handle;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Close the dup-ed file handle, and seek the Python one to the current position
|
||||||
|
*/
|
||||||
|
static NPY_INLINE int
|
||||||
|
npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
|
||||||
|
{
|
||||||
|
int fd, unbuf;
|
||||||
|
PyObject *ret, *io, *io_raw;
|
||||||
|
npy_off_t position;
|
||||||
|
|
||||||
|
/* For Python 2 PyFileObject, do nothing */
|
||||||
|
#if !defined(NPY_PY3K)
|
||||||
|
if (PyFile_Check(file)) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
position = npy_ftell(handle);
|
||||||
|
|
||||||
|
/* Close the FILE* handle */
|
||||||
|
fclose(handle);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Restore original file handle position, in order to not confuse
|
||||||
|
* Python-side data structures
|
||||||
|
*/
|
||||||
|
fd = PyObject_AsFileDescriptor(file);
|
||||||
|
if (fd == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {
|
||||||
|
|
||||||
|
/* The io module is needed to determine if buffering is used */
|
||||||
|
io = PyImport_ImportModule("io");
|
||||||
|
if (io == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
/* File object instances of RawIOBase are unbuffered */
|
||||||
|
io_raw = PyObject_GetAttrString(io, "RawIOBase");
|
||||||
|
Py_DECREF(io);
|
||||||
|
if (io_raw == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
unbuf = PyObject_IsInstance(file, io_raw);
|
||||||
|
Py_DECREF(io_raw);
|
||||||
|
if (unbuf == 1) {
|
||||||
|
/* Succeed if the IO is unbuffered */
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
PyErr_SetString(PyExc_IOError, "seeking file failed");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (position == -1) {
|
||||||
|
PyErr_SetString(PyExc_IOError, "obtaining file position failed");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Seek Python-side handle to the FILE* handle position */
|
||||||
|
ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);
|
||||||
|
if (ret == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
Py_DECREF(ret);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE int
|
||||||
|
npy_PyFile_Check(PyObject *file)
|
||||||
|
{
|
||||||
|
int fd;
|
||||||
|
/* For Python 2, check if it is a PyFileObject */
|
||||||
|
#if !defined(NPY_PY3K)
|
||||||
|
if (PyFile_Check(file)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
fd = PyObject_AsFileDescriptor(file);
|
||||||
|
if (fd == -1) {
|
||||||
|
PyErr_Clear();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE PyObject*
|
||||||
|
npy_PyFile_OpenFile(PyObject *filename, const char *mode)
|
||||||
|
{
|
||||||
|
PyObject *open;
|
||||||
|
open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
|
||||||
|
if (open == NULL) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
return PyObject_CallFunction(open, "Os", filename, mode);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE int
|
||||||
|
npy_PyFile_CloseFile(PyObject *file)
|
||||||
|
{
|
||||||
|
PyObject *ret;
|
||||||
|
|
||||||
|
ret = PyObject_CallMethod(file, "close", NULL);
|
||||||
|
if (ret == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
Py_DECREF(ret);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* This is a copy of _PyErr_ChainExceptions
|
||||||
|
*/
|
||||||
|
static NPY_INLINE void
|
||||||
|
npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
|
||||||
|
{
|
||||||
|
if (exc == NULL)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (PyErr_Occurred()) {
|
||||||
|
/* only py3 supports this anyway */
|
||||||
|
#ifdef NPY_PY3K
|
||||||
|
PyObject *exc2, *val2, *tb2;
|
||||||
|
PyErr_Fetch(&exc2, &val2, &tb2);
|
||||||
|
PyErr_NormalizeException(&exc, &val, &tb);
|
||||||
|
if (tb != NULL) {
|
||||||
|
PyException_SetTraceback(val, tb);
|
||||||
|
Py_DECREF(tb);
|
||||||
|
}
|
||||||
|
Py_DECREF(exc);
|
||||||
|
PyErr_NormalizeException(&exc2, &val2, &tb2);
|
||||||
|
PyException_SetContext(val2, val);
|
||||||
|
PyErr_Restore(exc2, val2, tb2);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
PyErr_Restore(exc, val, tb);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* This is a copy of _PyErr_ChainExceptions, with:
|
||||||
|
* - a minimal implementation for python 2
|
||||||
|
* - __cause__ used instead of __context__
|
||||||
|
*/
|
||||||
|
static NPY_INLINE void
|
||||||
|
npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
|
||||||
|
{
|
||||||
|
if (exc == NULL)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (PyErr_Occurred()) {
|
||||||
|
/* only py3 supports this anyway */
|
||||||
|
#ifdef NPY_PY3K
|
||||||
|
PyObject *exc2, *val2, *tb2;
|
||||||
|
PyErr_Fetch(&exc2, &val2, &tb2);
|
||||||
|
PyErr_NormalizeException(&exc, &val, &tb);
|
||||||
|
if (tb != NULL) {
|
||||||
|
PyException_SetTraceback(val, tb);
|
||||||
|
Py_DECREF(tb);
|
||||||
|
}
|
||||||
|
Py_DECREF(exc);
|
||||||
|
PyErr_NormalizeException(&exc2, &val2, &tb2);
|
||||||
|
PyException_SetCause(val2, val);
|
||||||
|
PyErr_Restore(exc2, val2, tb2);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
PyErr_Restore(exc, val, tb);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* PyObject_Cmp
|
||||||
|
*/
|
||||||
|
#if defined(NPY_PY3K)
|
||||||
|
static NPY_INLINE int
|
||||||
|
PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
|
||||||
|
{
|
||||||
|
int v;
|
||||||
|
v = PyObject_RichCompareBool(i1, i2, Py_LT);
|
||||||
|
if (v == 1) {
|
||||||
|
*cmp = -1;
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
else if (v == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
v = PyObject_RichCompareBool(i1, i2, Py_GT);
|
||||||
|
if (v == 1) {
|
||||||
|
*cmp = 1;
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
else if (v == -1) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
v = PyObject_RichCompareBool(i1, i2, Py_EQ);
|
||||||
|
if (v == 1) {
|
||||||
|
*cmp = 0;
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
*cmp = 0;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* PyCObject functions adapted to PyCapsules.
|
||||||
|
*
|
||||||
|
* The main job here is to get rid of the improved error handling
|
||||||
|
* of PyCapsules. It's a shame...
|
||||||
|
*/
|
||||||
|
#if PY_VERSION_HEX >= 0x03000000
|
||||||
|
|
||||||
|
static NPY_INLINE PyObject *
|
||||||
|
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
|
||||||
|
{
|
||||||
|
PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
|
||||||
|
if (ret == NULL) {
|
||||||
|
PyErr_Clear();
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE PyObject *
|
||||||
|
NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
|
||||||
|
{
|
||||||
|
PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
|
||||||
|
if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
|
||||||
|
PyErr_Clear();
|
||||||
|
Py_DECREF(ret);
|
||||||
|
ret = NULL;
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE void *
|
||||||
|
NpyCapsule_AsVoidPtr(PyObject *obj)
|
||||||
|
{
|
||||||
|
void *ret = PyCapsule_GetPointer(obj, NULL);
|
||||||
|
if (ret == NULL) {
|
||||||
|
PyErr_Clear();
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE void *
|
||||||
|
NpyCapsule_GetDesc(PyObject *obj)
|
||||||
|
{
|
||||||
|
return PyCapsule_GetContext(obj);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE int
|
||||||
|
NpyCapsule_Check(PyObject *ptr)
|
||||||
|
{
|
||||||
|
return PyCapsule_CheckExact(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
static NPY_INLINE PyObject *
|
||||||
|
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
|
||||||
|
{
|
||||||
|
return PyCObject_FromVoidPtr(ptr, dtor);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE PyObject *
|
||||||
|
NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context,
|
||||||
|
void (*dtor)(void *, void *))
|
||||||
|
{
|
||||||
|
return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE void *
|
||||||
|
NpyCapsule_AsVoidPtr(PyObject *ptr)
|
||||||
|
{
|
||||||
|
return PyCObject_AsVoidPtr(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE void *
|
||||||
|
NpyCapsule_GetDesc(PyObject *obj)
|
||||||
|
{
|
||||||
|
return PyCObject_GetDesc(obj);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE int
|
||||||
|
NpyCapsule_Check(PyObject *ptr)
|
||||||
|
{
|
||||||
|
return PyCObject_Check(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /* _NPY_3KCOMPAT_H_ */
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,118 @@
|
|||||||
|
/*
|
||||||
|
* This set (target) cpu specific macros:
|
||||||
|
* - Possible values:
|
||||||
|
* NPY_CPU_X86
|
||||||
|
* NPY_CPU_AMD64
|
||||||
|
* NPY_CPU_PPC
|
||||||
|
* NPY_CPU_PPC64
|
||||||
|
* NPY_CPU_PPC64LE
|
||||||
|
* NPY_CPU_SPARC
|
||||||
|
* NPY_CPU_S390
|
||||||
|
* NPY_CPU_IA64
|
||||||
|
* NPY_CPU_HPPA
|
||||||
|
* NPY_CPU_ALPHA
|
||||||
|
* NPY_CPU_ARMEL
|
||||||
|
* NPY_CPU_ARMEB
|
||||||
|
* NPY_CPU_SH_LE
|
||||||
|
* NPY_CPU_SH_BE
|
||||||
|
* NPY_CPU_ARCEL
|
||||||
|
* NPY_CPU_ARCEB
|
||||||
|
* NPY_CPU_RISCV64
|
||||||
|
*/
|
||||||
|
#ifndef _NPY_CPUARCH_H_
|
||||||
|
#define _NPY_CPUARCH_H_
|
||||||
|
|
||||||
|
#include "numpyconfig.h"
|
||||||
|
#include <string.h> /* for memcpy */
|
||||||
|
|
||||||
|
#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
|
||||||
|
/*
|
||||||
|
* __i386__ is defined by gcc and Intel compiler on Linux,
|
||||||
|
* _M_IX86 by VS compiler,
|
||||||
|
* i386 by Sun compilers on opensolaris at least
|
||||||
|
*/
|
||||||
|
#define NPY_CPU_X86
|
||||||
|
#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
|
||||||
|
/*
|
||||||
|
* both __x86_64__ and __amd64__ are defined by gcc
|
||||||
|
* __x86_64 defined by sun compiler on opensolaris at least
|
||||||
|
* _M_AMD64 defined by MS compiler
|
||||||
|
*/
|
||||||
|
#define NPY_CPU_AMD64
|
||||||
|
#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
|
||||||
|
#define NPY_CPU_PPC64LE
|
||||||
|
#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
|
||||||
|
#define NPY_CPU_PPC64
|
||||||
|
#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
|
||||||
|
/*
|
||||||
|
* __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
|
||||||
|
* but can't find it ATM
|
||||||
|
* _ARCH_PPC is used by at least gcc on AIX
|
||||||
|
* As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
|
||||||
|
* for those specifically first before defaulting to ppc
|
||||||
|
*/
|
||||||
|
#define NPY_CPU_PPC
|
||||||
|
#elif defined(__sparc__) || defined(__sparc)
|
||||||
|
/* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
|
||||||
|
#define NPY_CPU_SPARC
|
||||||
|
#elif defined(__s390__)
|
||||||
|
#define NPY_CPU_S390
|
||||||
|
#elif defined(__ia64)
|
||||||
|
#define NPY_CPU_IA64
|
||||||
|
#elif defined(__hppa)
|
||||||
|
#define NPY_CPU_HPPA
|
||||||
|
#elif defined(__alpha__)
|
||||||
|
#define NPY_CPU_ALPHA
|
||||||
|
#elif defined(__arm__) || defined(__aarch64__)
|
||||||
|
#if defined(__ARMEB__) || defined(__AARCH64EB__)
|
||||||
|
#if defined(__ARM_32BIT_STATE)
|
||||||
|
#define NPY_CPU_ARMEB_AARCH32
|
||||||
|
#elif defined(__ARM_64BIT_STATE)
|
||||||
|
#define NPY_CPU_ARMEB_AARCH64
|
||||||
|
#else
|
||||||
|
#define NPY_CPU_ARMEB
|
||||||
|
#endif
|
||||||
|
#elif defined(__ARMEL__) || defined(__AARCH64EL__)
|
||||||
|
#if defined(__ARM_32BIT_STATE)
|
||||||
|
#define NPY_CPU_ARMEL_AARCH32
|
||||||
|
#elif defined(__ARM_64BIT_STATE)
|
||||||
|
#define NPY_CPU_ARMEL_AARCH64
|
||||||
|
#else
|
||||||
|
#define NPY_CPU_ARMEL
|
||||||
|
#endif
|
||||||
|
#else
|
||||||
|
# error Unknown ARM CPU, please report this to numpy maintainers with \
|
||||||
|
information about your platform (OS, CPU and compiler)
|
||||||
|
#endif
|
||||||
|
#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
|
||||||
|
#define NPY_CPU_SH_LE
|
||||||
|
#elif defined(__sh__) && defined(__BIG_ENDIAN__)
|
||||||
|
#define NPY_CPU_SH_BE
|
||||||
|
#elif defined(__MIPSEL__)
|
||||||
|
#define NPY_CPU_MIPSEL
|
||||||
|
#elif defined(__MIPSEB__)
|
||||||
|
#define NPY_CPU_MIPSEB
|
||||||
|
#elif defined(__or1k__)
|
||||||
|
#define NPY_CPU_OR1K
|
||||||
|
#elif defined(__mc68000__)
|
||||||
|
#define NPY_CPU_M68K
|
||||||
|
#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
|
||||||
|
#define NPY_CPU_ARCEL
|
||||||
|
#elif defined(__arc__) && defined(__BIG_ENDIAN__)
|
||||||
|
#define NPY_CPU_ARCEB
|
||||||
|
#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
|
||||||
|
#define NPY_CPU_RISCV64
|
||||||
|
#else
|
||||||
|
#error Unknown CPU, please report this to numpy maintainers with \
|
||||||
|
information about your platform (OS, CPU and compiler)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *))
|
||||||
|
|
||||||
|
#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
|
||||||
|
#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1
|
||||||
|
#else
|
||||||
|
#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,72 @@
|
|||||||
|
#ifndef _NPY_ENDIAN_H_
|
||||||
|
#define _NPY_ENDIAN_H_
|
||||||
|
|
||||||
|
/*
|
||||||
|
* NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
|
||||||
|
* endian.h
|
||||||
|
*/
|
||||||
|
|
||||||
|
#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)
|
||||||
|
/* Use endian.h if available */
|
||||||
|
|
||||||
|
#if defined(NPY_HAVE_ENDIAN_H)
|
||||||
|
#include <endian.h>
|
||||||
|
#elif defined(NPY_HAVE_SYS_ENDIAN_H)
|
||||||
|
#include <sys/endian.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)
|
||||||
|
#define NPY_BYTE_ORDER BYTE_ORDER
|
||||||
|
#define NPY_LITTLE_ENDIAN LITTLE_ENDIAN
|
||||||
|
#define NPY_BIG_ENDIAN BIG_ENDIAN
|
||||||
|
#elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
|
||||||
|
#define NPY_BYTE_ORDER _BYTE_ORDER
|
||||||
|
#define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN
|
||||||
|
#define NPY_BIG_ENDIAN _BIG_ENDIAN
|
||||||
|
#elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
|
||||||
|
#define NPY_BYTE_ORDER __BYTE_ORDER
|
||||||
|
#define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
|
||||||
|
#define NPY_BIG_ENDIAN __BIG_ENDIAN
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef NPY_BYTE_ORDER
|
||||||
|
/* Set endianness info using target CPU */
|
||||||
|
#include "npy_cpu.h"
|
||||||
|
|
||||||
|
#define NPY_LITTLE_ENDIAN 1234
|
||||||
|
#define NPY_BIG_ENDIAN 4321
|
||||||
|
|
||||||
|
#if defined(NPY_CPU_X86) \
|
||||||
|
|| defined(NPY_CPU_AMD64) \
|
||||||
|
|| defined(NPY_CPU_IA64) \
|
||||||
|
|| defined(NPY_CPU_ALPHA) \
|
||||||
|
|| defined(NPY_CPU_ARMEL) \
|
||||||
|
|| defined(NPY_CPU_ARMEL_AARCH32) \
|
||||||
|
|| defined(NPY_CPU_ARMEL_AARCH64) \
|
||||||
|
|| defined(NPY_CPU_SH_LE) \
|
||||||
|
|| defined(NPY_CPU_MIPSEL) \
|
||||||
|
|| defined(NPY_CPU_PPC64LE) \
|
||||||
|
|| defined(NPY_CPU_ARCEL) \
|
||||||
|
|| defined(NPY_CPU_RISCV64)
|
||||||
|
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
|
||||||
|
#elif defined(NPY_CPU_PPC) \
|
||||||
|
|| defined(NPY_CPU_SPARC) \
|
||||||
|
|| defined(NPY_CPU_S390) \
|
||||||
|
|| defined(NPY_CPU_HPPA) \
|
||||||
|
|| defined(NPY_CPU_PPC64) \
|
||||||
|
|| defined(NPY_CPU_ARMEB) \
|
||||||
|
|| defined(NPY_CPU_ARMEB_AARCH32) \
|
||||||
|
|| defined(NPY_CPU_ARMEB_AARCH64) \
|
||||||
|
|| defined(NPY_CPU_SH_BE) \
|
||||||
|
|| defined(NPY_CPU_MIPSEB) \
|
||||||
|
|| defined(NPY_CPU_OR1K) \
|
||||||
|
|| defined(NPY_CPU_M68K) \
|
||||||
|
|| defined(NPY_CPU_ARCEB)
|
||||||
|
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
|
||||||
|
#else
|
||||||
|
#error Unknown CPU: can not set endianness
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,117 @@
|
|||||||
|
|
||||||
|
/* Signal handling:
|
||||||
|
|
||||||
|
This header file defines macros that allow your code to handle
|
||||||
|
interrupts received during processing. Interrupts that
|
||||||
|
could reasonably be handled:
|
||||||
|
|
||||||
|
SIGINT, SIGABRT, SIGALRM, SIGSEGV
|
||||||
|
|
||||||
|
****Warning***************
|
||||||
|
|
||||||
|
Do not allow code that creates temporary memory or increases reference
|
||||||
|
counts of Python objects to be interrupted unless you handle it
|
||||||
|
differently.
|
||||||
|
|
||||||
|
**************************
|
||||||
|
|
||||||
|
The mechanism for handling interrupts is conceptually simple:
|
||||||
|
|
||||||
|
- replace the signal handler with our own home-grown version
|
||||||
|
and store the old one.
|
||||||
|
- run the code to be interrupted -- if an interrupt occurs
|
||||||
|
the handler should basically just cause a return to the
|
||||||
|
calling function for finish work.
|
||||||
|
- restore the old signal handler
|
||||||
|
|
||||||
|
Of course, every code that allows interrupts must account for
|
||||||
|
returning via the interrupt and handle clean-up correctly. But,
|
||||||
|
even still, the simple paradigm is complicated by at least three
|
||||||
|
factors.
|
||||||
|
|
||||||
|
1) platform portability (i.e. Microsoft says not to use longjmp
|
||||||
|
to return from signal handling. They have a __try and __except
|
||||||
|
extension to C instead but what about mingw?).
|
||||||
|
|
||||||
|
2) how to handle threads: apparently whether signals are delivered to
|
||||||
|
every thread of the process or the "invoking" thread is platform
|
||||||
|
dependent. --- we don't handle threads for now.
|
||||||
|
|
||||||
|
3) do we need to worry about re-entrance. For now, assume the
|
||||||
|
code will not call-back into itself.
|
||||||
|
|
||||||
|
Ideas:
|
||||||
|
|
||||||
|
1) Start by implementing an approach that works on platforms that
|
||||||
|
can use setjmp and longjmp functionality and does nothing
|
||||||
|
on other platforms.
|
||||||
|
|
||||||
|
2) Ignore threads --- i.e. do not mix interrupt handling and threads
|
||||||
|
|
||||||
|
3) Add a default signal_handler function to the C-API but have the rest
|
||||||
|
use macros.
|
||||||
|
|
||||||
|
|
||||||
|
Simple Interface:
|
||||||
|
|
||||||
|
|
||||||
|
In your C-extension: around a block of code you want to be interruptible
|
||||||
|
with a SIGINT
|
||||||
|
|
||||||
|
NPY_SIGINT_ON
|
||||||
|
[code]
|
||||||
|
NPY_SIGINT_OFF
|
||||||
|
|
||||||
|
In order for this to work correctly, the
|
||||||
|
[code] block must not allocate any memory or alter the reference count of any
|
||||||
|
Python objects. In other words [code] must be interruptible so that continuation
|
||||||
|
after NPY_SIGINT_OFF will only be "missing some computations"
|
||||||
|
|
||||||
|
Interrupt handling does not work well with threads.
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Add signal handling macros
|
||||||
|
Make the global variable and signal handler part of the C-API
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef NPY_INTERRUPT_H
|
||||||
|
#define NPY_INTERRUPT_H
|
||||||
|
|
||||||
|
#ifndef NPY_NO_SIGNAL
|
||||||
|
|
||||||
|
#include <setjmp.h>
|
||||||
|
#include <signal.h>
|
||||||
|
|
||||||
|
#ifndef sigsetjmp
|
||||||
|
|
||||||
|
#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1)
|
||||||
|
#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2)
|
||||||
|
#define NPY_SIGJMP_BUF jmp_buf
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2)
|
||||||
|
#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2)
|
||||||
|
#define NPY_SIGJMP_BUF sigjmp_buf
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
# define NPY_SIGINT_ON { \
|
||||||
|
PyOS_sighandler_t _npy_sig_save; \
|
||||||
|
_npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \
|
||||||
|
if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \
|
||||||
|
1) == 0) { \
|
||||||
|
|
||||||
|
# define NPY_SIGINT_OFF } \
|
||||||
|
PyOS_setsig(SIGINT, _npy_sig_save); \
|
||||||
|
}
|
||||||
|
|
||||||
|
#else /* NPY_NO_SIGNAL */
|
||||||
|
|
||||||
|
#define NPY_SIGINT_ON
|
||||||
|
#define NPY_SIGINT_OFF
|
||||||
|
|
||||||
|
#endif /* HAVE_SIGSETJMP */
|
||||||
|
|
||||||
|
#endif /* NPY_INTERRUPT_H */
|
@ -0,0 +1,646 @@
|
|||||||
|
#ifndef __NPY_MATH_C99_H_
|
||||||
|
#define __NPY_MATH_C99_H_
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <math.h>
|
||||||
|
#ifdef __SUNPRO_CC
|
||||||
|
#include <sunmath.h>
|
||||||
|
#endif
|
||||||
|
#ifdef HAVE_NPY_CONFIG_H
|
||||||
|
#include <npy_config.h>
|
||||||
|
#endif
|
||||||
|
#include <numpy/npy_common.h>
|
||||||
|
|
||||||
|
/* By adding static inline specifiers to npy_math function definitions when
|
||||||
|
appropriate, compiler is given the opportunity to optimize */
|
||||||
|
#if NPY_INLINE_MATH
|
||||||
|
#define NPY_INPLACE NPY_INLINE static
|
||||||
|
#else
|
||||||
|
#define NPY_INPLACE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99
|
||||||
|
* for INFINITY)
|
||||||
|
*
|
||||||
|
* XXX: I should test whether INFINITY and NAN are available on the platform
|
||||||
|
*/
|
||||||
|
NPY_INLINE static float __npy_inff(void)
|
||||||
|
{
|
||||||
|
const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
|
||||||
|
return __bint.__f;
|
||||||
|
}
|
||||||
|
|
||||||
|
NPY_INLINE static float __npy_nanf(void)
|
||||||
|
{
|
||||||
|
const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
|
||||||
|
return __bint.__f;
|
||||||
|
}
|
||||||
|
|
||||||
|
NPY_INLINE static float __npy_pzerof(void)
|
||||||
|
{
|
||||||
|
const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
|
||||||
|
return __bint.__f;
|
||||||
|
}
|
||||||
|
|
||||||
|
NPY_INLINE static float __npy_nzerof(void)
|
||||||
|
{
|
||||||
|
const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
|
||||||
|
return __bint.__f;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define NPY_INFINITYF __npy_inff()
|
||||||
|
#define NPY_NANF __npy_nanf()
|
||||||
|
#define NPY_PZEROF __npy_pzerof()
|
||||||
|
#define NPY_NZEROF __npy_nzerof()
|
||||||
|
|
||||||
|
#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
|
||||||
|
#define NPY_NAN ((npy_double)NPY_NANF)
|
||||||
|
#define NPY_PZERO ((npy_double)NPY_PZEROF)
|
||||||
|
#define NPY_NZERO ((npy_double)NPY_NZEROF)
|
||||||
|
|
||||||
|
#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
|
||||||
|
#define NPY_NANL ((npy_longdouble)NPY_NANF)
|
||||||
|
#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
|
||||||
|
#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Useful constants
|
||||||
|
*/
|
||||||
|
#define NPY_E 2.718281828459045235360287471352662498 /* e */
|
||||||
|
#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
|
||||||
|
#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
|
||||||
|
#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
|
||||||
|
#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
|
||||||
|
#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
|
||||||
|
#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
|
||||||
|
#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
|
||||||
|
#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
|
||||||
|
#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
|
||||||
|
#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
|
||||||
|
#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
|
||||||
|
#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
|
||||||
|
|
||||||
|
#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
|
||||||
|
#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
|
||||||
|
#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
|
||||||
|
#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
|
||||||
|
#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
|
||||||
|
#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
|
||||||
|
#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
|
||||||
|
#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
|
||||||
|
#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
|
||||||
|
#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
|
||||||
|
#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
|
||||||
|
#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
|
||||||
|
#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */
|
||||||
|
|
||||||
|
#define NPY_El 2.718281828459045235360287471352662498L /* e */
|
||||||
|
#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
|
||||||
|
#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
|
||||||
|
#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
|
||||||
|
#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
|
||||||
|
#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
|
||||||
|
#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
|
||||||
|
#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
|
||||||
|
#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
|
||||||
|
#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
|
||||||
|
#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
|
||||||
|
#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
|
||||||
|
#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Constants used in vector implementation of exp(x)
|
||||||
|
*/
|
||||||
|
#define NPY_RINT_CVT_MAGICf 0x1.800000p+23f
|
||||||
|
#define NPY_CODY_WAITE_LOGE_2_HIGHf -6.93145752e-1f
|
||||||
|
#define NPY_CODY_WAITE_LOGE_2_LOWf -1.42860677e-6f
|
||||||
|
#define NPY_COEFF_P0_EXPf 9.999999999980870924916e-01f
|
||||||
|
#define NPY_COEFF_P1_EXPf 7.257664613233124478488e-01f
|
||||||
|
#define NPY_COEFF_P2_EXPf 2.473615434895520810817e-01f
|
||||||
|
#define NPY_COEFF_P3_EXPf 5.114512081637298353406e-02f
|
||||||
|
#define NPY_COEFF_P4_EXPf 6.757896990527504603057e-03f
|
||||||
|
#define NPY_COEFF_P5_EXPf 5.082762527590693718096e-04f
|
||||||
|
#define NPY_COEFF_Q0_EXPf 1.000000000000000000000e+00f
|
||||||
|
#define NPY_COEFF_Q1_EXPf -2.742335390411667452936e-01f
|
||||||
|
#define NPY_COEFF_Q2_EXPf 2.159509375685829852307e-02f
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Constants used in vector implementation of log(x)
|
||||||
|
*/
|
||||||
|
#define NPY_COEFF_P0_LOGf 0.000000000000000000000e+00f
|
||||||
|
#define NPY_COEFF_P1_LOGf 9.999999999999998702752e-01f
|
||||||
|
#define NPY_COEFF_P2_LOGf 2.112677543073053063722e+00f
|
||||||
|
#define NPY_COEFF_P3_LOGf 1.480000633576506585156e+00f
|
||||||
|
#define NPY_COEFF_P4_LOGf 3.808837741388407920751e-01f
|
||||||
|
#define NPY_COEFF_P5_LOGf 2.589979117907922693523e-02f
|
||||||
|
#define NPY_COEFF_Q0_LOGf 1.000000000000000000000e+00f
|
||||||
|
#define NPY_COEFF_Q1_LOGf 2.612677543073109236779e+00f
|
||||||
|
#define NPY_COEFF_Q2_LOGf 2.453006071784736363091e+00f
|
||||||
|
#define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f
|
||||||
|
#define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f
|
||||||
|
#define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f
|
||||||
|
/*
|
||||||
|
* Constants used in vector implementation of sinf/cosf(x)
|
||||||
|
*/
|
||||||
|
#define NPY_TWO_O_PIf 0x1.45f306p-1f
|
||||||
|
#define NPY_CODY_WAITE_PI_O_2_HIGHf -0x1.921fb0p+00f
|
||||||
|
#define NPY_CODY_WAITE_PI_O_2_MEDf -0x1.5110b4p-22f
|
||||||
|
#define NPY_CODY_WAITE_PI_O_2_LOWf -0x1.846988p-48f
|
||||||
|
#define NPY_COEFF_INVF0_COSINEf 0x1.000000p+00f
|
||||||
|
#define NPY_COEFF_INVF2_COSINEf -0x1.000000p-01f
|
||||||
|
#define NPY_COEFF_INVF4_COSINEf 0x1.55553cp-05f
|
||||||
|
#define NPY_COEFF_INVF6_COSINEf -0x1.6c06dcp-10f
|
||||||
|
#define NPY_COEFF_INVF8_COSINEf 0x1.98e616p-16f
|
||||||
|
#define NPY_COEFF_INVF3_SINEf -0x1.555556p-03f
|
||||||
|
#define NPY_COEFF_INVF5_SINEf 0x1.11119ap-07f
|
||||||
|
#define NPY_COEFF_INVF7_SINEf -0x1.a06bbap-13f
|
||||||
|
#define NPY_COEFF_INVF9_SINEf 0x1.7d3bbcp-19f
|
||||||
|
/*
|
||||||
|
* Integer functions.
|
||||||
|
*/
|
||||||
|
NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);
|
||||||
|
NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);
|
||||||
|
NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);
|
||||||
|
NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);
|
||||||
|
NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);
|
||||||
|
NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);
|
||||||
|
|
||||||
|
NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);
|
||||||
|
NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);
|
||||||
|
NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);
|
||||||
|
NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);
|
||||||
|
NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
|
||||||
|
NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
|
||||||
|
|
||||||
|
NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
|
||||||
|
NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
|
||||||
|
NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
|
||||||
|
NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
|
||||||
|
NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
|
||||||
|
NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
|
||||||
|
NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
|
||||||
|
NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
|
||||||
|
NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
|
||||||
|
NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
|
||||||
|
|
||||||
|
NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
|
||||||
|
NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
|
||||||
|
NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
|
||||||
|
NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
|
||||||
|
NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
|
||||||
|
NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
|
||||||
|
NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
|
||||||
|
NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
|
||||||
|
NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
|
||||||
|
NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* avx function has a common API for both sin & cos. This enum is used to
|
||||||
|
* distinguish between the two
|
||||||
|
*/
|
||||||
|
typedef enum {
|
||||||
|
npy_compute_sin,
|
||||||
|
npy_compute_cos
|
||||||
|
} NPY_TRIG_OP;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* C99 double math funcs
|
||||||
|
*/
|
||||||
|
NPY_INPLACE double npy_sin(double x);
|
||||||
|
NPY_INPLACE double npy_cos(double x);
|
||||||
|
NPY_INPLACE double npy_tan(double x);
|
||||||
|
NPY_INPLACE double npy_sinh(double x);
|
||||||
|
NPY_INPLACE double npy_cosh(double x);
|
||||||
|
NPY_INPLACE double npy_tanh(double x);
|
||||||
|
|
||||||
|
NPY_INPLACE double npy_asin(double x);
|
||||||
|
NPY_INPLACE double npy_acos(double x);
|
||||||
|
NPY_INPLACE double npy_atan(double x);
|
||||||
|
|
||||||
|
NPY_INPLACE double npy_log(double x);
|
||||||
|
NPY_INPLACE double npy_log10(double x);
|
||||||
|
NPY_INPLACE double npy_exp(double x);
|
||||||
|
NPY_INPLACE double npy_sqrt(double x);
|
||||||
|
NPY_INPLACE double npy_cbrt(double x);
|
||||||
|
|
||||||
|
NPY_INPLACE double npy_fabs(double x);
|
||||||
|
NPY_INPLACE double npy_ceil(double x);
|
||||||
|
NPY_INPLACE double npy_fmod(double x, double y);
|
||||||
|
NPY_INPLACE double npy_floor(double x);
|
||||||
|
|
||||||
|
NPY_INPLACE double npy_expm1(double x);
|
||||||
|
NPY_INPLACE double npy_log1p(double x);
|
||||||
|
NPY_INPLACE double npy_hypot(double x, double y);
|
||||||
|
NPY_INPLACE double npy_acosh(double x);
|
||||||
|
NPY_INPLACE double npy_asinh(double xx);
|
||||||
|
NPY_INPLACE double npy_atanh(double x);
|
||||||
|
NPY_INPLACE double npy_rint(double x);
|
||||||
|
NPY_INPLACE double npy_trunc(double x);
|
||||||
|
NPY_INPLACE double npy_exp2(double x);
|
||||||
|
NPY_INPLACE double npy_log2(double x);
|
||||||
|
|
||||||
|
NPY_INPLACE double npy_atan2(double x, double y);
|
||||||
|
NPY_INPLACE double npy_pow(double x, double y);
|
||||||
|
NPY_INPLACE double npy_modf(double x, double* y);
|
||||||
|
NPY_INPLACE double npy_frexp(double x, int* y);
|
||||||
|
NPY_INPLACE double npy_ldexp(double n, int y);
|
||||||
|
|
||||||
|
NPY_INPLACE double npy_copysign(double x, double y);
|
||||||
|
double npy_nextafter(double x, double y);
|
||||||
|
double npy_spacing(double x);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* IEEE 754 fpu handling. Those are guaranteed to be macros
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* use builtins to avoid function calls in tight loops
|
||||||
|
* only available if npy_config.h is available (= numpys own build) */
|
||||||
|
#if HAVE___BUILTIN_ISNAN
|
||||||
|
#define npy_isnan(x) __builtin_isnan(x)
|
||||||
|
#else
|
||||||
|
#ifndef NPY_HAVE_DECL_ISNAN
|
||||||
|
#define npy_isnan(x) ((x) != (x))
|
||||||
|
#else
|
||||||
|
#if defined(_MSC_VER) && (_MSC_VER < 1900)
|
||||||
|
#define npy_isnan(x) _isnan((x))
|
||||||
|
#else
|
||||||
|
#define npy_isnan(x) isnan(x)
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
/* only available if npy_config.h is available (= numpys own build) */
|
||||||
|
#if HAVE___BUILTIN_ISFINITE
|
||||||
|
#define npy_isfinite(x) __builtin_isfinite(x)
|
||||||
|
#else
|
||||||
|
#ifndef NPY_HAVE_DECL_ISFINITE
|
||||||
|
#ifdef _MSC_VER
|
||||||
|
#define npy_isfinite(x) _finite((x))
|
||||||
|
#else
|
||||||
|
#define npy_isfinite(x) !npy_isnan((x) + (-x))
|
||||||
|
#endif
|
||||||
|
#else
|
||||||
|
#define npy_isfinite(x) isfinite((x))
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* only available if npy_config.h is available (= numpys own build) */
|
||||||
|
#if HAVE___BUILTIN_ISINF
|
||||||
|
#define npy_isinf(x) __builtin_isinf(x)
|
||||||
|
#else
|
||||||
|
#ifndef NPY_HAVE_DECL_ISINF
|
||||||
|
#define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x))
|
||||||
|
#else
|
||||||
|
#if defined(_MSC_VER) && (_MSC_VER < 1900)
|
||||||
|
#define npy_isinf(x) (!_finite((x)) && !_isnan((x)))
|
||||||
|
#else
|
||||||
|
#define npy_isinf(x) isinf((x))
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef NPY_HAVE_DECL_SIGNBIT
|
||||||
|
int _npy_signbit_f(float x);
|
||||||
|
int _npy_signbit_d(double x);
|
||||||
|
int _npy_signbit_ld(long double x);
|
||||||
|
#define npy_signbit(x) \
|
||||||
|
(sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \
|
||||||
|
: sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \
|
||||||
|
: _npy_signbit_f (x))
|
||||||
|
#else
|
||||||
|
#define npy_signbit(x) signbit((x))
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* float C99 math functions
|
||||||
|
*/
|
||||||
|
NPY_INPLACE float npy_sinf(float x);
|
||||||
|
NPY_INPLACE float npy_cosf(float x);
|
||||||
|
NPY_INPLACE float npy_tanf(float x);
|
||||||
|
NPY_INPLACE float npy_sinhf(float x);
|
||||||
|
NPY_INPLACE float npy_coshf(float x);
|
||||||
|
NPY_INPLACE float npy_tanhf(float x);
|
||||||
|
NPY_INPLACE float npy_fabsf(float x);
|
||||||
|
NPY_INPLACE float npy_floorf(float x);
|
||||||
|
NPY_INPLACE float npy_ceilf(float x);
|
||||||
|
NPY_INPLACE float npy_rintf(float x);
|
||||||
|
NPY_INPLACE float npy_truncf(float x);
|
||||||
|
NPY_INPLACE float npy_sqrtf(float x);
|
||||||
|
NPY_INPLACE float npy_cbrtf(float x);
|
||||||
|
NPY_INPLACE float npy_log10f(float x);
|
||||||
|
NPY_INPLACE float npy_logf(float x);
|
||||||
|
NPY_INPLACE float npy_expf(float x);
|
||||||
|
NPY_INPLACE float npy_expm1f(float x);
|
||||||
|
NPY_INPLACE float npy_asinf(float x);
|
||||||
|
NPY_INPLACE float npy_acosf(float x);
|
||||||
|
NPY_INPLACE float npy_atanf(float x);
|
||||||
|
NPY_INPLACE float npy_asinhf(float x);
|
||||||
|
NPY_INPLACE float npy_acoshf(float x);
|
||||||
|
NPY_INPLACE float npy_atanhf(float x);
|
||||||
|
NPY_INPLACE float npy_log1pf(float x);
|
||||||
|
NPY_INPLACE float npy_exp2f(float x);
|
||||||
|
NPY_INPLACE float npy_log2f(float x);
|
||||||
|
|
||||||
|
NPY_INPLACE float npy_atan2f(float x, float y);
|
||||||
|
NPY_INPLACE float npy_hypotf(float x, float y);
|
||||||
|
NPY_INPLACE float npy_powf(float x, float y);
|
||||||
|
NPY_INPLACE float npy_fmodf(float x, float y);
|
||||||
|
|
||||||
|
NPY_INPLACE float npy_modff(float x, float* y);
|
||||||
|
NPY_INPLACE float npy_frexpf(float x, int* y);
|
||||||
|
NPY_INPLACE float npy_ldexpf(float x, int y);
|
||||||
|
|
||||||
|
NPY_INPLACE float npy_copysignf(float x, float y);
|
||||||
|
float npy_nextafterf(float x, float y);
|
||||||
|
float npy_spacingf(float x);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* long double C99 math functions
|
||||||
|
*/
|
||||||
|
NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_sinhl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_coshl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_tanhl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_fabsl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_floorl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_ceill(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_rintl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_truncl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_cbrtl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_log10l(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_logl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_expm1l(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_asinl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_acosl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_atanl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_asinhl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_acoshl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_atanhl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_log1pl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_exp2l(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);
|
||||||
|
|
||||||
|
NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
|
||||||
|
NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
|
||||||
|
NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
|
||||||
|
NPY_INPLACE npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y);
|
||||||
|
|
||||||
|
NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
|
||||||
|
NPY_INPLACE npy_longdouble npy_frexpl(npy_longdouble x, int* y);
|
||||||
|
NPY_INPLACE npy_longdouble npy_ldexpl(npy_longdouble x, int y);
|
||||||
|
|
||||||
|
NPY_INPLACE npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y);
|
||||||
|
npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y);
|
||||||
|
npy_longdouble npy_spacingl(npy_longdouble x);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Non standard functions
|
||||||
|
*/
|
||||||
|
NPY_INPLACE double npy_deg2rad(double x);
|
||||||
|
NPY_INPLACE double npy_rad2deg(double x);
|
||||||
|
NPY_INPLACE double npy_logaddexp(double x, double y);
|
||||||
|
NPY_INPLACE double npy_logaddexp2(double x, double y);
|
||||||
|
NPY_INPLACE double npy_divmod(double x, double y, double *modulus);
|
||||||
|
NPY_INPLACE double npy_heaviside(double x, double h0);
|
||||||
|
|
||||||
|
NPY_INPLACE float npy_deg2radf(float x);
|
||||||
|
NPY_INPLACE float npy_rad2degf(float x);
|
||||||
|
NPY_INPLACE float npy_logaddexpf(float x, float y);
|
||||||
|
NPY_INPLACE float npy_logaddexp2f(float x, float y);
|
||||||
|
NPY_INPLACE float npy_divmodf(float x, float y, float *modulus);
|
||||||
|
NPY_INPLACE float npy_heavisidef(float x, float h0);
|
||||||
|
|
||||||
|
NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);
|
||||||
|
NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
|
||||||
|
NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
|
||||||
|
NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,
|
||||||
|
npy_longdouble *modulus);
|
||||||
|
NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
|
||||||
|
|
||||||
|
#define npy_degrees npy_rad2deg
|
||||||
|
#define npy_degreesf npy_rad2degf
|
||||||
|
#define npy_degreesl npy_rad2degl
|
||||||
|
|
||||||
|
#define npy_radians npy_deg2rad
|
||||||
|
#define npy_radiansf npy_deg2radf
|
||||||
|
#define npy_radiansl npy_deg2radl
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Complex declarations
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* C99 specifies that complex numbers have the same representation as
|
||||||
|
* an array of two elements, where the first element is the real part
|
||||||
|
* and the second element is the imaginary part.
|
||||||
|
*/
|
||||||
|
#define __NPY_CPACK_IMP(x, y, type, ctype) \
|
||||||
|
union { \
|
||||||
|
ctype z; \
|
||||||
|
type a[2]; \
|
||||||
|
} z1;; \
|
||||||
|
\
|
||||||
|
z1.a[0] = (x); \
|
||||||
|
z1.a[1] = (y); \
|
||||||
|
\
|
||||||
|
return z1.z;
|
||||||
|
|
||||||
|
static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
|
||||||
|
{
|
||||||
|
__NPY_CPACK_IMP(x, y, double, npy_cdouble);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
|
||||||
|
{
|
||||||
|
__NPY_CPACK_IMP(x, y, float, npy_cfloat);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
|
||||||
|
{
|
||||||
|
__NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
|
||||||
|
}
|
||||||
|
#undef __NPY_CPACK_IMP
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Same remark as above, but in the other direction: extract first/second
|
||||||
|
* member of complex number, assuming a C99-compatible representation
|
||||||
|
*
|
||||||
|
* Those are defineds as static inline, and such as a reasonable compiler would
|
||||||
|
* most likely compile this to one or two instructions (on CISC at least)
|
||||||
|
*/
|
||||||
|
#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \
|
||||||
|
union { \
|
||||||
|
ctype z; \
|
||||||
|
type a[2]; \
|
||||||
|
} __z_repr; \
|
||||||
|
__z_repr.z = z; \
|
||||||
|
\
|
||||||
|
return __z_repr.a[index];
|
||||||
|
|
||||||
|
static NPY_INLINE double npy_creal(npy_cdouble z)
|
||||||
|
{
|
||||||
|
__NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE double npy_cimag(npy_cdouble z)
|
||||||
|
{
|
||||||
|
__NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE float npy_crealf(npy_cfloat z)
|
||||||
|
{
|
||||||
|
__NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE float npy_cimagf(npy_cfloat z)
|
||||||
|
{
|
||||||
|
__NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
|
||||||
|
{
|
||||||
|
__NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
|
||||||
|
}
|
||||||
|
|
||||||
|
static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
|
||||||
|
{
|
||||||
|
__NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
|
||||||
|
}
|
||||||
|
#undef __NPY_CEXTRACT_IMP
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Double precision complex functions
|
||||||
|
*/
|
||||||
|
double npy_cabs(npy_cdouble z);
|
||||||
|
double npy_carg(npy_cdouble z);
|
||||||
|
|
||||||
|
npy_cdouble npy_cexp(npy_cdouble z);
|
||||||
|
npy_cdouble npy_clog(npy_cdouble z);
|
||||||
|
npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
|
||||||
|
|
||||||
|
npy_cdouble npy_csqrt(npy_cdouble z);
|
||||||
|
|
||||||
|
npy_cdouble npy_ccos(npy_cdouble z);
|
||||||
|
npy_cdouble npy_csin(npy_cdouble z);
|
||||||
|
npy_cdouble npy_ctan(npy_cdouble z);
|
||||||
|
|
||||||
|
npy_cdouble npy_ccosh(npy_cdouble z);
|
||||||
|
npy_cdouble npy_csinh(npy_cdouble z);
|
||||||
|
npy_cdouble npy_ctanh(npy_cdouble z);
|
||||||
|
|
||||||
|
npy_cdouble npy_cacos(npy_cdouble z);
|
||||||
|
npy_cdouble npy_casin(npy_cdouble z);
|
||||||
|
npy_cdouble npy_catan(npy_cdouble z);
|
||||||
|
|
||||||
|
npy_cdouble npy_cacosh(npy_cdouble z);
|
||||||
|
npy_cdouble npy_casinh(npy_cdouble z);
|
||||||
|
npy_cdouble npy_catanh(npy_cdouble z);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Single precision complex functions
|
||||||
|
*/
|
||||||
|
float npy_cabsf(npy_cfloat z);
|
||||||
|
float npy_cargf(npy_cfloat z);
|
||||||
|
|
||||||
|
npy_cfloat npy_cexpf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_clogf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
|
||||||
|
|
||||||
|
npy_cfloat npy_csqrtf(npy_cfloat z);
|
||||||
|
|
||||||
|
npy_cfloat npy_ccosf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_csinf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_ctanf(npy_cfloat z);
|
||||||
|
|
||||||
|
npy_cfloat npy_ccoshf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_csinhf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_ctanhf(npy_cfloat z);
|
||||||
|
|
||||||
|
npy_cfloat npy_cacosf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_casinf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_catanf(npy_cfloat z);
|
||||||
|
|
||||||
|
npy_cfloat npy_cacoshf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_casinhf(npy_cfloat z);
|
||||||
|
npy_cfloat npy_catanhf(npy_cfloat z);
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Extended precision complex functions
|
||||||
|
*/
|
||||||
|
npy_longdouble npy_cabsl(npy_clongdouble z);
|
||||||
|
npy_longdouble npy_cargl(npy_clongdouble z);
|
||||||
|
|
||||||
|
npy_clongdouble npy_cexpl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_clogl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);
|
||||||
|
|
||||||
|
npy_clongdouble npy_csqrtl(npy_clongdouble z);
|
||||||
|
|
||||||
|
npy_clongdouble npy_ccosl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_csinl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_ctanl(npy_clongdouble z);
|
||||||
|
|
||||||
|
npy_clongdouble npy_ccoshl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_csinhl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_ctanhl(npy_clongdouble z);
|
||||||
|
|
||||||
|
npy_clongdouble npy_cacosl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_casinl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_catanl(npy_clongdouble z);
|
||||||
|
|
||||||
|
npy_clongdouble npy_cacoshl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_casinhl(npy_clongdouble z);
|
||||||
|
npy_clongdouble npy_catanhl(npy_clongdouble z);
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Functions that set the floating point error
|
||||||
|
* status word.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* platform-dependent code translates floating point
|
||||||
|
* status to an integer sum of these values
|
||||||
|
*/
|
||||||
|
#define NPY_FPE_DIVIDEBYZERO 1
|
||||||
|
#define NPY_FPE_OVERFLOW 2
|
||||||
|
#define NPY_FPE_UNDERFLOW 4
|
||||||
|
#define NPY_FPE_INVALID 8
|
||||||
|
|
||||||
|
int npy_clear_floatstatus_barrier(char*);
|
||||||
|
int npy_get_floatstatus_barrier(char*);
|
||||||
|
/*
|
||||||
|
* use caution with these - clang and gcc8.1 are known to reorder calls
|
||||||
|
* to this form of the function which can defeat the check. The _barrier
|
||||||
|
* form of the call is preferable, where the argument is
|
||||||
|
* (char*)&local_variable
|
||||||
|
*/
|
||||||
|
int npy_clear_floatstatus(void);
|
||||||
|
int npy_get_floatstatus(void);
|
||||||
|
|
||||||
|
void npy_set_floatstatus_divbyzero(void);
|
||||||
|
void npy_set_floatstatus_overflow(void);
|
||||||
|
void npy_set_floatstatus_underflow(void);
|
||||||
|
void npy_set_floatstatus_invalid(void);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if NPY_INLINE_MATH
|
||||||
|
#include "npy_math_internal.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,19 @@
|
|||||||
|
/*
|
||||||
|
* This include file is provided for inclusion in Cython *.pyd files where
|
||||||
|
* one would like to define the NPY_NO_DEPRECATED_API macro. It can be
|
||||||
|
* included by
|
||||||
|
*
|
||||||
|
* cdef extern from "npy_no_deprecated_api.h": pass
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
#ifndef NPY_NO_DEPRECATED_API
|
||||||
|
|
||||||
|
/* put this check here since there may be multiple includes in C extensions. */
|
||||||
|
#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \
|
||||||
|
defined(OLD_DEFINES_H)
|
||||||
|
#error "npy_no_deprecated_api.h" must be first among numpy includes.
|
||||||
|
#else
|
||||||
|
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,30 @@
|
|||||||
|
#ifndef _NPY_OS_H_
|
||||||
|
#define _NPY_OS_H_
|
||||||
|
|
||||||
|
#if defined(linux) || defined(__linux) || defined(__linux__)
|
||||||
|
#define NPY_OS_LINUX
|
||||||
|
#elif defined(__FreeBSD__) || defined(__NetBSD__) || \
|
||||||
|
defined(__OpenBSD__) || defined(__DragonFly__)
|
||||||
|
#define NPY_OS_BSD
|
||||||
|
#ifdef __FreeBSD__
|
||||||
|
#define NPY_OS_FREEBSD
|
||||||
|
#elif defined(__NetBSD__)
|
||||||
|
#define NPY_OS_NETBSD
|
||||||
|
#elif defined(__OpenBSD__)
|
||||||
|
#define NPY_OS_OPENBSD
|
||||||
|
#elif defined(__DragonFly__)
|
||||||
|
#define NPY_OS_DRAGONFLY
|
||||||
|
#endif
|
||||||
|
#elif defined(sun) || defined(__sun)
|
||||||
|
#define NPY_OS_SOLARIS
|
||||||
|
#elif defined(__CYGWIN__)
|
||||||
|
#define NPY_OS_CYGWIN
|
||||||
|
#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
|
||||||
|
#define NPY_OS_WIN32
|
||||||
|
#elif defined(__APPLE__)
|
||||||
|
#define NPY_OS_DARWIN
|
||||||
|
#else
|
||||||
|
#define NPY_OS_UNKNOWN
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,44 @@
|
|||||||
|
#ifndef _NPY_NUMPYCONFIG_H_
|
||||||
|
#define _NPY_NUMPYCONFIG_H_
|
||||||
|
|
||||||
|
#include "_numpyconfig.h"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On Mac OS X, because there is only one configuration stage for all the archs
|
||||||
|
* in universal builds, any macro which depends on the arch needs to be
|
||||||
|
* hardcoded
|
||||||
|
*/
|
||||||
|
#ifdef __APPLE__
|
||||||
|
#undef NPY_SIZEOF_LONG
|
||||||
|
#undef NPY_SIZEOF_PY_INTPTR_T
|
||||||
|
|
||||||
|
#ifdef __LP64__
|
||||||
|
#define NPY_SIZEOF_LONG 8
|
||||||
|
#define NPY_SIZEOF_PY_INTPTR_T 8
|
||||||
|
#else
|
||||||
|
#define NPY_SIZEOF_LONG 4
|
||||||
|
#define NPY_SIZEOF_PY_INTPTR_T 4
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* To help with the NPY_NO_DEPRECATED_API macro, we include API version
|
||||||
|
* numbers for specific versions of NumPy. To exclude all API that was
|
||||||
|
* deprecated as of 1.7, add the following before #including any NumPy
|
||||||
|
* headers:
|
||||||
|
* #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
|
||||||
|
*/
|
||||||
|
#define NPY_1_7_API_VERSION 0x00000007
|
||||||
|
#define NPY_1_8_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_9_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_10_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_11_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_12_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_13_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_14_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_15_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_16_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_17_API_VERSION 0x00000008
|
||||||
|
#define NPY_1_18_API_VERSION 0x00000008
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,187 @@
|
|||||||
|
/* This header is deprecated as of NumPy 1.7 */
|
||||||
|
#ifndef OLD_DEFINES_H
|
||||||
|
#define OLD_DEFINES_H
|
||||||
|
|
||||||
|
#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION
|
||||||
|
#error The header "old_defines.h" is deprecated as of NumPy 1.7.
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define NDARRAY_VERSION NPY_VERSION
|
||||||
|
|
||||||
|
#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE
|
||||||
|
#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE
|
||||||
|
#define PyArray_BUFSIZE NPY_BUFSIZE
|
||||||
|
|
||||||
|
#define PyArray_PRIORITY NPY_PRIORITY
|
||||||
|
#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY
|
||||||
|
#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE
|
||||||
|
|
||||||
|
#define NPY_MAX PyArray_MAX
|
||||||
|
#define NPY_MIN PyArray_MIN
|
||||||
|
|
||||||
|
#define PyArray_TYPES NPY_TYPES
|
||||||
|
#define PyArray_BOOL NPY_BOOL
|
||||||
|
#define PyArray_BYTE NPY_BYTE
|
||||||
|
#define PyArray_UBYTE NPY_UBYTE
|
||||||
|
#define PyArray_SHORT NPY_SHORT
|
||||||
|
#define PyArray_USHORT NPY_USHORT
|
||||||
|
#define PyArray_INT NPY_INT
|
||||||
|
#define PyArray_UINT NPY_UINT
|
||||||
|
#define PyArray_LONG NPY_LONG
|
||||||
|
#define PyArray_ULONG NPY_ULONG
|
||||||
|
#define PyArray_LONGLONG NPY_LONGLONG
|
||||||
|
#define PyArray_ULONGLONG NPY_ULONGLONG
|
||||||
|
#define PyArray_HALF NPY_HALF
|
||||||
|
#define PyArray_FLOAT NPY_FLOAT
|
||||||
|
#define PyArray_DOUBLE NPY_DOUBLE
|
||||||
|
#define PyArray_LONGDOUBLE NPY_LONGDOUBLE
|
||||||
|
#define PyArray_CFLOAT NPY_CFLOAT
|
||||||
|
#define PyArray_CDOUBLE NPY_CDOUBLE
|
||||||
|
#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE
|
||||||
|
#define PyArray_OBJECT NPY_OBJECT
|
||||||
|
#define PyArray_STRING NPY_STRING
|
||||||
|
#define PyArray_UNICODE NPY_UNICODE
|
||||||
|
#define PyArray_VOID NPY_VOID
|
||||||
|
#define PyArray_DATETIME NPY_DATETIME
|
||||||
|
#define PyArray_TIMEDELTA NPY_TIMEDELTA
|
||||||
|
#define PyArray_NTYPES NPY_NTYPES
|
||||||
|
#define PyArray_NOTYPE NPY_NOTYPE
|
||||||
|
#define PyArray_CHAR NPY_CHAR
|
||||||
|
#define PyArray_USERDEF NPY_USERDEF
|
||||||
|
#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES
|
||||||
|
|
||||||
|
#define PyArray_INTP NPY_INTP
|
||||||
|
#define PyArray_UINTP NPY_UINTP
|
||||||
|
|
||||||
|
#define PyArray_INT8 NPY_INT8
|
||||||
|
#define PyArray_UINT8 NPY_UINT8
|
||||||
|
#define PyArray_INT16 NPY_INT16
|
||||||
|
#define PyArray_UINT16 NPY_UINT16
|
||||||
|
#define PyArray_INT32 NPY_INT32
|
||||||
|
#define PyArray_UINT32 NPY_UINT32
|
||||||
|
|
||||||
|
#ifdef NPY_INT64
|
||||||
|
#define PyArray_INT64 NPY_INT64
|
||||||
|
#define PyArray_UINT64 NPY_UINT64
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef NPY_INT128
|
||||||
|
#define PyArray_INT128 NPY_INT128
|
||||||
|
#define PyArray_UINT128 NPY_UINT128
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef NPY_FLOAT16
|
||||||
|
#define PyArray_FLOAT16 NPY_FLOAT16
|
||||||
|
#define PyArray_COMPLEX32 NPY_COMPLEX32
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef NPY_FLOAT80
|
||||||
|
#define PyArray_FLOAT80 NPY_FLOAT80
|
||||||
|
#define PyArray_COMPLEX160 NPY_COMPLEX160
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef NPY_FLOAT96
|
||||||
|
#define PyArray_FLOAT96 NPY_FLOAT96
|
||||||
|
#define PyArray_COMPLEX192 NPY_COMPLEX192
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef NPY_FLOAT128
|
||||||
|
#define PyArray_FLOAT128 NPY_FLOAT128
|
||||||
|
#define PyArray_COMPLEX256 NPY_COMPLEX256
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define PyArray_FLOAT32 NPY_FLOAT32
|
||||||
|
#define PyArray_COMPLEX64 NPY_COMPLEX64
|
||||||
|
#define PyArray_FLOAT64 NPY_FLOAT64
|
||||||
|
#define PyArray_COMPLEX128 NPY_COMPLEX128
|
||||||
|
|
||||||
|
|
||||||
|
#define PyArray_TYPECHAR NPY_TYPECHAR
|
||||||
|
#define PyArray_BOOLLTR NPY_BOOLLTR
|
||||||
|
#define PyArray_BYTELTR NPY_BYTELTR
|
||||||
|
#define PyArray_UBYTELTR NPY_UBYTELTR
|
||||||
|
#define PyArray_SHORTLTR NPY_SHORTLTR
|
||||||
|
#define PyArray_USHORTLTR NPY_USHORTLTR
|
||||||
|
#define PyArray_INTLTR NPY_INTLTR
|
||||||
|
#define PyArray_UINTLTR NPY_UINTLTR
|
||||||
|
#define PyArray_LONGLTR NPY_LONGLTR
|
||||||
|
#define PyArray_ULONGLTR NPY_ULONGLTR
|
||||||
|
#define PyArray_LONGLONGLTR NPY_LONGLONGLTR
|
||||||
|
#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR
|
||||||
|
#define PyArray_HALFLTR NPY_HALFLTR
|
||||||
|
#define PyArray_FLOATLTR NPY_FLOATLTR
|
||||||
|
#define PyArray_DOUBLELTR NPY_DOUBLELTR
|
||||||
|
#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR
|
||||||
|
#define PyArray_CFLOATLTR NPY_CFLOATLTR
|
||||||
|
#define PyArray_CDOUBLELTR NPY_CDOUBLELTR
|
||||||
|
#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR
|
||||||
|
#define PyArray_OBJECTLTR NPY_OBJECTLTR
|
||||||
|
#define PyArray_STRINGLTR NPY_STRINGLTR
|
||||||
|
#define PyArray_STRINGLTR2 NPY_STRINGLTR2
|
||||||
|
#define PyArray_UNICODELTR NPY_UNICODELTR
|
||||||
|
#define PyArray_VOIDLTR NPY_VOIDLTR
|
||||||
|
#define PyArray_DATETIMELTR NPY_DATETIMELTR
|
||||||
|
#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR
|
||||||
|
#define PyArray_CHARLTR NPY_CHARLTR
|
||||||
|
#define PyArray_INTPLTR NPY_INTPLTR
|
||||||
|
#define PyArray_UINTPLTR NPY_UINTPLTR
|
||||||
|
#define PyArray_GENBOOLLTR NPY_GENBOOLLTR
|
||||||
|
#define PyArray_SIGNEDLTR NPY_SIGNEDLTR
|
||||||
|
#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR
|
||||||
|
#define PyArray_FLOATINGLTR NPY_FLOATINGLTR
|
||||||
|
#define PyArray_COMPLEXLTR NPY_COMPLEXLTR
|
||||||
|
|
||||||
|
#define PyArray_QUICKSORT NPY_QUICKSORT
|
||||||
|
#define PyArray_HEAPSORT NPY_HEAPSORT
|
||||||
|
#define PyArray_MERGESORT NPY_MERGESORT
|
||||||
|
#define PyArray_SORTKIND NPY_SORTKIND
|
||||||
|
#define PyArray_NSORTS NPY_NSORTS
|
||||||
|
|
||||||
|
#define PyArray_NOSCALAR NPY_NOSCALAR
|
||||||
|
#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR
|
||||||
|
#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR
|
||||||
|
#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR
|
||||||
|
#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR
|
||||||
|
#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR
|
||||||
|
#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR
|
||||||
|
#define PyArray_SCALARKIND NPY_SCALARKIND
|
||||||
|
#define PyArray_NSCALARKINDS NPY_NSCALARKINDS
|
||||||
|
|
||||||
|
#define PyArray_ANYORDER NPY_ANYORDER
|
||||||
|
#define PyArray_CORDER NPY_CORDER
|
||||||
|
#define PyArray_FORTRANORDER NPY_FORTRANORDER
|
||||||
|
#define PyArray_ORDER NPY_ORDER
|
||||||
|
|
||||||
|
#define PyDescr_ISBOOL PyDataType_ISBOOL
|
||||||
|
#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED
|
||||||
|
#define PyDescr_ISSIGNED PyDataType_ISSIGNED
|
||||||
|
#define PyDescr_ISINTEGER PyDataType_ISINTEGER
|
||||||
|
#define PyDescr_ISFLOAT PyDataType_ISFLOAT
|
||||||
|
#define PyDescr_ISNUMBER PyDataType_ISNUMBER
|
||||||
|
#define PyDescr_ISSTRING PyDataType_ISSTRING
|
||||||
|
#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX
|
||||||
|
#define PyDescr_ISPYTHON PyDataType_ISPYTHON
|
||||||
|
#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE
|
||||||
|
#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF
|
||||||
|
#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED
|
||||||
|
#define PyDescr_ISOBJECT PyDataType_ISOBJECT
|
||||||
|
#define PyDescr_HASFIELDS PyDataType_HASFIELDS
|
||||||
|
|
||||||
|
#define PyArray_LITTLE NPY_LITTLE
|
||||||
|
#define PyArray_BIG NPY_BIG
|
||||||
|
#define PyArray_NATIVE NPY_NATIVE
|
||||||
|
#define PyArray_SWAP NPY_SWAP
|
||||||
|
#define PyArray_IGNORE NPY_IGNORE
|
||||||
|
|
||||||
|
#define PyArray_NATBYTE NPY_NATBYTE
|
||||||
|
#define PyArray_OPPBYTE NPY_OPPBYTE
|
||||||
|
|
||||||
|
#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE
|
||||||
|
|
||||||
|
#define PyArray_USE_PYMEM NPY_USE_PYMEM
|
||||||
|
|
||||||
|
#define PyArray_RemoveLargest PyArray_RemoveSmallest
|
||||||
|
|
||||||
|
#define PyArray_UCS4 npy_ucs4
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,25 @@
|
|||||||
|
#include "arrayobject.h"
|
||||||
|
|
||||||
|
#ifndef PYPY_VERSION
|
||||||
|
#ifndef REFCOUNT
|
||||||
|
# define REFCOUNT NPY_REFCOUNT
|
||||||
|
# define MAX_ELSIZE 16
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define PyArray_UNSIGNED_TYPES
|
||||||
|
#define PyArray_SBYTE NPY_BYTE
|
||||||
|
#define PyArray_CopyArray PyArray_CopyInto
|
||||||
|
#define _PyArray_multiply_list PyArray_MultiplyIntList
|
||||||
|
#define PyArray_ISSPACESAVER(m) NPY_FALSE
|
||||||
|
#define PyScalarArray_Check PyArray_CheckScalar
|
||||||
|
|
||||||
|
#define CONTIGUOUS NPY_CONTIGUOUS
|
||||||
|
#define OWN_DIMENSIONS 0
|
||||||
|
#define OWN_STRIDES 0
|
||||||
|
#define OWN_DATA NPY_OWNDATA
|
||||||
|
#define SAVESPACE 0
|
||||||
|
#define SAVESPACEBIT 0
|
||||||
|
|
||||||
|
#undef import_array
|
||||||
|
#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } }
|
@ -0,0 +1,20 @@
|
|||||||
|
#ifndef _RANDOM_BITGEN_H
|
||||||
|
#define _RANDOM_BITGEN_H
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <stdbool.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
/* Must match the declaration in numpy/random/<any>.pxd */
|
||||||
|
|
||||||
|
typedef struct bitgen {
|
||||||
|
void *state;
|
||||||
|
uint64_t (*next_uint64)(void *st);
|
||||||
|
uint32_t (*next_uint32)(void *st);
|
||||||
|
double (*next_double)(void *st);
|
||||||
|
uint64_t (*next_raw)(void *st);
|
||||||
|
} bitgen_t;
|
||||||
|
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,200 @@
|
|||||||
|
#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_
|
||||||
|
#define _RANDOMDGEN__DISTRIBUTIONS_H_
|
||||||
|
|
||||||
|
#include "Python.h"
|
||||||
|
#include "numpy/npy_common.h"
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <stdbool.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include "numpy/npy_math.h"
|
||||||
|
#include "numpy/random/bitgen.h"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* RAND_INT_TYPE is used to share integer generators with RandomState which
|
||||||
|
* used long in place of int64_t. If changing a distribution that uses
|
||||||
|
* RAND_INT_TYPE, then the original unmodified copy must be retained for
|
||||||
|
* use in RandomState by copying to the legacy distributions source file.
|
||||||
|
*/
|
||||||
|
#ifdef NP_RANDOM_LEGACY
|
||||||
|
#define RAND_INT_TYPE long
|
||||||
|
#define RAND_INT_MAX LONG_MAX
|
||||||
|
#else
|
||||||
|
#define RAND_INT_TYPE int64_t
|
||||||
|
#define RAND_INT_MAX INT64_MAX
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef _MSC_VER
|
||||||
|
#define DECLDIR __declspec(dllexport)
|
||||||
|
#else
|
||||||
|
#define DECLDIR extern
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef MIN
|
||||||
|
#define MIN(x, y) (((x) < (y)) ? x : y)
|
||||||
|
#define MAX(x, y) (((x) > (y)) ? x : y)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef M_PI
|
||||||
|
#define M_PI 3.14159265358979323846264338328
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef struct s_binomial_t {
|
||||||
|
int has_binomial; /* !=0: following parameters initialized for binomial */
|
||||||
|
double psave;
|
||||||
|
RAND_INT_TYPE nsave;
|
||||||
|
double r;
|
||||||
|
double q;
|
||||||
|
double fm;
|
||||||
|
RAND_INT_TYPE m;
|
||||||
|
double p1;
|
||||||
|
double xm;
|
||||||
|
double xl;
|
||||||
|
double xr;
|
||||||
|
double c;
|
||||||
|
double laml;
|
||||||
|
double lamr;
|
||||||
|
double p2;
|
||||||
|
double p3;
|
||||||
|
double p4;
|
||||||
|
} binomial_t;
|
||||||
|
|
||||||
|
DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
|
||||||
|
DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
|
||||||
|
|
||||||
|
DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
|
||||||
|
|
||||||
|
DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
|
||||||
|
DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
|
||||||
|
DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
|
||||||
|
DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
|
||||||
|
|
||||||
|
DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
|
||||||
|
DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
|
||||||
|
DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
|
||||||
|
DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
|
||||||
|
|
||||||
|
DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
|
||||||
|
|
||||||
|
DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
|
||||||
|
DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
|
||||||
|
|
||||||
|
DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
|
||||||
|
DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
|
||||||
|
DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
|
||||||
|
DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
|
||||||
|
DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
|
||||||
|
DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
|
||||||
|
DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
|
||||||
|
DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
|
||||||
|
DECLDIR double random_power(bitgen_t *bitgen_state, double a);
|
||||||
|
DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
|
||||||
|
DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
|
||||||
|
DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
|
||||||
|
DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
|
||||||
|
DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
|
||||||
|
DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
|
||||||
|
DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
|
||||||
|
double nonc);
|
||||||
|
DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
|
||||||
|
double dfden, double nonc);
|
||||||
|
DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
|
||||||
|
DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
|
||||||
|
DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
|
||||||
|
double right);
|
||||||
|
|
||||||
|
DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
|
||||||
|
DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
|
||||||
|
double p);
|
||||||
|
|
||||||
|
DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
|
||||||
|
int64_t n, binomial_t *binomial);
|
||||||
|
|
||||||
|
DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p);
|
||||||
|
DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p);
|
||||||
|
DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
|
||||||
|
DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
|
||||||
|
int64_t good, int64_t bad, int64_t sample);
|
||||||
|
DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
|
||||||
|
|
||||||
|
/* Generate random uint64 numbers in closed interval [off, off + rng]. */
|
||||||
|
DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
|
||||||
|
uint64_t rng, uint64_t mask,
|
||||||
|
bool use_masked);
|
||||||
|
|
||||||
|
/* Generate random uint32 numbers in closed interval [off, off + rng]. */
|
||||||
|
DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
|
||||||
|
uint32_t off, uint32_t rng,
|
||||||
|
uint32_t mask, bool use_masked,
|
||||||
|
int *bcnt, uint32_t *buf);
|
||||||
|
DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
|
||||||
|
uint16_t off, uint16_t rng,
|
||||||
|
uint16_t mask, bool use_masked,
|
||||||
|
int *bcnt, uint32_t *buf);
|
||||||
|
DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
|
||||||
|
uint8_t rng, uint8_t mask,
|
||||||
|
bool use_masked, int *bcnt,
|
||||||
|
uint32_t *buf);
|
||||||
|
DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
|
||||||
|
npy_bool rng, npy_bool mask,
|
||||||
|
bool use_masked, int *bcnt,
|
||||||
|
uint32_t *buf);
|
||||||
|
|
||||||
|
DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
|
||||||
|
uint64_t rng, npy_intp cnt,
|
||||||
|
bool use_masked, uint64_t *out);
|
||||||
|
DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
|
||||||
|
uint32_t rng, npy_intp cnt,
|
||||||
|
bool use_masked, uint32_t *out);
|
||||||
|
DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
|
||||||
|
uint16_t rng, npy_intp cnt,
|
||||||
|
bool use_masked, uint16_t *out);
|
||||||
|
DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
|
||||||
|
uint8_t rng, npy_intp cnt,
|
||||||
|
bool use_masked, uint8_t *out);
|
||||||
|
DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
|
||||||
|
npy_bool rng, npy_intp cnt,
|
||||||
|
bool use_masked, npy_bool *out);
|
||||||
|
|
||||||
|
DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
|
||||||
|
double *pix, npy_intp d, binomial_t *binomial);
|
||||||
|
|
||||||
|
/* multivariate hypergeometric, "count" method */
|
||||||
|
DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
|
||||||
|
int64_t total,
|
||||||
|
size_t num_colors, int64_t *colors,
|
||||||
|
int64_t nsample,
|
||||||
|
size_t num_variates, int64_t *variates);
|
||||||
|
|
||||||
|
/* multivariate hypergeometric, "marginals" method */
|
||||||
|
DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
|
||||||
|
int64_t total,
|
||||||
|
size_t num_colors, int64_t *colors,
|
||||||
|
int64_t nsample,
|
||||||
|
size_t num_variates, int64_t *variates);
|
||||||
|
|
||||||
|
/* Common to legacy-distributions.c and distributions.c but not exported */
|
||||||
|
|
||||||
|
RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
|
||||||
|
RAND_INT_TYPE n,
|
||||||
|
double p,
|
||||||
|
binomial_t *binomial);
|
||||||
|
RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
|
||||||
|
RAND_INT_TYPE n,
|
||||||
|
double p,
|
||||||
|
binomial_t *binomial);
|
||||||
|
double random_loggam(double x);
|
||||||
|
static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
|
||||||
|
return bitgen_state->next_double(bitgen_state->state);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,338 @@
|
|||||||
|
|
||||||
|
=================
|
||||||
|
NumPy Ufunc C-API
|
||||||
|
=================
|
||||||
|
::
|
||||||
|
|
||||||
|
PyObject *
|
||||||
|
PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void
|
||||||
|
**data, char *types, int ntypes, int nin, int
|
||||||
|
nout, int identity, const char *name, const
|
||||||
|
char *doc, int unused)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int
|
||||||
|
usertype, PyUFuncGenericFunction
|
||||||
|
function, const int *arg_types, void
|
||||||
|
*data)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject
|
||||||
|
*kwds, PyArrayObject **op)
|
||||||
|
|
||||||
|
|
||||||
|
This generic function is called with the ufunc object, the arguments to it,
|
||||||
|
and an array of (pointers to) PyArrayObjects which are NULL.
|
||||||
|
|
||||||
|
'op' is an array of at least NPY_MAXARGS PyArrayObject *.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject
|
||||||
|
**errobj)
|
||||||
|
|
||||||
|
|
||||||
|
On return, if errobj is populated with a non-NULL value, the caller
|
||||||
|
owns a new reference to errobj.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_clearfperr()
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_getfperr(void )
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int
|
||||||
|
*first)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_ReplaceLoopBySignature(PyUFuncObject
|
||||||
|
*func, PyUFuncGenericFunction
|
||||||
|
newfunc, const int
|
||||||
|
*signature, PyUFuncGenericFunction
|
||||||
|
*oldfunc)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
PyObject *
|
||||||
|
PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void
|
||||||
|
**data, char *types, int
|
||||||
|
ntypes, int nin, int nout, int
|
||||||
|
identity, const char *name, const
|
||||||
|
char *doc, int unused, const char
|
||||||
|
*signature)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_SetUsesArraysAsData(void **data, size_t i)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void
|
||||||
|
*func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
void
|
||||||
|
PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp
|
||||||
|
*steps, void *func)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING
|
||||||
|
casting, PyArrayObject
|
||||||
|
**operands, PyObject
|
||||||
|
*type_tup, PyArray_Descr **out_dtypes)
|
||||||
|
|
||||||
|
|
||||||
|
This function applies the default type resolution rules
|
||||||
|
for the provided ufunc.
|
||||||
|
|
||||||
|
Returns 0 on success, -1 on error.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING
|
||||||
|
casting, PyArrayObject
|
||||||
|
**operands, PyArray_Descr **dtypes)
|
||||||
|
|
||||||
|
|
||||||
|
Validates that the input operands can be cast to
|
||||||
|
the input types, and the output types can be cast to
|
||||||
|
the output operands where provided.
|
||||||
|
|
||||||
|
Returns 0 on success, -1 (with exception raised) on validation failure.
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
int
|
||||||
|
PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr
|
||||||
|
*user_dtype, PyUFuncGenericFunction
|
||||||
|
function, PyArray_Descr
|
||||||
|
**arg_dtypes, void *data)
|
||||||
|
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
PyObject *
|
||||||
|
PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction
|
||||||
|
*func, void
|
||||||
|
**data, char
|
||||||
|
*types, int ntypes, int
|
||||||
|
nin, int nout, int
|
||||||
|
identity, const char
|
||||||
|
*name, const char
|
||||||
|
*doc, const int
|
||||||
|
unused, const char
|
||||||
|
*signature, PyObject
|
||||||
|
*identity_value)
|
||||||
|
|
||||||
|
|
@ -0,0 +1,369 @@
|
|||||||
|
#ifndef Py_UFUNCOBJECT_H
|
||||||
|
#define Py_UFUNCOBJECT_H
|
||||||
|
|
||||||
|
#include <numpy/npy_math.h>
|
||||||
|
#include <numpy/npy_common.h>
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The legacy generic inner loop for a standard element-wise or
|
||||||
|
* generalized ufunc.
|
||||||
|
*/
|
||||||
|
typedef void (*PyUFuncGenericFunction)
|
||||||
|
(char **args,
|
||||||
|
npy_intp *dimensions,
|
||||||
|
npy_intp *strides,
|
||||||
|
void *innerloopdata);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The most generic one-dimensional inner loop for
|
||||||
|
* a masked standard element-wise ufunc. "Masked" here means that it skips
|
||||||
|
* doing calculations on any items for which the maskptr array has a true
|
||||||
|
* value.
|
||||||
|
*/
|
||||||
|
typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
|
||||||
|
char **dataptrs, npy_intp *strides,
|
||||||
|
char *maskptr, npy_intp mask_stride,
|
||||||
|
npy_intp count,
|
||||||
|
NpyAuxData *innerloopdata);
|
||||||
|
|
||||||
|
/* Forward declaration for the type resolver and loop selector typedefs */
|
||||||
|
struct _tagPyUFuncObject;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given the operands for calling a ufunc, should determine the
|
||||||
|
* calculation input and output data types and return an inner loop function.
|
||||||
|
* This function should validate that the casting rule is being followed,
|
||||||
|
* and fail if it is not.
|
||||||
|
*
|
||||||
|
* For backwards compatibility, the regular type resolution function does not
|
||||||
|
* support auxiliary data with object semantics. The type resolution call
|
||||||
|
* which returns a masked generic function returns a standard NpyAuxData
|
||||||
|
* object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
|
||||||
|
* work.
|
||||||
|
*
|
||||||
|
* ufunc: The ufunc object.
|
||||||
|
* casting: The 'casting' parameter provided to the ufunc.
|
||||||
|
* operands: An array of length (ufunc->nin + ufunc->nout),
|
||||||
|
* with the output parameters possibly NULL.
|
||||||
|
* type_tup: Either NULL, or the type_tup passed to the ufunc.
|
||||||
|
* out_dtypes: An array which should be populated with new
|
||||||
|
* references to (ufunc->nin + ufunc->nout) new
|
||||||
|
* dtypes, one for each input and output. These
|
||||||
|
* dtypes should all be in native-endian format.
|
||||||
|
*
|
||||||
|
* Should return 0 on success, -1 on failure (with exception set),
|
||||||
|
* or -2 if Py_NotImplemented should be returned.
|
||||||
|
*/
|
||||||
|
typedef int (PyUFunc_TypeResolutionFunc)(
|
||||||
|
struct _tagPyUFuncObject *ufunc,
|
||||||
|
NPY_CASTING casting,
|
||||||
|
PyArrayObject **operands,
|
||||||
|
PyObject *type_tup,
|
||||||
|
PyArray_Descr **out_dtypes);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc,
|
||||||
|
* and an array of fixed strides (the array will contain NPY_MAX_INTP for
|
||||||
|
* strides which are not necessarily fixed), returns an inner loop
|
||||||
|
* with associated auxiliary data.
|
||||||
|
*
|
||||||
|
* For backwards compatibility, there is a variant of the inner loop
|
||||||
|
* selection which returns an inner loop irrespective of the strides,
|
||||||
|
* and with a void* static auxiliary data instead of an NpyAuxData *
|
||||||
|
* dynamically allocatable auxiliary data.
|
||||||
|
*
|
||||||
|
* ufunc: The ufunc object.
|
||||||
|
* dtypes: An array which has been populated with dtypes,
|
||||||
|
* in most cases by the type resolution function
|
||||||
|
* for the same ufunc.
|
||||||
|
* fixed_strides: For each input/output, either the stride that
|
||||||
|
* will be used every time the function is called
|
||||||
|
* or NPY_MAX_INTP if the stride might change or
|
||||||
|
* is not known ahead of time. The loop selection
|
||||||
|
* function may use this stride to pick inner loops
|
||||||
|
* which are optimized for contiguous or 0-stride
|
||||||
|
* cases.
|
||||||
|
* out_innerloop: Should be populated with the correct ufunc inner
|
||||||
|
* loop for the given type.
|
||||||
|
* out_innerloopdata: Should be populated with the void* data to
|
||||||
|
* be passed into the out_innerloop function.
|
||||||
|
* out_needs_api: If the inner loop needs to use the Python API,
|
||||||
|
* should set the to 1, otherwise should leave
|
||||||
|
* this untouched.
|
||||||
|
*/
|
||||||
|
typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
|
||||||
|
struct _tagPyUFuncObject *ufunc,
|
||||||
|
PyArray_Descr **dtypes,
|
||||||
|
PyUFuncGenericFunction *out_innerloop,
|
||||||
|
void **out_innerloopdata,
|
||||||
|
int *out_needs_api);
|
||||||
|
typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)(
|
||||||
|
struct _tagPyUFuncObject *ufunc,
|
||||||
|
PyArray_Descr **dtypes,
|
||||||
|
PyArray_Descr *mask_dtype,
|
||||||
|
npy_intp *fixed_strides,
|
||||||
|
npy_intp fixed_mask_stride,
|
||||||
|
PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop,
|
||||||
|
NpyAuxData **out_innerloopdata,
|
||||||
|
int *out_needs_api);
|
||||||
|
|
||||||
|
typedef struct _tagPyUFuncObject {
|
||||||
|
PyObject_HEAD
|
||||||
|
/*
|
||||||
|
* nin: Number of inputs
|
||||||
|
* nout: Number of outputs
|
||||||
|
* nargs: Always nin + nout (Why is it stored?)
|
||||||
|
*/
|
||||||
|
int nin, nout, nargs;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
|
||||||
|
* PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
|
||||||
|
* PyUFunc_IdentityValue.
|
||||||
|
*/
|
||||||
|
int identity;
|
||||||
|
|
||||||
|
/* Array of one-dimensional core loops */
|
||||||
|
PyUFuncGenericFunction *functions;
|
||||||
|
/* Array of funcdata that gets passed into the functions */
|
||||||
|
void **data;
|
||||||
|
/* The number of elements in 'functions' and 'data' */
|
||||||
|
int ntypes;
|
||||||
|
|
||||||
|
/* Used to be unused field 'check_return' */
|
||||||
|
int reserved1;
|
||||||
|
|
||||||
|
/* The name of the ufunc */
|
||||||
|
const char *name;
|
||||||
|
|
||||||
|
/* Array of type numbers, of size ('nargs' * 'ntypes') */
|
||||||
|
char *types;
|
||||||
|
|
||||||
|
/* Documentation string */
|
||||||
|
const char *doc;
|
||||||
|
|
||||||
|
void *ptr;
|
||||||
|
PyObject *obj;
|
||||||
|
PyObject *userloops;
|
||||||
|
|
||||||
|
/* generalized ufunc parameters */
|
||||||
|
|
||||||
|
/* 0 for scalar ufunc; 1 for generalized ufunc */
|
||||||
|
int core_enabled;
|
||||||
|
/* number of distinct dimension names in signature */
|
||||||
|
int core_num_dim_ix;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* dimension indices of input/output argument k are stored in
|
||||||
|
* core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* numbers of core dimensions of each argument */
|
||||||
|
int *core_num_dims;
|
||||||
|
/*
|
||||||
|
* dimension indices in a flatted form; indices
|
||||||
|
* are in the range of [0,core_num_dim_ix)
|
||||||
|
*/
|
||||||
|
int *core_dim_ixs;
|
||||||
|
/*
|
||||||
|
* positions of 1st core dimensions of each
|
||||||
|
* argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
|
||||||
|
*/
|
||||||
|
int *core_offsets;
|
||||||
|
/* signature string for printing purpose */
|
||||||
|
char *core_signature;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* A function which resolves the types and fills an array
|
||||||
|
* with the dtypes for the inputs and outputs.
|
||||||
|
*/
|
||||||
|
PyUFunc_TypeResolutionFunc *type_resolver;
|
||||||
|
/*
|
||||||
|
* A function which returns an inner loop written for
|
||||||
|
* NumPy 1.6 and earlier ufuncs. This is for backwards
|
||||||
|
* compatibility, and may be NULL if inner_loop_selector
|
||||||
|
* is specified.
|
||||||
|
*/
|
||||||
|
PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
|
||||||
|
/*
|
||||||
|
* This was blocked off to be the "new" inner loop selector in 1.7,
|
||||||
|
* but this was never implemented. (This is also why the above
|
||||||
|
* selector is called the "legacy" selector.)
|
||||||
|
*/
|
||||||
|
void *reserved2;
|
||||||
|
/*
|
||||||
|
* A function which returns a masked inner loop for the ufunc.
|
||||||
|
*/
|
||||||
|
PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* List of flags for each operand when ufunc is called by nditer object.
|
||||||
|
* These flags will be used in addition to the default flags for each
|
||||||
|
* operand set by nditer object.
|
||||||
|
*/
|
||||||
|
npy_uint32 *op_flags;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* List of global flags used when ufunc is called by nditer object.
|
||||||
|
* These flags will be used in addition to the default global flags
|
||||||
|
* set by nditer object.
|
||||||
|
*/
|
||||||
|
npy_uint32 iter_flags;
|
||||||
|
|
||||||
|
/* New in NPY_API_VERSION 0x0000000D and above */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* for each core_num_dim_ix distinct dimension names,
|
||||||
|
* the possible "frozen" size (-1 if not frozen).
|
||||||
|
*/
|
||||||
|
npy_intp *core_dim_sizes;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
|
||||||
|
*/
|
||||||
|
npy_uint32 *core_dim_flags;
|
||||||
|
|
||||||
|
/* Identity for reduction, when identity == PyUFunc_IdentityValue */
|
||||||
|
PyObject *identity_value;
|
||||||
|
|
||||||
|
} PyUFuncObject;
|
||||||
|
|
||||||
|
#include "arrayobject.h"
|
||||||
|
/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
|
||||||
|
/* the core dimension's size will be determined by the operands. */
|
||||||
|
#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
|
||||||
|
/* the core dimension may be absent */
|
||||||
|
#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
|
||||||
|
/* flags inferred during execution */
|
||||||
|
#define UFUNC_CORE_DIM_MISSING 0x00040000
|
||||||
|
|
||||||
|
#define UFUNC_ERR_IGNORE 0
|
||||||
|
#define UFUNC_ERR_WARN 1
|
||||||
|
#define UFUNC_ERR_RAISE 2
|
||||||
|
#define UFUNC_ERR_CALL 3
|
||||||
|
#define UFUNC_ERR_PRINT 4
|
||||||
|
#define UFUNC_ERR_LOG 5
|
||||||
|
|
||||||
|
/* Python side integer mask */
|
||||||
|
|
||||||
|
#define UFUNC_MASK_DIVIDEBYZERO 0x07
|
||||||
|
#define UFUNC_MASK_OVERFLOW 0x3f
|
||||||
|
#define UFUNC_MASK_UNDERFLOW 0x1ff
|
||||||
|
#define UFUNC_MASK_INVALID 0xfff
|
||||||
|
|
||||||
|
#define UFUNC_SHIFT_DIVIDEBYZERO 0
|
||||||
|
#define UFUNC_SHIFT_OVERFLOW 3
|
||||||
|
#define UFUNC_SHIFT_UNDERFLOW 6
|
||||||
|
#define UFUNC_SHIFT_INVALID 9
|
||||||
|
|
||||||
|
|
||||||
|
#define UFUNC_OBJ_ISOBJECT 1
|
||||||
|
#define UFUNC_OBJ_NEEDS_API 2
|
||||||
|
|
||||||
|
/* Default user error mode */
|
||||||
|
#define UFUNC_ERR_DEFAULT \
|
||||||
|
(UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \
|
||||||
|
(UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \
|
||||||
|
(UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID)
|
||||||
|
|
||||||
|
#if NPY_ALLOW_THREADS
|
||||||
|
#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
|
||||||
|
#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
|
||||||
|
#else
|
||||||
|
#define NPY_LOOP_BEGIN_THREADS
|
||||||
|
#define NPY_LOOP_END_THREADS
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* UFunc has unit of 0, and the order of operations can be reordered
|
||||||
|
* This case allows reduction with multiple axes at once.
|
||||||
|
*/
|
||||||
|
#define PyUFunc_Zero 0
|
||||||
|
/*
|
||||||
|
* UFunc has unit of 1, and the order of operations can be reordered
|
||||||
|
* This case allows reduction with multiple axes at once.
|
||||||
|
*/
|
||||||
|
#define PyUFunc_One 1
|
||||||
|
/*
|
||||||
|
* UFunc has unit of -1, and the order of operations can be reordered
|
||||||
|
* This case allows reduction with multiple axes at once. Intended for
|
||||||
|
* bitwise_and reduction.
|
||||||
|
*/
|
||||||
|
#define PyUFunc_MinusOne 2
|
||||||
|
/*
|
||||||
|
* UFunc has no unit, and the order of operations cannot be reordered.
|
||||||
|
* This case does not allow reduction with multiple axes at once.
|
||||||
|
*/
|
||||||
|
#define PyUFunc_None -1
|
||||||
|
/*
|
||||||
|
* UFunc has no unit, and the order of operations can be reordered
|
||||||
|
* This case allows reduction with multiple axes at once.
|
||||||
|
*/
|
||||||
|
#define PyUFunc_ReorderableNone -2
|
||||||
|
/*
|
||||||
|
* UFunc unit is an identity_value, and the order of operations can be reordered
|
||||||
|
* This case allows reduction with multiple axes at once.
|
||||||
|
*/
|
||||||
|
#define PyUFunc_IdentityValue -3
|
||||||
|
|
||||||
|
|
||||||
|
#define UFUNC_REDUCE 0
|
||||||
|
#define UFUNC_ACCUMULATE 1
|
||||||
|
#define UFUNC_REDUCEAT 2
|
||||||
|
#define UFUNC_OUTER 3
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int nin;
|
||||||
|
int nout;
|
||||||
|
PyObject *callable;
|
||||||
|
} PyUFunc_PyFuncData;
|
||||||
|
|
||||||
|
/* A linked-list of function information for
|
||||||
|
user-defined 1-d loops.
|
||||||
|
*/
|
||||||
|
typedef struct _loop1d_info {
|
||||||
|
PyUFuncGenericFunction func;
|
||||||
|
void *data;
|
||||||
|
int *arg_types;
|
||||||
|
struct _loop1d_info *next;
|
||||||
|
int nargs;
|
||||||
|
PyArray_Descr **arg_dtypes;
|
||||||
|
} PyUFunc_Loop1d;
|
||||||
|
|
||||||
|
|
||||||
|
#include "__ufunc_api.h"
|
||||||
|
|
||||||
|
#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* THESE MACROS ARE DEPRECATED.
|
||||||
|
* Use npy_set_floatstatus_* in the npymath library.
|
||||||
|
*/
|
||||||
|
#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
|
||||||
|
#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
|
||||||
|
#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
|
||||||
|
#define UFUNC_FPE_INVALID NPY_FPE_INVALID
|
||||||
|
|
||||||
|
#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
|
||||||
|
#define generate_overflow_error() npy_set_floatstatus_overflow()
|
||||||
|
|
||||||
|
/* Make sure it gets defined if it isn't already */
|
||||||
|
#ifndef UFUNC_NOFPE
|
||||||
|
/* Clear the floating point exception default of Borland C++ */
|
||||||
|
#if defined(__BORLANDC__)
|
||||||
|
#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
|
||||||
|
#else
|
||||||
|
#define UFUNC_NOFPE
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
#endif /* !Py_UFUNCOBJECT_H */
|
@ -0,0 +1,21 @@
|
|||||||
|
#ifndef __NUMPY_UTILS_HEADER__
|
||||||
|
#define __NUMPY_UTILS_HEADER__
|
||||||
|
|
||||||
|
#ifndef __COMP_NPY_UNUSED
|
||||||
|
#if defined(__GNUC__)
|
||||||
|
#define __COMP_NPY_UNUSED __attribute__ ((__unused__))
|
||||||
|
# elif defined(__ICC)
|
||||||
|
#define __COMP_NPY_UNUSED __attribute__ ((__unused__))
|
||||||
|
# elif defined(__clang__)
|
||||||
|
#define __COMP_NPY_UNUSED __attribute__ ((unused))
|
||||||
|
#else
|
||||||
|
#define __COMP_NPY_UNUSED
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Use this to tag a variable as not used. It will remove unused variable
|
||||||
|
* warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable
|
||||||
|
* to avoid accidental use */
|
||||||
|
#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED
|
||||||
|
|
||||||
|
#endif
|
@ -0,0 +1,12 @@
|
|||||||
|
[meta]
|
||||||
|
Name = mlib
|
||||||
|
Description = Math library used with this version of numpy
|
||||||
|
Version = 1.0
|
||||||
|
|
||||||
|
[default]
|
||||||
|
Libs=
|
||||||
|
Cflags=
|
||||||
|
|
||||||
|
[msvc]
|
||||||
|
Libs=
|
||||||
|
Cflags=
|
@ -0,0 +1,20 @@
|
|||||||
|
[meta]
|
||||||
|
Name=npymath
|
||||||
|
Description=Portable, core math library implementing C99 standard
|
||||||
|
Version=0.1
|
||||||
|
|
||||||
|
[variables]
|
||||||
|
pkgname=numpy.core
|
||||||
|
prefix=${pkgdir}
|
||||||
|
libdir=${prefix}\lib
|
||||||
|
includedir=${prefix}\include
|
||||||
|
|
||||||
|
[default]
|
||||||
|
Libs=-L${libdir} -lnpymath
|
||||||
|
Cflags=-I${includedir}
|
||||||
|
Requires=mlib
|
||||||
|
|
||||||
|
[msvc]
|
||||||
|
Libs=/LIBPATH:${libdir} npymath.lib
|
||||||
|
Cflags=/INCLUDE:${includedir}
|
||||||
|
Requires=mlib
|
Binary file not shown.
344
Restaurant/Marta/venv/Lib/site-packages/numpy/core/machar.py
Normal file
344
Restaurant/Marta/venv/Lib/site-packages/numpy/core/machar.py
Normal file
@ -0,0 +1,344 @@
|
|||||||
|
"""
|
||||||
|
Machine arithmetics - determine the parameters of the
|
||||||
|
floating-point arithmetic system
|
||||||
|
|
||||||
|
Author: Pearu Peterson, September 2003
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
__all__ = ['MachAr']
|
||||||
|
|
||||||
|
from numpy.core.fromnumeric import any
|
||||||
|
from numpy.core._ufunc_config import errstate
|
||||||
|
from numpy.core.overrides import set_module
|
||||||
|
|
||||||
|
# Need to speed this up...especially for longfloat
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
class MachAr(object):
|
||||||
|
"""
|
||||||
|
Diagnosing machine parameters.
|
||||||
|
|
||||||
|
Attributes
|
||||||
|
----------
|
||||||
|
ibeta : int
|
||||||
|
Radix in which numbers are represented.
|
||||||
|
it : int
|
||||||
|
Number of base-`ibeta` digits in the floating point mantissa M.
|
||||||
|
machep : int
|
||||||
|
Exponent of the smallest (most negative) power of `ibeta` that,
|
||||||
|
added to 1.0, gives something different from 1.0
|
||||||
|
eps : float
|
||||||
|
Floating-point number ``beta**machep`` (floating point precision)
|
||||||
|
negep : int
|
||||||
|
Exponent of the smallest power of `ibeta` that, subtracted
|
||||||
|
from 1.0, gives something different from 1.0.
|
||||||
|
epsneg : float
|
||||||
|
Floating-point number ``beta**negep``.
|
||||||
|
iexp : int
|
||||||
|
Number of bits in the exponent (including its sign and bias).
|
||||||
|
minexp : int
|
||||||
|
Smallest (most negative) power of `ibeta` consistent with there
|
||||||
|
being no leading zeros in the mantissa.
|
||||||
|
xmin : float
|
||||||
|
Floating point number ``beta**minexp`` (the smallest [in
|
||||||
|
magnitude] usable floating value).
|
||||||
|
maxexp : int
|
||||||
|
Smallest (positive) power of `ibeta` that causes overflow.
|
||||||
|
xmax : float
|
||||||
|
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
|
||||||
|
usable floating value).
|
||||||
|
irnd : int
|
||||||
|
In ``range(6)``, information on what kind of rounding is done
|
||||||
|
in addition, and on how underflow is handled.
|
||||||
|
ngrd : int
|
||||||
|
Number of 'guard digits' used when truncating the product
|
||||||
|
of two mantissas to fit the representation.
|
||||||
|
epsilon : float
|
||||||
|
Same as `eps`.
|
||||||
|
tiny : float
|
||||||
|
Same as `xmin`.
|
||||||
|
huge : float
|
||||||
|
Same as `xmax`.
|
||||||
|
precision : float
|
||||||
|
``- int(-log10(eps))``
|
||||||
|
resolution : float
|
||||||
|
``- 10**(-precision)``
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
float_conv : function, optional
|
||||||
|
Function that converts an integer or integer array to a float
|
||||||
|
or float array. Default is `float`.
|
||||||
|
int_conv : function, optional
|
||||||
|
Function that converts a float or float array to an integer or
|
||||||
|
integer array. Default is `int`.
|
||||||
|
float_to_float : function, optional
|
||||||
|
Function that converts a float array to float. Default is `float`.
|
||||||
|
Note that this does not seem to do anything useful in the current
|
||||||
|
implementation.
|
||||||
|
float_to_str : function, optional
|
||||||
|
Function that converts a single float to a string. Default is
|
||||||
|
``lambda v:'%24.16e' %v``.
|
||||||
|
title : str, optional
|
||||||
|
Title that is printed in the string representation of `MachAr`.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
finfo : Machine limits for floating point types.
|
||||||
|
iinfo : Machine limits for integer types.
|
||||||
|
|
||||||
|
References
|
||||||
|
----------
|
||||||
|
.. [1] Press, Teukolsky, Vetterling and Flannery,
|
||||||
|
"Numerical Recipes in C++," 2nd ed,
|
||||||
|
Cambridge University Press, 2002, p. 31.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, float_conv=float,int_conv=int,
|
||||||
|
float_to_float=float,
|
||||||
|
float_to_str=lambda v:'%24.16e' % v,
|
||||||
|
title='Python floating point number'):
|
||||||
|
"""
|
||||||
|
|
||||||
|
float_conv - convert integer to float (array)
|
||||||
|
int_conv - convert float (array) to integer
|
||||||
|
float_to_float - convert float array to float
|
||||||
|
float_to_str - convert array float to str
|
||||||
|
title - description of used floating point numbers
|
||||||
|
|
||||||
|
"""
|
||||||
|
# We ignore all errors here because we are purposely triggering
|
||||||
|
# underflow to detect the properties of the runninng arch.
|
||||||
|
with errstate(under='ignore'):
|
||||||
|
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
|
||||||
|
|
||||||
|
    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        """Empirically determine and store the machine parameters.

        Implements the MACHAR probing algorithm (cf. Press et al.,
        "Numerical Recipes", the reference cited in the class docstring):
        every property of the floating point representation is discovered
        by arithmetic experiments expressed through the supplied conversion
        callables, never by inspecting the type directly.
        """
        # Each probing loop is bounded; if a loop exhausts max_iterN
        # iterations without its break condition firing, the arithmetic is
        # not behaving like a radix-based float and we give up.
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one

        # Do we really need to do this? Aren't they 2 and 2.0?
        # Determine ibeta and beta (the radix of the representation):
        # double `a` until adding one no longer round-trips, i.e. until
        # the spacing between representable numbers near `a` exceeds 1.
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        # Then double `b` until `a + b` differs from `a`; the difference
        # is exactly the radix.
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp-a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp
        beta = float_conv(ibeta)

        # Determine it (number of base-`beta` mantissa digits) and irnd:
        # multiply by beta until `b + one` no longer round-trips.
        it = -1
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))

        # Classify the rounding mode (irnd) by observing whether adding
        # half the radix to a large `a` changes it.
        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        temp = a + betah
        irnd = 0
        if any(temp-a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd == 0 and any(temp-tempa != zero):
            irnd = 2

        # Determine negep and epsneg: smallest power of betain whose
        # subtraction from one is still detectable.
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp-one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a

        # Determine machep and eps: smallest power of beta whose addition
        # to one is detectable (the machine epsilon).
        machep = - it - 3
        a = b

        for _ in range(max_iterN):
            temp = one + a
            if any(temp-one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a

        # Determine ngrd (number of guard digits used in multiplication).
        ngrd = 0
        temp = one + eps
        if irnd == 0 and any(temp*one - one != zero):
            ngrd = 1

        # Determine iexp (number of exponent bits) by repeated squaring of
        # betain until underflow or loss of reversibility is observed.
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        for _ in range(max_iterN):
            y = z
            z = y*y
            a = z*one  # Check here for underflow
            temp = z*t
            if any(a+a == zero) or any(abs(z) >= y):
                break
            temp1 = temp * betain
            if any(temp1*beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            # Decimal machines need a different bound for the exponent.
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1

        # Determine minexp and xmin: keep dividing by beta until the value
        # either vanishes or stops shrinking (gradual underflow).
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any((a + a) != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1*beta == y) and any(temp != y):
                    # Partial underflow detected; nxres adjusts irnd below.
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k

        # Determine maxexp, xmax from the symmetry of the exponent range,
        # with empirical corrections for rounding and IEEE-style formats.
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        # Build xmax = (1 - epsneg) * beta**maxexp without overflowing:
        # start from 1 - epsneg scaled down, then multiply back up.
        xmax = one - epsneg
        if any(xmax*one != xmax):
            xmax = one - beta*epsneg
        xmax = xmax / (xmin*beta*beta*beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta == 2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta

        # Publish everything on the instance; the *_str_* attributes hold
        # preformatted representations used by __str__.
        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd

        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax

        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
fmt = (
|
||||||
|
'Machine parameters for %(title)s\n'
|
||||||
|
'---------------------------------------------------------------------\n'
|
||||||
|
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
|
||||||
|
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
|
||||||
|
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
|
||||||
|
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
|
||||||
|
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
|
||||||
|
'---------------------------------------------------------------------\n'
|
||||||
|
)
|
||||||
|
return fmt % self.__dict__
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Probe the platform's default Python float and print the parameters.
    print(MachAr())
|
334
Restaurant/Marta/venv/Lib/site-packages/numpy/core/memmap.py
Normal file
334
Restaurant/Marta/venv/Lib/site-packages/numpy/core/memmap.py
Normal file
@ -0,0 +1,334 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from .numeric import uint8, ndarray, dtype
|
||||||
|
from numpy.compat import (
|
||||||
|
long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path
|
||||||
|
)
|
||||||
|
from numpy.core.overrides import set_module
|
||||||
|
|
||||||
|
__all__ = ['memmap']
|
||||||
|
|
||||||
|
# Alias of ``dtype`` — presumably kept so older code importing
# ``dtypedescr`` from this module keeps working; verify before removing.
dtypedescr = dtype
# Short file modes accepted by ``memmap`` (after normalising long names).
valid_filemodes = ["r", "c", "r+", "w+"]
# Subset of the modes under which the mapped array may be written.
writeable_filemodes = ["r+", "w+"]

# Long-form mode names accepted as synonyms for the short file modes.
mode_equivalents = {
    "readonly":"r",
    "copyonwrite":"c",
    "readwrite":"r+",
    "write":"w+"
    }
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
class memmap(ndarray):
    """Create a memory-map to an array stored in a *binary* file on disk.

    Memory-mapped files are used for accessing small segments of large files
    on disk, without reading the entire file into memory.  NumPy's
    memmap's are array-like objects.  This differs from Python's ``mmap``
    module, which uses file-like objects.

    This subclass of ndarray has some unpleasant interactions with
    some operations, because it doesn't quite fit properly as a subclass.
    An alternative to using this subclass is to create the ``mmap``
    object yourself, then create an ndarray with ndarray.__new__ directly,
    passing the object created in its 'buffer=' parameter.

    This class may at some point be turned into a factory function
    which returns a view into an mmap buffer.

    Delete the memmap instance to close the memmap file.


    Parameters
    ----------
    filename : str, file-like object, or pathlib.Path instance
        The file name or file object to be used as the array data buffer.
    dtype : data-type, optional
        The data-type used to interpret the file contents.
        Default is `uint8`.
    mode : {'r+', 'r', 'w+', 'c'}, optional
        The file is opened in this mode:

        +------+-------------------------------------------------------------+
        | 'r'  | Open existing file for reading only.                        |
        +------+-------------------------------------------------------------+
        | 'r+' | Open existing file for reading and writing.                 |
        +------+-------------------------------------------------------------+
        | 'w+' | Create or overwrite existing file for reading and writing.  |
        +------+-------------------------------------------------------------+
        | 'c'  | Copy-on-write: assignments affect data in memory, but       |
        |      | changes are not saved to disk.  The file on disk is         |
        |      | read-only.                                                  |
        +------+-------------------------------------------------------------+

        Default is 'r+'.
    offset : int, optional
        In the file, array data starts at this offset. Since `offset` is
        measured in bytes, it should normally be a multiple of the byte-size
        of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
        file are valid; The file will be extended to accommodate the
        additional data. By default, ``memmap`` will start at the beginning of
        the file, even if ``filename`` is a file pointer ``fp`` and
        ``fp.tell() != 0``.
    shape : tuple, optional
        The desired shape of the array. If ``mode == 'r'`` and the number
        of remaining bytes after `offset` is not a multiple of the byte-size
        of `dtype`, you must specify `shape`. By default, the returned array
        will be 1-D with the number of elements determined by file size
        and data-type.
    order : {'C', 'F'}, optional
        Specify the order of the ndarray memory layout:
        :term:`row-major`, C-style or :term:`column-major`,
        Fortran-style.  This only has an effect if the shape is
        greater than 1-D.  The default order is 'C'.

    Attributes
    ----------
    filename : str or pathlib.Path instance
        Path to the mapped file.
    offset : int
        Offset position in the file.
    mode : str
        File mode.

    Methods
    -------
    flush
        Flush any changes in memory to file on disk.
        When you delete a memmap object, flush is called first to write
        changes to disk before removing the object.


    See also
    --------
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    The memmap object can be used anywhere an ndarray is accepted.
    Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
    ``True``.

    Memory-mapped files cannot be larger than 2GB on 32-bit systems.

    When a memmap causes a file to be created or extended beyond its
    current size in the filesystem, the contents of the new part are
    unspecified. On systems with POSIX filesystem semantics, the extended
    part will be filled with zero bytes.

    Examples
    --------
    >>> data = np.arange(12, dtype='float32')
    >>> data.resize((3,4))

    This example uses a temporary file so that doctest doesn't write
    files to your directory. You would use a 'normal' filename.

    >>> from tempfile import mkdtemp
    >>> import os.path as path
    >>> filename = path.join(mkdtemp(), 'newfile.dat')

    Create a memmap with dtype and shape that matches our data:

    >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
    >>> fp
    memmap([[0., 0., 0., 0.],
            [0., 0., 0., 0.],
            [0., 0., 0., 0.]], dtype=float32)

    Write data to memmap array:

    >>> fp[:] = data[:]
    >>> fp
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    >>> fp.filename == path.abspath(filename)
    True

    Deletion flushes memory changes to disk before removing the object:

    >>> del fp

    Load the memmap and verify data was stored:

    >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
    >>> newfp
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    Read-only memmap:

    >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
    >>> fpr.flags.writeable
    False

    Copy-on-write memmap:

    >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
    >>> fpc.flags.writeable
    True

    It's possible to assign to copy-on-write array, but values are only
    written into the memory copy of the array, and not written to disk:

    >>> fpc
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)
    >>> fpc[0,:] = 0
    >>> fpc
    memmap([[  0.,   0.,   0.,   0.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    File on disk is unchanged:

    >>> fpr
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    Offset into a memmap:

    >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
    >>> fpo
    memmap([  4.,   5.,   6.,   7.,   8.,   9.,  10.,  11.], dtype=float32)

    """

    # Ensure ufuncs prefer the *other* operand's type when mixing with
    # plain ndarrays, so arithmetic results are not memmap instances.
    __array_priority__ = -100.0

    def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
                shape=None, order='C'):
        # Import here to minimize 'import numpy' overhead
        import mmap
        import os.path
        # Normalise long-form mode names ("readonly" -> "r", etc.).
        try:
            mode = mode_equivalents[mode]
        except KeyError:
            if mode not in valid_filemodes:
                raise ValueError("mode must be one of %s" %
                                 (valid_filemodes + list(mode_equivalents.keys())))

        if mode == 'w+' and shape is None:
            raise ValueError("shape must be given")

        # Accept an already-open file object as-is; otherwise open the
        # path ourselves (read-only on disk for copy-on-write mode).
        if hasattr(filename, 'read'):
            f_ctx = contextlib_nullcontext(filename)
        else:
            f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')

        with f_ctx as fid:
            # Seek to the end to learn the current file length.
            fid.seek(0, 2)
            flen = fid.tell()
            descr = dtypedescr(dtype)
            _dbytes = descr.itemsize

            if shape is None:
                # Infer a 1-D shape from the bytes remaining after offset.
                bytes = flen - offset
                if bytes % _dbytes:
                    raise ValueError("Size of available data is not a "
                                     "multiple of the data-type size.")
                size = bytes // _dbytes
                shape = (size,)
            else:
                if not isinstance(shape, tuple):
                    shape = (shape,)
                size = np.intp(1)  # avoid default choice of np.int_, which might overflow
                for k in shape:
                    size *= k

            bytes = long(offset + size*_dbytes)

            # In writable modes, extend the file to the required length by
            # writing a single byte at the last position.
            if mode in ('w+', 'r+') and flen < bytes:
                fid.seek(bytes - 1, 0)
                fid.write(b'\0')
                fid.flush()

            if mode == 'c':
                acc = mmap.ACCESS_COPY
            elif mode == 'r':
                acc = mmap.ACCESS_READ
            else:
                acc = mmap.ACCESS_WRITE

            # mmap offsets must be aligned to the allocation granularity;
            # round down and compensate inside the ndarray's buffer offset.
            start = offset - offset % mmap.ALLOCATIONGRANULARITY
            bytes -= start
            array_offset = offset - start
            mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)

            self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
                                   offset=array_offset, order=order)
            self._mmap = mm
            self.offset = offset
            self.mode = mode

            if is_pathlib_path(filename):
                # special case - if we were constructed with a pathlib.path,
                # then filename is a path object, not a string
                self.filename = filename.resolve()
            elif hasattr(fid, "name") and isinstance(fid.name, basestring):
                # py3 returns int for TemporaryFile().name
                self.filename = os.path.abspath(fid.name)
            # same as memmap copies (e.g. memmap + 1)
            else:
                self.filename = None

        return self

    def __array_finalize__(self, obj):
        # Views that still share memory with a memmap inherit its mapping
        # metadata; anything else (copies) becomes a plain detached array.
        if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
            self._mmap = obj._mmap
            self.filename = obj.filename
            self.offset = obj.offset
            self.mode = obj.mode
        else:
            self._mmap = None
            self.filename = None
            self.offset = None
            self.mode = None

    def flush(self):
        """
        Write any changes in the array to the file on disk.

        For further information, see `memmap`.

        Parameters
        ----------
        None

        See Also
        --------
        memmap

        """
        # Delegate to the underlying mmap object (reached via .base),
        # which performs the actual msync to disk.
        if self.base is not None and hasattr(self.base, 'flush'):
            self.base.flush()

    def __array_wrap__(self, arr, context=None):
        arr = super(memmap, self).__array_wrap__(arr, context)

        # Return a memmap if a memmap was given as the output of the
        # ufunc. Leave the arr class unchanged if self is not a memmap
        # to keep original memmap subclasses behavior
        if self is arr or type(self) is not memmap:
            return arr
        # Return scalar instead of 0d memmap, e.g. for np.sum with
        # axis=None
        if arr.shape == ():
            return arr[()]
        # Return ndarray otherwise
        return arr.view(np.ndarray)

    def __getitem__(self, index):
        res = super(memmap, self).__getitem__(index)
        # A memmap result that no longer carries a mapping (e.g. produced
        # through __array_finalize__'s detached branch) is demoted to a
        # plain ndarray view so it does not masquerade as a mapped array.
        if type(res) is memmap and res._mmap is None:
            return res.view(type=ndarray)
        return res
|
1631
Restaurant/Marta/venv/Lib/site-packages/numpy/core/multiarray.py
Normal file
1631
Restaurant/Marta/venv/Lib/site-packages/numpy/core/multiarray.py
Normal file
File diff suppressed because it is too large
Load Diff
2411
Restaurant/Marta/venv/Lib/site-packages/numpy/core/numeric.py
Normal file
2411
Restaurant/Marta/venv/Lib/site-packages/numpy/core/numeric.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,675 @@
|
|||||||
|
"""
|
||||||
|
numerictypes: Define the numeric type objects
|
||||||
|
|
||||||
|
This module is designed so "from numerictypes import \\*" is safe.
|
||||||
|
Exported symbols include:
|
||||||
|
|
||||||
|
Dictionary with all registered number types (including aliases):
|
||||||
|
typeDict
|
||||||
|
|
||||||
|
Type objects (not all will be available, depends on platform):
|
||||||
|
see variable sctypes for which ones you have
|
||||||
|
|
||||||
|
Bit-width names
|
||||||
|
|
||||||
|
int8 int16 int32 int64 int128
|
||||||
|
uint8 uint16 uint32 uint64 uint128
|
||||||
|
float16 float32 float64 float96 float128 float256
|
||||||
|
complex32 complex64 complex128 complex192 complex256 complex512
|
||||||
|
datetime64 timedelta64
|
||||||
|
|
||||||
|
c-based names
|
||||||
|
|
||||||
|
bool_
|
||||||
|
|
||||||
|
object_
|
||||||
|
|
||||||
|
void, str_, unicode_
|
||||||
|
|
||||||
|
byte, ubyte,
|
||||||
|
short, ushort
|
||||||
|
intc, uintc,
|
||||||
|
intp, uintp,
|
||||||
|
int_, uint,
|
||||||
|
longlong, ulonglong,
|
||||||
|
|
||||||
|
single, csingle,
|
||||||
|
float_, complex_,
|
||||||
|
longfloat, clongfloat,
|
||||||
|
|
||||||
|
As part of the type-hierarchy: xx -- is bit-width
|
||||||
|
|
||||||
|
generic
|
||||||
|
+-> bool_ (kind=b)
|
||||||
|
+-> number
|
||||||
|
| +-> integer
|
||||||
|
| | +-> signedinteger (intxx) (kind=i)
|
||||||
|
| | | byte
|
||||||
|
| | | short
|
||||||
|
| | | intc
|
||||||
|
| | | intp int0
|
||||||
|
| | | int_
|
||||||
|
| | | longlong
|
||||||
|
| | \\-> unsignedinteger (uintxx) (kind=u)
|
||||||
|
| | ubyte
|
||||||
|
| | ushort
|
||||||
|
| | uintc
|
||||||
|
| | uintp uint0
|
||||||
|
| | uint_
|
||||||
|
| | ulonglong
|
||||||
|
| +-> inexact
|
||||||
|
| +-> floating (floatxx) (kind=f)
|
||||||
|
| | half
|
||||||
|
| | single
|
||||||
|
| | float_ (double)
|
||||||
|
| | longfloat
|
||||||
|
| \\-> complexfloating (complexxx) (kind=c)
|
||||||
|
| csingle (singlecomplex)
|
||||||
|
| complex_ (cfloat, cdouble)
|
||||||
|
| clongfloat (longcomplex)
|
||||||
|
+-> flexible
|
||||||
|
| +-> character
|
||||||
|
| | str_ (string_, bytes_) (kind=S) [Python 2]
|
||||||
|
| | unicode_ (kind=U) [Python 2]
|
||||||
|
| |
|
||||||
|
| | bytes_ (string_) (kind=S) [Python 3]
|
||||||
|
| | str_ (unicode_) (kind=U) [Python 3]
|
||||||
|
| |
|
||||||
|
| \\-> void (kind=V)
|
||||||
|
\\-> object_ (not used much) (kind=O)
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import types as _types
|
||||||
|
import sys
|
||||||
|
import numbers
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from numpy.compat import bytes, long
|
||||||
|
from numpy.core.multiarray import (
|
||||||
|
typeinfo, ndarray, array, empty, dtype, datetime_data,
|
||||||
|
datetime_as_string, busday_offset, busday_count, is_busday,
|
||||||
|
busdaycalendar
|
||||||
|
)
|
||||||
|
from numpy.core.overrides import set_module
|
||||||
|
|
||||||
|
# we add more at the bottom
|
||||||
|
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
|
||||||
|
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
|
||||||
|
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
|
||||||
|
'issubdtype', 'datetime_data', 'datetime_as_string',
|
||||||
|
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
|
||||||
|
]
|
||||||
|
|
||||||
|
# we don't need all these imports, but we need to keep them for compatibility
|
||||||
|
# for users using np.core.numerictypes.UPPER_TABLE
|
||||||
|
from ._string_helpers import (
|
||||||
|
english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
|
||||||
|
)
|
||||||
|
|
||||||
|
from ._type_aliases import (
|
||||||
|
sctypeDict,
|
||||||
|
sctypeNA,
|
||||||
|
allTypes,
|
||||||
|
bitname,
|
||||||
|
sctypes,
|
||||||
|
_concrete_types,
|
||||||
|
_concrete_typeinfo,
|
||||||
|
_bits_of,
|
||||||
|
)
|
||||||
|
from ._dtype import _kind_name
|
||||||
|
|
||||||
|
# we don't export these for import *, but we do want them accessible
|
||||||
|
# as numerictypes.bool, etc.
|
||||||
|
# Re-export the builtin scalar types so they are accessible as
# numerictypes.bool, numerictypes.int, etc. (deliberately not in __all__,
# so `import *` does not shadow the builtins).
if sys.version_info[0] >= 3:
    from builtins import bool, int, float, complex, object, str
    # Python 3 has no separate `unicode` type; alias it to `str`.
    unicode = str
else:
    from __builtin__ import bool, int, float, complex, object, unicode, str


# We use this later
generic = allTypes['generic']

# Scalar type names ordered by rank — presumably lowest to highest
# precision within each family (TODO confirm against consumers).  Not
# every name exists on every platform (e.g. int128, float96).
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
                   'int32', 'uint32', 'int64', 'uint64', 'int128',
                   'uint128', 'float16',
                   'float32', 'float64', 'float80', 'float96', 'float128',
                   'float256',
                   'complex32', 'complex64', 'complex128', 'complex160',
                   'complex192', 'complex256', 'complex512', 'object']
|
||||||
|
|
||||||
|
@set_module('numpy')
def maximum_sctype(t):
    """
    Return the scalar type of highest precision of the same kind as the input.

    Parameters
    ----------
    t : dtype or dtype specifier
        The input data type, given as a `dtype` object or anything
        convertible to one.

    Returns
    -------
    out : dtype
        The highest precision data type of the same kind (`dtype.kind`)
        as `t`.  When `t` cannot be interpreted as a scalar type, or its
        kind has no precision ladder, `t`'s scalar type (or `t` itself)
        is returned unchanged.

    See Also
    --------
    obj2sctype, mintypecode, sctype2char
    dtype

    Examples
    --------
    >>> np.maximum_sctype(np.uint8)
    <class 'numpy.uint64'>
    >>> np.maximum_sctype('i2')
    <class 'numpy.int64'>

    """
    sc = obj2sctype(t)
    if sc is None:
        # Not interpretable as a scalar type at all; hand back the input.
        return t
    kind = _kind_name(dtype(sc))
    if kind in sctypes:
        # The last entry of each kind's ladder is its widest member.
        return sctypes[kind][-1]
    return sc
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def issctype(rep):
    """
    Determine whether the given object represents a scalar data-type.

    Parameters
    ----------
    rep : any
        Candidate object.

    Returns
    -------
    out : bool
        True when `rep` describes a scalar dtype (other than the generic
        object dtype), False otherwise.

    See Also
    --------
    issubsctype, issubdtype, obj2sctype, sctype2char

    Examples
    --------
    >>> np.issctype(np.int32)
    True
    >>> np.issctype(list)
    False
    >>> np.issctype(1.1)
    False

    Strings are also a scalar type:

    >>> np.issctype(np.dtype('str'))
    True

    """
    # Only types and dtype instances can possibly name a scalar dtype.
    if not isinstance(rep, (type, dtype)):
        return False
    try:
        converted = obj2sctype(rep)
        # Reject both a failed conversion (None) and the catch-all object_.
        return bool(converted) and converted != object_
    except Exception:
        return False
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def obj2sctype(rep, default=None):
    """
    Return the scalar dtype or NumPy equivalent of Python type of an object.

    Parameters
    ----------
    rep : any
        The object whose scalar type is wanted.
    default : any, optional
        Returned for objects whose type cannot be determined.  When not
        given, None is returned for those objects.

    Returns
    -------
    dtype : dtype or Python type
        The data type of `rep`.

    See Also
    --------
    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype

    Examples
    --------
    >>> np.obj2sctype(np.int32)
    <class 'numpy.int32'>
    >>> np.obj2sctype(np.array([1., 2.]))
    <class 'numpy.float64'>
    >>> np.obj2sctype(1, default=list)
    <class 'list'>

    """
    # prevent abstract classes being upcast
    if isinstance(rep, type) and issubclass(rep, generic):
        return rep
    # extract dtype from arrays
    if isinstance(rep, ndarray):
        return rep.dtype.type
    # fall back on dtype to convert
    try:
        converted = dtype(rep)
    except Exception:
        return default
    return converted.type
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
def issubclass_(arg1, arg2):
    """
    Determine if a class is a subclass of a second class.

    Equivalent to the Python built-in ``issubclass``, except that False
    is returned instead of raising a TypeError when either argument is
    not a class.

    Parameters
    ----------
    arg1 : class
        Input class. True is returned if `arg1` is a subclass of `arg2`.
    arg2 : class or tuple of classes.
        Input class. If a tuple of classes, True is returned if `arg1` is a
        subclass of any of the tuple elements.

    Returns
    -------
    out : bool
        Whether `arg1` is a subclass of `arg2` or not.

    See Also
    --------
    issubsctype, issubdtype, issctype

    Examples
    --------
    >>> np.issubclass_(np.int32, int)
    False  # True on Python 2.7
    >>> np.issubclass_(np.int32, float)
    False

    """
    try:
        result = issubclass(arg1, arg2)
    except TypeError:
        # Non-class arguments are simply "not a subclass".
        return False
    return result
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def issubsctype(arg1, arg2):
    """
    Determine if the first argument is a subclass of the second argument.

    Parameters
    ----------
    arg1, arg2 : dtype or dtype specifier
        Data-types.

    Returns
    -------
    out : bool
        The result.

    See Also
    --------
    issctype, issubdtype, obj2sctype

    Examples
    --------
    >>> np.issubsctype('S8', str)
    False
    >>> np.issubsctype(np.array([1]), int)
    True
    >>> np.issubsctype(np.array([1]), float)
    False

    """
    # Resolve both arguments to canonical scalar types, then fall back on
    # a plain subclass test.
    first = obj2sctype(arg1)
    second = obj2sctype(arg2)
    return issubclass(first, second)
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def issubdtype(arg1, arg2):
    """
    Returns True if first argument is a typecode lower/equal in type hierarchy.

    Parameters
    ----------
    arg1, arg2 : dtype_like
        dtype or string representing a typecode.

    Returns
    -------
    out : bool

    See Also
    --------
    issubsctype, issubclass_
    numpy.core.numerictypes : Overview of numpy type hierarchy.

    Examples
    --------
    >>> np.issubdtype('S1', np.string_)
    True
    >>> np.issubdtype(np.float64, np.float32)
    False

    """
    # Normalize both arguments to scalar types (subclasses of np.generic)
    # so the final issubclass() check walks numpy's type hierarchy.
    if not issubclass_(arg1, generic):
        arg1 = dtype(arg1).type
    if not issubclass_(arg2, generic):
        arg2_orig = arg2
        arg2 = dtype(arg2).type
        if not isinstance(arg2_orig, dtype):
            # weird deprecated behaviour, that tried to infer np.floating from
            # float, and similar less obvious things, such as np.generic from
            # basestring
            mro = arg2.mro()
            arg2 = mro[1] if len(mro) > 1 else mro[0]

            def type_repr(x):
                """ Helper to produce clear error messages """
                if not isinstance(x, type):
                    return repr(x)
                elif issubclass(x, generic):
                    return "np.{}".format(x.__name__)
                else:
                    return x.__name__

            # Deprecated since 1.14, 2017-08-01: warn that the abstract-type
            # inference above will be replaced by a plain dtype conversion.
            warnings.warn(
                "Conversion of the second argument of issubdtype from `{raw}` "
                "to `{abstract}` is deprecated. In future, it will be treated "
                "as `{concrete} == np.dtype({raw}).type`.".format(
                    raw=type_repr(arg2_orig),
                    abstract=type_repr(arg2),
                    concrete=type_repr(dtype(arg2_orig).type)
                ),
                FutureWarning, stacklevel=2
            )

    return issubclass(arg1, arg2)
|
||||||
|
|
||||||
|
|
||||||
|
# This dictionary allows look up based on any alias for an array data-type
|
||||||
|
class _typedict(dict):
    """
    Base object for a dictionary for look-up with any alias for an array dtype.

    Instances of `_typedict` can not be used as dictionaries directly,
    first they have to be populated.

    """

    def __getitem__(self, obj):
        # Resolve any dtype alias to its canonical scalar type before the
        # ordinary dict lookup.
        key = obj2sctype(obj)
        return super(_typedict, self).__getitem__(key)
|
||||||
|
|
||||||
|
# Per-scalar-type lookup tables; keyed through _typedict so any dtype alias
# resolves to the canonical scalar type on access.
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
    """Populate the size/alignment/min/max tables from _concrete_typeinfo."""
    # NOTE(review): assumes each `info` record exposes .type, .bits and
    # .alignment, and carries .max/.min only for bounded (integer) types —
    # confirm against _concrete_typeinfo's definition earlier in the module.
    for name, info in _concrete_typeinfo.items():
        obj = info.type
        nbytes[obj] = info.bits // 8
        _alignment[obj] = info.alignment
        if len(info) > 5:
            # longer records carry integral min/max bounds
            _maxvals[obj] = info.max
            _minvals[obj] = info.min
        else:
            # unbounded types (floats, complex, flexible): no bounds recorded
            _maxvals[obj] = None
            _minvals[obj] = None

_construct_lookups()
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def sctype2char(sctype):
    """
    Return the string representation of a scalar dtype.

    Parameters
    ----------
    sctype : scalar dtype or object
        If a scalar dtype, the corresponding string character is
        returned. If an object, `sctype2char` tries to infer its scalar type
        and then return the corresponding string character.

    Returns
    -------
    typechar : str
        The string character corresponding to the scalar type.

    Raises
    ------
    ValueError
        If `sctype` is an object for which the type can not be inferred.

    See Also
    --------
    obj2sctype, issctype, issubsctype, mintypecode

    Examples
    --------
    >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
    ...     print(np.sctype2char(sctype))
    l # may vary
    d
    D
    S
    O

    >>> x = np.array([1., 2-1.j])
    >>> np.sctype2char(x)
    'D'
    >>> np.sctype2char(list)
    'O'

    """
    resolved = obj2sctype(sctype)
    if resolved is None:
        # obj2sctype could not map the input onto a scalar type at all
        raise ValueError("unrecognized type")
    if resolved not in _concrete_types:
        # abstract scalar types have no character code; raise KeyError
        # for compatibility with the historical behaviour
        raise KeyError(resolved)
    return dtype(resolved).char
|
||||||
|
|
||||||
|
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
for key in _concrete_types:
    # bind `key` as a default argument so each lambda captures its own type;
    # a plain closure would late-bind and every entry would see the last key
    cast[key] = lambda x, k=key: array(x, copy=False).astype(k)

try:
    # Python 2: build ScalarType from the legacy `types` module aliases
    ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
                  _types.LongType, _types.BooleanType,
                  _types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
    # Py3K
    ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]

# append every concrete numpy scalar type and freeze the collection so it
# can be used in `isinstance(..., ScalarType)` checks
ScalarType.extend(_concrete_types)
ScalarType = tuple(ScalarType)
|
||||||
|
|
||||||
|
|
||||||
|
# Now add the types we've determined to this module
# (each scalar-type alias becomes a module-level attribute and is added to
# the public API via __all__)
for key in allTypes:
    globals()[key] = allTypes[key]
    __all__.append(key)

# avoid leaking the loop variable as a module attribute
del key
|
||||||
|
|
||||||
|
# Coarse type categories mapped to the dtype character codes they contain;
# 'All' covers every supported code.
typecodes = {'Character':'c',
             'Integer':'bhilqp',
             'UnsignedInteger':'BHILQP',
             'Float':'efdg',
             'Complex':'FDG',
             'AllInteger':'bBhHiIlLqQpP',
             'AllFloat':'efdgFDG',
             'Datetime': 'Mm',
             'All':'?bhilqpBHILQPefdgFDGSUVOMm'}

# backwards compatibility --- deprecated name
typeDict = sctypeDict
typeNA = sctypeNA
|
||||||
|
|
||||||
|
# dtype "kind" codes, used by find_common_type to rank kinds:
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# M -> datetime
# m -> timedelta
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']

# Candidate promotion targets in promotion order: bool, all integer codes
# except the pointer-sized 'p'/'P' (dropped by the [:-2] slice), all float
# codes, then object as the catch-all.
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
|
||||||
|
|
||||||
|
# Keep incrementing until a common type both can be coerced to
|
||||||
|
# is found. Otherwise, return None
|
||||||
|
def _find_common_coerce(a, b):
    """Return the smallest test dtype both `a` and `b` coerce to, or None.

    `a` is expected to be the higher-ranked of the pair; when it already
    dominates `b` it is returned unchanged.
    """
    if a > b:
        return a
    try:
        start = __test_types.index(a.char)
    except ValueError:
        # `a` is not one of the test types, so no common target exists
        return None
    return _can_coerce_all([a, b], start=start)
|
||||||
|
|
||||||
|
# Find a data-type that all data-types in a list can be coerced to
|
||||||
|
def _can_coerce_all(dtypelist, start=0):
    """Return the first test dtype (searching from `start`) that every
    dtype in `dtypelist` can be coerced to, or None when none qualifies.
    """
    count = len(dtypelist)
    if count == 0:
        return None
    if count == 1:
        # a single dtype trivially coerces to itself
        return dtypelist[0]
    for pos in range(start, __len_test_types):
        candidate = dtype(__test_types[pos])
        if all(candidate >= x for x in dtypelist):
            return candidate
    return None
|
||||||
|
|
||||||
|
def _register_types():
    """Register numpy's abstract scalar types with the `numbers` ABCs so
    isinstance checks against Integral/Real/Complex/Number accept them."""
    abc_pairs = [
        (numbers.Integral, integer),
        (numbers.Complex, inexact),
        (numbers.Real, floating),
        (numbers.Number, number),
    ]
    for abc, np_type in abc_pairs:
        abc.register(np_type)

_register_types()
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
def find_common_type(array_types, scalar_types):
    """
    Determine common type following standard coercion rules.

    Parameters
    ----------
    array_types : sequence
        A list of dtypes or dtype convertible objects representing arrays.
    scalar_types : sequence
        A list of dtypes or dtype convertible objects representing scalars.

    Returns
    -------
    datatype : dtype
        The common data type, which is the maximum of `array_types` ignoring
        `scalar_types`, unless the maximum of `scalar_types` is of a
        different kind (`dtype.kind`). If the kind is not understood, then
        None is returned.

    See Also
    --------
    dtype, common_type, can_cast, mintypecode

    Examples
    --------
    >>> np.find_common_type([], [np.int64, np.float32, complex])
    dtype('complex128')
    >>> np.find_common_type([np.int64, np.float32], [])
    dtype('float64')

    The standard casting rules ensure that a scalar cannot up-cast an
    array unless the scalar is of a fundamentally different kind of data
    (i.e. under a different hierarchy in the data type hierarchy) then
    the array:

    >>> np.find_common_type([np.float32], [np.int64, np.float64])
    dtype('float32')

    Complex is of a different type, so it up-casts the float in the
    `array_types` argument:

    >>> np.find_common_type([np.float32], [complex])
    dtype('complex128')

    Type specifier strings are convertible to dtypes and can therefore
    be used instead of dtypes:

    >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
    dtype('complex128')

    """
    # normalize every entry to an actual dtype instance up front
    array_types = [dtype(t) for t in array_types]
    scalar_types = [dtype(t) for t in scalar_types]

    max_array = _can_coerce_all(array_types)
    max_scalar = _can_coerce_all(scalar_types)

    # if either side is empty/uncoercible, the other side wins outright
    if max_array is None:
        return max_scalar
    if max_scalar is None:
        return max_array

    try:
        array_rank = _kind_list.index(max_array.kind)
        scalar_rank = _kind_list.index(max_scalar.kind)
    except ValueError:
        # an unrecognized kind: give up rather than guess
        return None

    if scalar_rank > array_rank:
        # the scalar kind outranks the array kind, so the scalar type is
        # allowed to up-cast the array type
        return _find_common_coerce(max_scalar, max_array)
    return max_array
|
210
Restaurant/Marta/venv/Lib/site-packages/numpy/core/overrides.py
Normal file
210
Restaurant/Marta/venv/Lib/site-packages/numpy/core/overrides.py
Normal file
@ -0,0 +1,210 @@
|
|||||||
|
"""Implementation of __array_function__ overrides from NEP-18."""
|
||||||
|
import collections
|
||||||
|
import functools
|
||||||
|
import os
|
||||||
|
import textwrap
|
||||||
|
|
||||||
|
from numpy.core._multiarray_umath import (
|
||||||
|
add_docstring, implement_array_function, _get_implementing_args)
|
||||||
|
from numpy.compat._inspect import getargspec
|
||||||
|
|
||||||
|
|
||||||
|
# Feature flag for NEP-18 __array_function__ dispatch; users opt out by
# setting NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0 in the environment before
# importing numpy. Defaults to enabled.
ARRAY_FUNCTION_ENABLED = bool(
    int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
|
||||||
|
|
||||||
|
|
||||||
|
# Attach public documentation to the C-implemented dispatch entry points;
# the string arguments below are runtime docstrings, not comments.
add_docstring(
    implement_array_function,
    """
    Implement a function with checks for __array_function__ overrides.

    All arguments are required, and can only be passed by position.

    Arguments
    ---------
    implementation : function
        Function that implements the operation on NumPy array without
        overrides when called like ``implementation(*args, **kwargs)``.
    public_api : function
        Function exposed by NumPy's public API originally called like
        ``public_api(*args, **kwargs)`` on which arguments are now being
        checked.
    relevant_args : iterable
        Iterable of arguments to check for __array_function__ methods.
    args : tuple
        Arbitrary positional arguments originally passed into ``public_api``.
    kwargs : dict
        Arbitrary keyword arguments originally passed into ``public_api``.

    Returns
    -------
    Result from calling ``implementation()`` or an ``__array_function__``
    method, as appropriate.

    Raises
    ------
    TypeError : if no implementation is found.
    """)


# exposed for testing purposes; used internally by implement_array_function
add_docstring(
    _get_implementing_args,
    """
    Collect arguments on which to call __array_function__.

    Parameters
    ----------
    relevant_args : iterable of array-like
        Iterable of possibly array-like arguments to check for
        __array_function__ methods.

    Returns
    -------
    Sequence of arguments with __array_function__ methods, in the order in
    which they should be called.
    """)
|
||||||
|
|
||||||
|
|
||||||
|
# Lightweight stand-in for inspect's legacy ArgSpec tuple.
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')


def verify_matching_signatures(implementation, dispatcher):
    """Verify that a dispatcher function has the right signature."""
    impl_spec = ArgSpec(*getargspec(implementation))
    disp_spec = ArgSpec(*getargspec(dispatcher))

    # The two signatures must agree on positional args, *args, **kwargs,
    # whether defaults exist at all, and (when they do) how many there are.
    mismatch = (
        impl_spec.args != disp_spec.args
        or impl_spec.varargs != disp_spec.varargs
        or impl_spec.keywords != disp_spec.keywords
        or bool(impl_spec.defaults) != bool(disp_spec.defaults)
        or (impl_spec.defaults is not None
            and len(impl_spec.defaults) != len(disp_spec.defaults))
    )
    if mismatch:
        raise RuntimeError('implementation and dispatcher for %s have '
                           'different function signatures' % implementation)

    if impl_spec.defaults is not None:
        # dispatchers never act on default values, so they must all be None
        if disp_spec.defaults != (None,) * len(disp_spec.defaults):
            raise RuntimeError('dispatcher functions can only use None for '
                               'default argument values')
|
||||||
|
|
||||||
|
|
||||||
|
def set_module(module):
    """Decorator for overriding __module__ on a function or class.

    Example usage::

        @set_module('numpy')
        def example():
            pass

        assert example.__module__ == 'numpy'
    """
    def decorator(obj):
        # A None module means "leave the decorated object untouched".
        if module is None:
            return obj
        obj.__module__ = module
        return obj
    return decorator
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Call textwrap.dedent here instead of in the function so as to avoid
# calling dedent multiple times on the same text.
# This template is exec'd by array_function_dispatch; `{name}` is filled in
# with the wrapped implementation's __name__ so the helper appears under a
# meaningful name in tracebacks.
_wrapped_func_source = textwrap.dedent("""
    @functools.wraps(implementation)
    def {name}(*args, **kwargs):
        relevant_args = dispatcher(*args, **kwargs)
        return implement_array_function(
            implementation, {name}, relevant_args, args, kwargs)
    """)
|
||||||
|
|
||||||
|
|
||||||
|
def array_function_dispatch(dispatcher, module=None, verify=True,
                            docs_from_dispatcher=False):
    """Decorator for adding dispatch with the __array_function__ protocol.

    See NEP-18 for example usage.

    Parameters
    ----------
    dispatcher : callable
        Function that when called like ``dispatcher(*args, **kwargs)`` with
        arguments from the NumPy function call returns an iterable of
        array-like arguments to check for ``__array_function__``.
    module : str, optional
        __module__ attribute to set on new function, e.g., ``module='numpy'``.
        By default, module is copied from the decorated function.
    verify : bool, optional
        If True, verify that the signature of the dispatcher and decorated
        function signatures match exactly: all required and optional arguments
        should appear in order with the same names, but the default values for
        all optional arguments should be ``None``. Only disable verification
        if the dispatcher's signature needs to deviate for some particular
        reason, e.g., because the function has a signature like
        ``func(*args, **kwargs)``.
    docs_from_dispatcher : bool, optional
        If True, copy docs from the dispatcher function onto the dispatched
        function, rather than from the implementation. This is useful for
        functions defined in C, which otherwise don't have docstrings.

    Returns
    -------
    Function suitable for decorating the implementation of a NumPy function.
    """

    if not ARRAY_FUNCTION_ENABLED:
        # Dispatch disabled: return the implementation unchanged (apart from
        # optional docstring/module fixes) with no wrapper overhead.
        def decorator(implementation):
            if docs_from_dispatcher:
                add_docstring(implementation, dispatcher.__doc__)
            if module is not None:
                implementation.__module__ = module
            return implementation
        return decorator

    def decorator(implementation):
        if verify:
            verify_matching_signatures(implementation, dispatcher)

        if docs_from_dispatcher:
            add_docstring(implementation, dispatcher.__doc__)

        # Equivalently, we could define this function directly instead of using
        # exec. This version has the advantage of giving the helper function a
        # more interpretable name. Otherwise, the original function does not
        # show up at all in many cases, e.g., if it's written in C or if the
        # dispatcher gets an invalid keyword argument.
        source = _wrapped_func_source.format(name=implementation.__name__)

        source_object = compile(
            source, filename='<__array_function__ internals>', mode='exec')
        # scope provides every free name the generated wrapper refers to
        scope = {
            'implementation': implementation,
            'dispatcher': dispatcher,
            'functools': functools,
            'implement_array_function': implement_array_function,
        }
        exec(source_object, scope)

        public_api = scope[implementation.__name__]

        if module is not None:
            public_api.__module__ = module

        # keep a handle on the undecorated implementation for tests/debugging
        public_api._implementation = implementation

        return public_api

    return decorator
|
||||||
|
|
||||||
|
|
||||||
|
def array_function_from_dispatcher(
        implementation, module=None, verify=True, docs_from_dispatcher=True):
    """Like array_function_dispatcher, but with function arguments flipped."""

    def decorator(dispatcher):
        # Build the normal dispatch decorator, then immediately apply it to
        # the fixed implementation supplied up front.
        dispatch = array_function_dispatch(
            dispatcher, module, verify=verify,
            docs_from_dispatcher=docs_from_dispatcher)
        return dispatch(implementation)
    return decorator
|
886
Restaurant/Marta/venv/Lib/site-packages/numpy/core/records.py
Normal file
886
Restaurant/Marta/venv/Lib/site-packages/numpy/core/records.py
Normal file
@ -0,0 +1,886 @@
|
|||||||
|
"""
|
||||||
|
Record Arrays
|
||||||
|
=============
|
||||||
|
Record arrays expose the fields of structured arrays as properties.
|
||||||
|
|
||||||
|
Most commonly, ndarrays contain elements of a single type, e.g. floats,
|
||||||
|
integers, bools etc. However, it is possible for elements to be combinations
|
||||||
|
of these using structured types, such as::
|
||||||
|
|
||||||
|
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)])
|
||||||
|
>>> a
|
||||||
|
array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')])
|
||||||
|
|
||||||
|
Here, each element consists of two fields: x (and int), and y (a float).
|
||||||
|
This is known as a structured array. The different fields are analogous
|
||||||
|
to columns in a spread-sheet. The different fields can be accessed as
|
||||||
|
one would a dictionary::
|
||||||
|
|
||||||
|
>>> a['x']
|
||||||
|
array([1, 1])
|
||||||
|
|
||||||
|
>>> a['y']
|
||||||
|
array([2., 2.])
|
||||||
|
|
||||||
|
Record arrays allow us to access fields as properties::
|
||||||
|
|
||||||
|
>>> ar = np.rec.array(a)
|
||||||
|
|
||||||
|
>>> ar.x
|
||||||
|
array([1, 1])
|
||||||
|
|
||||||
|
>>> ar.y
|
||||||
|
array([2., 2.])
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import warnings
|
||||||
|
from collections import Counter, OrderedDict
|
||||||
|
|
||||||
|
from . import numeric as sb
|
||||||
|
from . import numerictypes as nt
|
||||||
|
from numpy.compat import (
|
||||||
|
isfileobj, bytes, long, unicode, os_fspath, contextlib_nullcontext
|
||||||
|
)
|
||||||
|
from numpy.core.overrides import set_module
|
||||||
|
from .arrayprint import get_printoptions
|
||||||
|
|
||||||
|
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']


# local alias so the rest of the module can reference ndarray directly
ndarray = sb.ndarray

# Map single-character byteorder specifiers (including legacy aliases such
# as 'b'/'l'/'n' and 'I'/'i') to canonical numpy byteorder characters.
_byteorderconv = {'b':'>',
                  'l':'<',
                  'n':'=',
                  'B':'>',
                  'L':'<',
                  'N':'=',
                  'S':'s',
                  's':'s',
                  '>':'>',
                  '<':'<',
                  '=':'=',
                  '|':'|',
                  'I':'|',
                  'i':'|'}

# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed

# alias for the module-level scalar-type lookup table
numfmt = nt.typeDict
|
||||||
|
|
||||||
|
# taken from OrderedDict recipes in the Python documentation
|
||||||
|
# https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes
|
||||||
|
class _OrderedCounter(Counter, OrderedDict):
|
||||||
|
"""Counter that remembers the order elements are first encountered"""
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
|
||||||
|
|
||||||
|
def __reduce__(self):
|
||||||
|
return self.__class__, (OrderedDict(self),)
|
||||||
|
|
||||||
|
|
||||||
|
def find_duplicate(list):
    """Find duplication in a list, return a list of duplicated elements"""
    # Count in first-seen order so duplicates come back in a stable order.
    counts = _OrderedCounter(list)
    duplicates = []
    for item, count in counts.items():
        if count > 1:
            duplicates.append(item)
    return duplicates
|
||||||
|
|
||||||
|
|
||||||
|
@set_module('numpy')
|
||||||
|
class format_parser(object):
    """
    Class to convert formats, names, titles description to a dtype.

    After constructing the format_parser object, the dtype attribute is
    the converted data-type:
    ``dtype = format_parser(formats, names, titles).dtype``

    Attributes
    ----------
    dtype : dtype
        The converted data-type.

    Parameters
    ----------
    formats : str or list of str
        The format description, either specified as a string with
        comma-separated format descriptions in the form ``'f8, i4, a5'``, or
        a list of format description strings in the form
        ``['f8', 'i4', 'a5']``.
    names : str or list/tuple of str
        The field names, either specified as a comma-separated string in the
        form ``'col1, col2, col3'``, or as a list or tuple of strings in the
        form ``['col1', 'col2', 'col3']``.
        An empty list can be used, in that case default field names
        ('f0', 'f1', ...) are used.
    titles : sequence
        Sequence of title strings. An empty list can be used to leave titles
        out.
    aligned : bool, optional
        If True, align the fields by padding as the C-compiler would.
        Default is False.
    byteorder : str, optional
        If specified, all the fields will be changed to the
        provided byte-order. Otherwise, the default byte-order is
        used. For all available string specifiers, see `dtype.newbyteorder`.

    See Also
    --------
    dtype, typename, sctype2char

    Examples
    --------
    >>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'],
    ...                  ['T1', 'T2', 'T3']).dtype
    dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])

    `names` and/or `titles` can be empty lists. If `titles` is an empty list,
    titles will simply not appear. If `names` is empty, default field names
    will be used.

    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
    ...                  []).dtype
    dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')])
    >>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
    dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])

    """

    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
        # Three-stage construction: each helper fills private state that the
        # next one consumes; the final structured dtype ends up in self.dtype.
        self._parseFormats(formats, aligned)
        self._setfieldnames(names, titles)
        self._createdescr(byteorder)
        self.dtype = self._descr

    def _parseFormats(self, formats, aligned=False):
        """ Parse the field formats """

        if formats is None:
            raise ValueError("Need formats argument")
        if isinstance(formats, list):
            # a list of formats gets provisional names f0, f1, ...
            dtype = sb.dtype(
                [('f{}'.format(i), format_) for i, format_ in enumerate(formats)],
                aligned,
            )
        else:
            dtype = sb.dtype(formats, aligned)
        fields = dtype.fields
        if fields is None:
            # the spec described a single plain type; wrap it in a one-field
            # structured dtype so the rest of the parser has fields to walk
            dtype = sb.dtype([('f1', dtype)], aligned)
            fields = dtype.fields
        keys = dtype.names
        # record per-field dtypes and byte offsets for _createdescr
        self._f_formats = [fields[key][0] for key in keys]
        self._offsets = [fields[key][1] for key in keys]
        self._nfields = len(keys)

    def _setfieldnames(self, names, titles):
        """convert input field names into a list and assign to the _names
        attribute """

        if (names):
            if (type(names) in [list, tuple]):
                pass
            elif isinstance(names, (str, unicode)):
                # a comma-separated string of names is also accepted
                names = names.split(',')
            else:
                raise NameError("illegal input names %s" % repr(names))

            # ignore any excess names beyond the parsed field count
            self._names = [n.strip() for n in names[:self._nfields]]
        else:
            self._names = []

        # if the names are not specified, they will be assigned as
        # "f0, f1, f2,..."
        # if not enough names are specified, they will be assigned as "f[n],
        # f[n+1],..." etc. where n is the number of specified names..."
        self._names += ['f%d' % i for i in range(len(self._names),
                                                 self._nfields)]
        # check for redundant names
        _dup = find_duplicate(self._names)
        if _dup:
            raise ValueError("Duplicate field names: %s" % _dup)

        if (titles):
            self._titles = [n.strip() for n in titles[:self._nfields]]
        else:
            self._titles = []
            titles = []

        # pad missing titles with None so every field has a title slot
        if (self._nfields > len(titles)):
            self._titles += [None] * (self._nfields - len(titles))

    def _createdescr(self, byteorder):
        # Compose the final structured dtype from the pieces collected by
        # _parseFormats and _setfieldnames.
        descr = sb.dtype({'names':self._names,
                          'formats':self._f_formats,
                          'offsets':self._offsets,
                          'titles':self._titles})
        if (byteorder is not None):
            # only the first character of the byteorder spec is significant
            byteorder = _byteorderconv[byteorder[0]]
            descr = descr.newbyteorder(byteorder)

        self._descr = descr
|
||||||
|
|
||||||
|
class record(nt.void):
|
||||||
|
"""A data-type scalar that allows field access as attribute lookup.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# manually set name and module so that this class's type shows up
|
||||||
|
# as numpy.record when printed
|
||||||
|
__name__ = 'record'
|
||||||
|
__module__ = 'numpy'
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
if get_printoptions()['legacy'] == '1.13':
|
||||||
|
return self.__str__()
|
||||||
|
return super(record, self).__repr__()
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
if get_printoptions()['legacy'] == '1.13':
|
||||||
|
return str(self.item())
|
||||||
|
return super(record, self).__str__()
|
||||||
|
|
||||||
|
def __getattribute__(self, attr):
|
||||||
|
if attr in ['setfield', 'getfield', 'dtype']:
|
||||||
|
return nt.void.__getattribute__(self, attr)
|
||||||
|
try:
|
||||||
|
return nt.void.__getattribute__(self, attr)
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
fielddict = nt.void.__getattribute__(self, 'dtype').fields
|
||||||
|
res = fielddict.get(attr, None)
|
||||||
|
if res:
|
||||||
|
obj = self.getfield(*res[:2])
|
||||||
|
# if it has fields return a record,
|
||||||
|
# otherwise return the object
|
||||||
|
try:
|
||||||
|
dt = obj.dtype
|
||||||
|
except AttributeError:
|
||||||
|
#happens if field is Object type
|
||||||
|
return obj
|
||||||
|
if dt.names is not None:
|
||||||
|
return obj.view((self.__class__, obj.dtype))
|
||||||
|
return obj
|
||||||
|
else:
|
||||||
|
raise AttributeError("'record' object has no "
|
||||||
|
"attribute '%s'" % attr)
|
||||||
|
|
||||||
|
def __setattr__(self, attr, val):
|
||||||
|
if attr in ['setfield', 'getfield', 'dtype']:
|
||||||
|
raise AttributeError("Cannot set '%s' attribute" % attr)
|
||||||
|
fielddict = nt.void.__getattribute__(self, 'dtype').fields
|
||||||
|
res = fielddict.get(attr, None)
|
||||||
|
if res:
|
||||||
|
return self.setfield(val, *res[:2])
|
||||||
|
else:
|
||||||
|
if getattr(self, attr, None):
|
||||||
|
return nt.void.__setattr__(self, attr, val)
|
||||||
|
else:
|
||||||
|
raise AttributeError("'record' object has no "
|
||||||
|
"attribute '%s'" % attr)
|
||||||
|
|
||||||
|
def __getitem__(self, indx):
|
||||||
|
obj = nt.void.__getitem__(self, indx)
|
||||||
|
|
||||||
|
# copy behavior of record.__getattribute__,
|
||||||
|
if isinstance(obj, nt.void) and obj.dtype.names is not None:
|
||||||
|
return obj.view((self.__class__, obj.dtype))
|
||||||
|
else:
|
||||||
|
# return a single element
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def pprint(self):
    """Pretty-print all fields, one right-aligned ``name: value`` line each."""
    field_names = self.dtype.names
    width = max(len(field_name) for field_name in field_names)
    lines = []
    for field_name in field_names:
        lines.append('%*s: %s' % (width, field_name, getattr(self, field_name)))
    return "\n".join(lines)
|
||||||
|
# The recarray is almost identical to a standard array (which supports
|
||||||
|
# named fields already) The biggest difference is that it can use
|
||||||
|
# attribute-lookup to find the fields and it is constructed using
|
||||||
|
# a record.
|
||||||
|
|
||||||
|
# If byteorder is given it forces a particular byteorder on all
|
||||||
|
# the fields (and any subfields)
|
||||||
|
|
||||||
|
class recarray(ndarray):
|
||||||
|
"""Construct an ndarray that allows field access using attributes.
|
||||||
|
|
||||||
|
Arrays may have a data-types containing fields, analogous
|
||||||
|
to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,
|
||||||
|
where each entry in the array is a pair of ``(int, float)``. Normally,
|
||||||
|
these attributes are accessed using dictionary lookups such as ``arr['x']``
|
||||||
|
and ``arr['y']``. Record arrays allow the fields to be accessed as members
|
||||||
|
of the array, using ``arr.x`` and ``arr.y``.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
shape : tuple
|
||||||
|
Shape of output array.
|
||||||
|
dtype : data-type, optional
|
||||||
|
The desired data-type. By default, the data-type is determined
|
||||||
|
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
|
||||||
|
formats : list of data-types, optional
|
||||||
|
A list containing the data-types for the different columns, e.g.
|
||||||
|
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
|
||||||
|
convention of using types directly, i.e. ``(int, float, int)``.
|
||||||
|
Note that `formats` must be a list, not a tuple.
|
||||||
|
Given that `formats` is somewhat limited, we recommend specifying
|
||||||
|
`dtype` instead.
|
||||||
|
names : tuple of str, optional
|
||||||
|
The name of each column, e.g. ``('x', 'y', 'z')``.
|
||||||
|
buf : buffer, optional
|
||||||
|
By default, a new array is created of the given shape and data-type.
|
||||||
|
If `buf` is specified and is an object exposing the buffer interface,
|
||||||
|
the array will use the memory from the existing buffer. In this case,
|
||||||
|
the `offset` and `strides` keywords are available.
|
||||||
|
|
||||||
|
Other Parameters
|
||||||
|
----------------
|
||||||
|
titles : tuple of str, optional
|
||||||
|
Aliases for column names. For example, if `names` were
|
||||||
|
``('x', 'y', 'z')`` and `titles` is
|
||||||
|
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
|
||||||
|
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
|
||||||
|
byteorder : {'<', '>', '='}, optional
|
||||||
|
Byte-order for all fields.
|
||||||
|
aligned : bool, optional
|
||||||
|
Align the fields in memory as the C-compiler would.
|
||||||
|
strides : tuple of ints, optional
|
||||||
|
Buffer (`buf`) is interpreted according to these strides (strides
|
||||||
|
define how many bytes each array element, row, column, etc.
|
||||||
|
occupy in memory).
|
||||||
|
offset : int, optional
|
||||||
|
Start reading buffer (`buf`) from this offset onwards.
|
||||||
|
order : {'C', 'F'}, optional
|
||||||
|
Row-major (C-style) or column-major (Fortran-style) order.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
rec : recarray
|
||||||
|
Empty array of the given shape and type.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
rec.fromrecords : Construct a record array from data.
|
||||||
|
record : fundamental data-type for `recarray`.
|
||||||
|
format_parser : determine a data-type from formats, names, titles.
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
This constructor can be compared to ``empty``: it creates a new record
|
||||||
|
array but does not fill it with data. To create a record array from data,
|
||||||
|
use one of the following methods:
|
||||||
|
|
||||||
|
1. Create a standard ndarray and convert it to a record array,
|
||||||
|
using ``arr.view(np.recarray)``
|
||||||
|
2. Use the `buf` keyword.
|
||||||
|
3. Use `np.rec.fromrecords`.
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
Create an array with two fields, ``x`` and ``y``:
|
||||||
|
|
||||||
|
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')])
|
||||||
|
>>> x
|
||||||
|
array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')])
|
||||||
|
|
||||||
|
>>> x['x']
|
||||||
|
array([1., 3.])
|
||||||
|
|
||||||
|
View the array as a record array:
|
||||||
|
|
||||||
|
>>> x = x.view(np.recarray)
|
||||||
|
|
||||||
|
>>> x.x
|
||||||
|
array([1., 3.])
|
||||||
|
|
||||||
|
>>> x.y
|
||||||
|
array([2, 4])
|
||||||
|
|
||||||
|
Create a new, empty record array:
|
||||||
|
|
||||||
|
>>> np.recarray((2,),
|
||||||
|
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
|
||||||
|
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
|
||||||
|
(3471280, 1.2134086255804012e-316, 0)],
|
||||||
|
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# manually set name and module so that this class's type shows
|
||||||
|
# up as "numpy.recarray" when printed
|
||||||
|
__name__ = 'recarray'
|
||||||
|
__module__ = 'numpy'
|
||||||
|
|
||||||
|
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
            formats=None, names=None, titles=None,
            byteorder=None, aligned=False, order='C'):
    """Allocate a new recarray of the given shape.

    The item dtype comes from *dtype* when given, otherwise it is built by
    ``format_parser`` from formats/names/titles/aligned/byteorder.  When
    *buf* is given the array wraps that existing buffer (honoring *offset*
    and *strides*) instead of allocating fresh memory.
    """
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr

    if buf is None:
        # (record, descr) makes scalar elements come out as np.record.
        self = ndarray.__new__(subtype, shape, (record, descr), order=order)
    else:
        self = ndarray.__new__(subtype, shape, (record, descr),
                               buffer=buf, offset=offset,
                               strides=strides, order=order)
    return self
|
||||||
|
def __array_finalize__(self, obj):
    """Ensure views/copies with a structured dtype carry the record type."""
    if self.dtype.type is not record and self.dtype.names is not None:
        # if self.dtype is not np.record, invoke __setattr__ which will
        # convert it to a record if it is a void dtype.
        self.dtype = self.dtype
|
||||||
|
def __getattribute__(self, attr):
    """Return a normal ndarray attribute, or, failing that, the field of
    that name as an array (with its view type adjusted)."""
    # See if ndarray has this attr, and return it if so. (note that this
    # means a field with the same name as an ndarray attr cannot be
    # accessed by attribute).
    try:
        return object.__getattribute__(self, attr)
    except AttributeError:  # attr must be a fieldname
        pass

    # look for a field with this name
    fielddict = ndarray.__getattribute__(self, 'dtype').fields
    try:
        res = fielddict[attr][:2]
    except (TypeError, KeyError):
        # TypeError covers the unstructured case where fields is None.
        raise AttributeError("recarray has no attribute %s" % attr)
    obj = self.getfield(*res)

    # At this point obj will always be a recarray, since (see
    # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
    # non-structured, convert it to an ndarray. Then if obj is structured
    # with void type convert it to the same dtype.type (eg to preserve
    # numpy.record type if present), since nested structured fields do not
    # inherit type. Don't do this for non-void structures though.
    if obj.dtype.names is not None:
        if issubclass(obj.dtype.type, nt.void):
            return obj.view(dtype=(self.dtype.type, obj.dtype))
        return obj
    else:
        return obj.view(ndarray)
|
||||||
|
# Save the dictionary.
|
||||||
|
# If the attr is a field name and not in the saved dictionary
|
||||||
|
# Undo any "setting" of the attribute and do a setfield
|
||||||
|
# Thus, you can't create attributes on-the-fly that are field names.
|
||||||
|
def __setattr__(self, attr, val):
    """Set *attr* as a regular attribute unless it names a field, in which
    case the assignment is redirected to ``setfield``.

    The tricky part: ``object.__setattr__`` is attempted first, and then
    undone if *attr* turns out to be a field name, so that field names can
    never be shadowed by instance attributes.
    """
    # Automatically convert (void) structured types to records
    # (but not non-void structures, subarrays, or non-structured voids)
    if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
        val = sb.dtype((record, val))

    # Remember whether the attribute existed before this call, so a
    # freshly created one can be rolled back below.
    newattr = attr not in self.__dict__
    try:
        ret = object.__setattr__(self, attr, val)
    except Exception:
        fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
        if attr not in fielddict:
            # Genuine failure on a non-field attribute: propagate.
            raise
    else:
        fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
        if attr not in fielddict:
            # Plain attribute set succeeded and it is not a field: done.
            return ret
    if newattr:
        # We just added this one or this setattr worked on an
        # internal attribute.
        try:
            object.__delattr__(self, attr)
        except Exception:
            return ret
    try:
        res = fielddict[attr][:2]
    except (TypeError, KeyError):
        raise AttributeError("record array has no attribute %s" % attr)
    return self.setfield(val, *res)
|
||||||
|
def __getitem__(self, indx):
    """Index the recarray; structured sub-arrays keep the recarray/record
    types, unstructured ones are demoted to plain ndarray."""
    obj = super(recarray, self).__getitem__(indx)

    # copy behavior of getattr, except that here
    # we might also be returning a single element
    if isinstance(obj, ndarray):
        if obj.dtype.names is not None:
            obj = obj.view(type(self))
            # preserve numpy.record scalar type for nested void fields
            if issubclass(obj.dtype.type, nt.void):
                return obj.view(dtype=(self.dtype.type, obj.dtype))
            return obj
        else:
            return obj.view(type=ndarray)
    else:
        # return a single element
        return obj
|
||||||
|
def __repr__(self):
    """Return a repr spelled as ``rec.array(...)`` or, for unusual dtypes,
    as ``array(...).view(numpy.recarray)``."""
    repr_dtype = self.dtype
    if (self.dtype.type is record
            or (not issubclass(self.dtype.type, nt.void))):
        # If this is a full record array (has numpy.record dtype),
        # or if it has a scalar (non-void) dtype with no records,
        # represent it using the rec.array function. Since rec.array
        # converts dtype to a numpy.record for us, convert back
        # to non-record before printing
        if repr_dtype.type is record:
            repr_dtype = sb.dtype((nt.void, repr_dtype))
        prefix = "rec.array("
        fmt = 'rec.array(%s,%sdtype=%s)'
    else:
        # otherwise represent it using np.array plus a view
        # This should only happen if the user is playing
        # strange games with dtypes.
        prefix = "array("
        fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'

    # get data/shape string. logic taken from numeric.array_repr
    if self.size > 0 or self.shape == (0,):
        lst = sb.array2string(
            self, separator=', ', prefix=prefix, suffix=',')
    else:
        # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(self.shape),)

    # Line-feed separator aligned under the opening parenthesis.
    lf = '\n'+' '*len(prefix)
    if get_printoptions()['legacy'] == '1.13':
        lf = ' ' + lf  # trailing space
    return fmt % (lst, lf, repr_dtype)
|
||||||
|
def field(self, attr, val=None):
    """Get (when *val* is None) or set a field selected by name or by
    positional index *attr*."""
    if isinstance(attr, int):
        # Resolve a positional index to the corresponding field name.
        names = ndarray.__getattribute__(self, 'dtype').names
        attr = names[attr]

    fielddict = ndarray.__getattribute__(self, 'dtype').fields

    # (field-dtype, offset) pair for getfield/setfield.
    res = fielddict[attr][:2]

    if val is None:
        obj = self.getfield(*res)
        if obj.dtype.names is not None:
            return obj
        # Unstructured field data: drop the recarray view type.
        return obj.view(ndarray)
    else:
        return self.setfield(val, *res)
|
||||||
|
|
||||||
|
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """ create a record array from a (flat) list of arrays

    >>> x1=np.array([1,2,3,4])
    >>> x2=np.array(['a','dd','xyz','12'])
    >>> x3=np.array([1.1,2,3,4])
    >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
    >>> print(r[1])
    (2, 'dd', 2.0) # may vary
    >>> x1[1]=34
    >>> r.a
    array([1, 2, 3, 4])
    """

    arrayList = [sb.asarray(x) for x in arrayList]

    # Default the output shape to the first input array's shape.
    # NOTE(review): assumes arrayList is non-empty — an empty list raises
    # IndexError here; confirm callers guarantee non-empty input.
    if shape is None or shape == 0:
        shape = arrayList[0].shape

    if isinstance(shape, int):
        shape = (shape,)

    if formats is None and dtype is None:
        # go through each object in the list to see if it is an ndarray
        # and determine the formats.
        formats = []
        for obj in arrayList:
            formats.append(obj.dtype)

    if dtype is not None:
        descr = sb.dtype(dtype)
        _names = descr.names
    else:
        parsed = format_parser(formats, names, titles, aligned, byteorder)
        _names = parsed._names
        descr = parsed._descr

    # Determine shape from data-type.
    if len(descr) != len(arrayList):
        raise ValueError("mismatch between the number of fields "
                         "and the number of arrays")

    d0 = descr[0].shape
    nn = len(d0)
    if nn > 0:
        # First field is a subarray: trim its trailing dims off the shape.
        shape = shape[:-nn]

    # Every input array must match the record shape once its field's own
    # subarray dimensions are stripped.
    for k, obj in enumerate(arrayList):
        nn = descr[k].ndim
        testshape = obj.shape[:obj.ndim - nn]
        if testshape != shape:
            raise ValueError("array-shape mismatch in array %d" % k)

    _array = recarray(shape, descr)

    # populate the record array (makes a copy)
    for i in range(len(arrayList)):
        _array[_names[i]] = arrayList[i]

    return _array
|
||||||
|
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None):
    """ create a recarray from a list of records in text form

    The data in the same field can be heterogeneous, they will be promoted
    to the highest data type. This method is intended for creating
    smaller record arrays. If used to create large array without formats
    defined

    r=fromrecords([(2,3.,'abc')]*100000)

    it can be slow.

    If formats is None, then this will auto-detect formats. Use list of
    tuples rather than list of lists for faster processing.

    >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
    ... names='col1,col2,col3')
    >>> print(r[0])
    (456, 'dbe', 1.2)
    >>> r.col1
    array([456,   2])
    >>> r.col2
    array(['dbe', 'de'], dtype='<U3')
    >>> import pickle
    >>> pickle.loads(pickle.dumps(r))
    rec.array([(456, 'dbe', 1.2), (  2, 'de', 1.3)],
              dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])
    """

    if formats is None and dtype is None:  # slower
        # Auto-detect: transpose into per-column arrays and delegate.
        obj = sb.array(recList, dtype=object)
        arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]
        return fromarrays(arrlist, formats=formats, shape=shape, names=names,
                          titles=titles, aligned=aligned, byteorder=byteorder)

    if dtype is not None:
        descr = sb.dtype((record, dtype))
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr

    try:
        retval = sb.array(recList, dtype=descr)
    except (TypeError, ValueError):
        # Direct conversion failed (typically a list of lists rather than
        # tuples): fall back to element-wise assignment.
        if (shape is None or shape == 0):
            shape = len(recList)
        if isinstance(shape, (int, long)):
            shape = (shape,)
        if len(shape) > 1:
            raise ValueError("Can only deal with 1-d array.")
        _array = recarray(shape, descr)
        for k in range(_array.size):
            _array[k] = tuple(recList[k])
        # list of lists instead of list of tuples ?
        # 2018-02-07, 1.14.1
        warnings.warn(
            "fromrecords expected a list of tuples, may have received a list "
            "of lists instead. In the future that will raise an error",
            FutureWarning, stacklevel=2)
        return _array
    else:
        if shape is not None and retval.shape != shape:
            retval.shape = shape

    res = retval.view(recarray)

    return res
|
||||||
|
|
||||||
|
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """Create a (read-only) record array from binary data contained in
    a string."""
    if dtype is None and formats is None:
        raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")

    # Resolve the item dtype either directly or via the format parser.
    if dtype is None:
        itemdtype = format_parser(formats, names, titles, aligned, byteorder)._descr
    else:
        itemdtype = sb.dtype(dtype)

    # An unspecified shape is inferred from the bytes available past the
    # requested offset.
    if shape in (None, 0, -1):
        shape = (len(datastring) - offset) // itemdtype.itemsize

    return recarray(shape, itemdtype, buf=datastring, offset=offset)
|
|
||||||
|
def get_remaining_size(fd):
    """Return the number of bytes left in *fd* from its current position."""
    try:
        fileno = fd.fileno()
    except AttributeError:
        # Not an OS-level file: fall back to the size of the named file.
        return os.path.getsize(fd.name) - fd.tell()
    return os.fstat(fileno).st_size - fd.tell()
|
|
||||||
|
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
             names=None, titles=None, aligned=False, byteorder=None):
    """Create an array from binary file data

    If file is a string or a path-like object then that file is opened,
    else it is assumed to be a file object. The file object must
    support random access (i.e. it must have tell and seek methods).

    >>> from tempfile import TemporaryFile
    >>> a = np.empty(10,dtype='f8,i4,a5')
    >>> a[5] = (0.5,10,'abcde')
    >>>
    >>> fd=TemporaryFile()
    >>> a = a.newbyteorder('<')
    >>> a.tofile(fd)
    >>>
    >>> _ = fd.seek(0)
    >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
    ... byteorder='<')
    >>> print(r[5])
    (0.5, 10, 'abcde')
    >>> r.shape
    (10,)
    """

    if dtype is None and formats is None:
        raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")

    if (shape is None or shape == 0):
        # -1 means "infer this dimension from the bytes remaining".
        shape = (-1,)
    elif isinstance(shape, (int, long)):
        shape = (shape,)

    if isfileobj(fd):
        # file already opened
        ctx = contextlib_nullcontext(fd)
    else:
        # open file
        ctx = open(os_fspath(fd), 'rb')

    with ctx as fd:
        if (offset > 0):
            # Seek relative to the current position, not the file start.
            fd.seek(offset, 1)
        size = get_remaining_size(fd)

        if dtype is not None:
            descr = sb.dtype(dtype)
        else:
            descr = format_parser(formats, names, titles, aligned, byteorder)._descr

        itemsize = descr.itemsize

        shapeprod = sb.array(shape).prod(dtype=nt.intp)
        shapesize = shapeprod * itemsize
        if shapesize < 0:
            # Exactly one dimension was -1: solve for it from the bytes
            # available (negative product keeps the sign trick working).
            shape = list(shape)
            shape[shape.index(-1)] = size // -shapesize
            shape = tuple(shape)
            shapeprod = sb.array(shape).prod(dtype=nt.intp)

        nbytes = shapeprod * itemsize

        if nbytes > size:
            raise ValueError(
                "Not enough bytes left in file for specified shape and type")

        # create the array
        _array = recarray(shape, descr)
        nbytesread = fd.readinto(_array.data)
        if nbytesread != nbytes:
            raise IOError("Didn't read as many bytes as expected")

    return _array
|
|
||||||
|
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
          names=None, titles=None, aligned=False, byteorder=None, copy=True):
    """Construct a record array from a wide-variety of objects.

    Dispatches on the type of *obj*: None (empty/buffer-backed recarray),
    bytes (fromstring), list/tuple (fromrecords or fromarrays), recarray,
    open file (fromfile), ndarray, or any object exposing
    ``__array_interface__``.
    """

    if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
           (formats is None) and (dtype is None)):
        raise ValueError("Must define formats (or dtype) if object is "
                         "None, string, or an open file")

    # kwds carries the format-parser arguments through to the from*
    # helpers only when no dtype could be resolved here.
    kwds = {}
    if dtype is not None:
        dtype = sb.dtype(dtype)
    elif formats is not None:
        dtype = format_parser(formats, names, titles,
                              aligned, byteorder)._descr
    else:
        kwds = {'formats': formats,
                'names': names,
                'titles': titles,
                'aligned': aligned,
                'byteorder': byteorder
                }

    if obj is None:
        if shape is None:
            raise ValueError("Must define a shape if obj is None")
        # buf=None here, so this allocates a fresh uninitialized recarray.
        return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)

    elif isinstance(obj, bytes):
        return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)

    elif isinstance(obj, (list, tuple)):
        # A list of tuples/lists is rows; otherwise it is per-column arrays.
        if isinstance(obj[0], (tuple, list)):
            return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
        else:
            return fromarrays(obj, dtype=dtype, shape=shape, **kwds)

    elif isinstance(obj, recarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new

    elif isfileobj(obj):
        return fromfile(obj, dtype=dtype, shape=shape, offset=offset)

    elif isinstance(obj, ndarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new.view(recarray)

    else:
        # Last resort: anything exposing the array interface protocol.
        interface = getattr(obj, "__array_interface__", None)
        if interface is None or not isinstance(interface, dict):
            raise ValueError("Unknown input type")
        obj = sb.array(obj)
        if dtype is not None and (obj.dtype != dtype):
            obj = obj.view(dtype)
        return obj.view(recarray)
|
979
Restaurant/Marta/venv/Lib/site-packages/numpy/core/setup.py
Normal file
979
Restaurant/Marta/venv/Lib/site-packages/numpy/core/setup.py
Normal file
@ -0,0 +1,979 @@
|
|||||||
|
from __future__ import division, print_function
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import pickle
|
||||||
|
import copy
|
||||||
|
import warnings
|
||||||
|
import platform
|
||||||
|
import textwrap
|
||||||
|
from os.path import join
|
||||||
|
|
||||||
|
from numpy.distutils import log
|
||||||
|
from distutils.dep_util import newer
|
||||||
|
from distutils.sysconfig import get_config_var
|
||||||
|
from numpy._build_utils.apple_accelerate import (
|
||||||
|
uses_accelerate_framework, get_sgemv_fix
|
||||||
|
)
|
||||||
|
from numpy.compat import npy_load_module
|
||||||
|
from setup_common import *
|
||||||
|
|
||||||
|
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")

# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
# bogus value for affected strides in order to help smoke out bad stride usage
# when relaxed stride checking is enabled.
NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
# Debug mode is only meaningful when relaxed stride checking itself is on.
NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
||||||
|
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
|
||||||
|
# config.h/numpyconfig.h. I don't see a better way because distutils force
|
||||||
|
# config.h generation inside an Extension class, and as such sharing
|
||||||
|
# configuration information between extensions is not easy.
|
||||||
|
# Using a pickled-based memoize does not work because config_cmd is an instance
|
||||||
|
# method, which cPickle does not like.
|
||||||
|
#
|
||||||
|
# Use pickle in all cases, as cPickle is gone in python3 and the difference
|
||||||
|
# in time is only in build. -- Charles Harris, 2013-03-30
|
||||||
|
|
||||||
|
class CallOnceOnly(object):
    """Memoize the expensive configuration checks.

    The first call runs the real check and stores the result pickled;
    later calls return a deep copy of the unpickled result so callers
    cannot mutate the cache.  (See the module comment above: a
    decorator-based pickle memoize does not work on instance methods.)
    """

    def __init__(self):
        # Each slot holds the pickled result of the first call, or None.
        self._check_types = None
        self._check_ieee_macros = None
        self._check_complex = None

    def _cached(self, attr, func, a, kw):
        """Run ``func(*a, **kw)`` once, caching its pickled result in
        ``self.<attr>``; subsequent calls return deep copies."""
        cached = getattr(self, attr)
        if cached is None:
            out = func(*a, **kw)
            setattr(self, attr, pickle.dumps(out))
        else:
            out = copy.deepcopy(pickle.loads(cached))
        return out

    def check_types(self, *a, **kw):
        return self._cached('_check_types', check_types, a, kw)

    def check_ieee_macros(self, *a, **kw):
        return self._cached('_check_ieee_macros', check_ieee_macros, a, kw)

    def check_complex(self, *a, **kw):
        return self._cached('_check_complex', check_complex, a, kw)
|
|
||||||
|
def pythonlib_dir():
    """return path where libpython* is."""
    # On non-Windows platforms the sysconfig variable is authoritative;
    # Windows keeps its import libraries under <prefix>/libs.
    if sys.platform != 'win32':
        return get_config_var('LIBDIR')
    return os.path.join(sys.prefix, "libs")
|
||||||
|
def is_npy_no_signal():
    """Return True if the NPY_NO_SIGNAL symbol must be defined in the
    configuration header (signal handling is unusable on win32)."""
    platform_is_win32 = (sys.platform == 'win32')
    return platform_is_win32
|
||||||
|
def is_npy_no_smp():
    """Return True if the NPY_NO_SMP symbol must be defined in public
    header (when SMP support cannot be reliably enabled).

    Any value of the NPY_NOSMP environment variable disables SMP.  A
    fancier check (e.g. the actual CPU count) has been considered, but
    threads can help even on one CPU by keeping long computations from
    blocking.
    """
    return os.environ.get('NPY_NOSMP') is not None
|
||||||
|
def win32_checks(deflist):
    """Append win32-specific preprocessor defines to *deflist* in place."""
    from numpy.distutils.misc_util import get_build_architecture
    a = get_build_architecture()

    # Distutils hack on AMD64 on windows
    print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
          (a, os.name, sys.platform))
    if a == 'AMD64':
        deflist.append('DISTUTILS_USE_SDK')

    # On win32, force long double format string to be 'g', not
    # 'Lg', since the MS runtime does not support long double whose
    # size is > sizeof(double)
    if a == "Intel" or a == "AMD64":
        deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
|
||||||
|
def check_math_capabilities(config, moredefs, mathlibs):
    """Probe the C toolchain for optional math functions, headers,
    intrinsics and attributes, appending a define tuple to *moredefs*
    (mutated in place) for each capability found.

    ``config`` is a distutils config command; ``mathlibs`` the libraries
    to link the probes against.
    """
    def check_func(func_name):
        # Per-function probe: both a declaration and a link/call check.
        return config.check_func(func_name, libraries=mathlibs,
                                 decl=True, call=True)

    def check_funcs_once(funcs_name):
        # Probe a whole list in a single compile for speed.
        decl = dict([(f, True) for f in funcs_name])
        st = config.check_funcs_once(funcs_name, libraries=mathlibs,
                                     decl=decl, call=decl)
        if st:
            moredefs.extend([(fname2def(f), 1) for f in funcs_name])
        return st

    def check_funcs(funcs_name):
        # Use check_funcs_once first, and if it does not work, test func per
        # func. Return success only if all the functions are available
        if not check_funcs_once(funcs_name):
            # Global check failed, check func per func
            for f in funcs_name:
                if check_func(f):
                    moredefs.append((fname2def(f), 1))
            return 0
        else:
            return 1

    #use_msvc = config.check_decl("_MSC_VER")

    if not check_funcs_once(MANDATORY_FUNCS):
        raise SystemError("One of the required function to build numpy is not"
                          " available (the list is %s)." % str(MANDATORY_FUNCS))

    # Standard functions which may not be available and for which we have a
    # replacement implementation. Note that some of these are C99 functions.

    # XXX: hack to circumvent cpp pollution from python: python put its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own test are correct
    for f in OPTIONAL_STDFUNCS_MAYBE:
        if config.check_decl(fname2def(f),
                             headers=["Python.h", "math.h"]):
            OPTIONAL_STDFUNCS.remove(f)

    check_funcs(OPTIONAL_STDFUNCS)

    # Optional headers become HAVE_<HEADER>_H style defines.
    for h in OPTIONAL_HEADERS:
        if config.check_func("", decl=False, call=False, headers=[h]):
            h = h.replace(".", "_").replace(os.path.sep, "_")
            moredefs.append((fname2def(h), 1))

    # Intrinsics entries come in 2-, 3- and 4-tuple flavors:
    # (func, args[, header[, define-name]]).
    for tup in OPTIONAL_INTRINSICS:
        headers = None
        if len(tup) == 2:
            f, args, m = tup[0], tup[1], fname2def(tup[0])
        elif len(tup) == 3:
            f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
        else:
            f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
        if config.check_func(f, decl=False, call=True, call_args=args,
                             headers=headers):
            moredefs.append((m, 1))

    for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
        if config.check_gcc_function_attribute(dec, fn):
            moredefs.append((fname2def(fn), 1))

    for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS:
        if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code,
                                                               header):
            moredefs.append((fname2def(fn), 1))

    for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
        if config.check_gcc_variable_attribute(fn):
            m = fn.replace("(", "_").replace(")", "_")
            moredefs.append((fname2def(m), 1))

    # C99 functions: float and long double versions
    check_funcs(C99_FUNCS_SINGLE)
    check_funcs(C99_FUNCS_EXTENDED)
|
||||||
|
def check_complex(config, mathlibs):
    """Probe for C99 complex support.

    Returns a pair (priv, pub) of define lists: priv goes into config.h,
    pub into the installed numpy config header.
    """
    priv = []
    pub = []

    # Complex support is known-broken on Interix; bail out before probing.
    try:
        if os.uname()[0] == "Interix":
            warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
            return priv, pub
    except Exception:
        # os.uname not available on all platforms. blanket except ugly but safe
        pass

    # Without complex.h there is nothing further to check.
    if not config.check_header('complex.h'):
        return priv, pub

    priv.append(('HAVE_COMPLEX_H', 1))
    pub.append(('NPY_USE_C99_COMPLEX', 1))

    for ctype in C99_COMPLEX_TYPES:
        if config.check_type(ctype, headers=["complex.h"]):
            pub.append(('NPY_HAVE_%s' % type2def(ctype), 1))

    def check_prec(prec):
        # Probe the whole precision family in one compile; fall back to
        # per-function checks only when the batch check fails.
        flist = [func + prec for func in C99_COMPLEX_FUNCS]
        decl = {func: True for func in flist}
        if config.check_funcs_once(flist, call=decl, decl=decl,
                                   libraries=mathlibs):
            priv.extend([(fname2def(func), 1) for func in flist])
        else:
            for func in flist:
                if config.check_func(func, call=True, decl=True,
                                     libraries=mathlibs):
                    priv.append((fname2def(func), 1))

    check_prec('')
    check_prec('f')
    check_prec('l')

    return priv, pub
|
||||||
|
|
||||||
|
def check_ieee_macros(config):
    """Probe for IEEE-754 classification macros (isnan, isinf, ...).

    Returns a pair (priv, pub) of define lists describing which macro
    declarations Python's own configuration already settled.
    """
    priv = []
    pub = []

    undetermined = []

    def _add_decl(name):
        decl = fname2def("decl_%s" % name)
        priv.append(decl)
        pub.append('NPY_%s' % decl)

    # XXX: hack to circumvent cpp pollution from python: python put its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own test are correct
    for func in ("isnan", "isinf", "signbit", "isfinite"):
        py_symbol = fname2def("decl_%s" % func)
        if config.check_decl(py_symbol, headers=["Python.h", "math.h"]):
            # Python already probed this one: trust its verdict when the
            # macro is defined to a true value.
            if config.check_macro_true(py_symbol,
                                       headers=["Python.h", "math.h"]):
                pub.append('NPY_%s' % py_symbol)
        else:
            undetermined.append(func)

    # Normally, isnan and isinf are macro (C99), but some platforms only have
    # func, or both func and macro version. Check for macro only, and define
    # replacement ones if not found.
    # Note: including Python.h is necessary because it modifies some math.h
    # definitions
    for func in undetermined:
        if config.check_decl(func, headers=["Python.h", "math.h"]):
            _add_decl(func)

    return priv, pub
|
||||||
|
|
||||||
|
def check_types(config_cmd, ext, build_dir):
    """Determine sizes of the basic C/Python types.

    Returns a pair (private_defines, public_defines): SIZEOF_* entries for
    config.h and NPY_SIZEOF_* entries for the installed numpy config header.
    Raises SystemError when a size probe fails and RuntimeError when the
    platform configuration is unsupported.
    """
    private_defines = []
    public_defines = []

    # Expected size (in number of bytes) for each type. This is an
    # optimization: those are only hints, and an exhaustive search for the size
    # is done if the hints are wrong.
    expected = {'short': [2], 'int': [4], 'long': [8, 4],
                'float': [4], 'double': [8], 'long double': [16, 12, 8],
                'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
                'off_t': [8, 4]}

    # Check we have the python header (-dev* packages on Linux)
    result = config_cmd.check_header('Python.h')
    if not result:
        # Tailor the error message to the interpreter actually running.
        python = 'python'
        if '__pypy__' in sys.builtin_module_names:
            python = 'pypy'
        raise SystemError(
                "Cannot compile 'Python.h'. Perhaps you need to "
                "install {0}-dev|{0}-devel.".format(python))
    res = config_cmd.check_header("endian.h")
    if res:
        private_defines.append(('HAVE_ENDIAN_H', 1))
        public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
    res = config_cmd.check_header("sys/endian.h")
    if res:
        private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
        public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))

    # Check basic types sizes
    # NOTE(review): the loop variable `type` shadows the builtin of the same
    # name throughout this function (kept as-is to preserve the code).
    for type in ('short', 'int', 'long'):
        # Reuse Python's own SIZEOF_* define from pyconfig.h when available.
        res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
        if res:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
        else:
            res = config_cmd.check_type_size(type, expected=expected[type])
            if res >= 0:
                public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            else:
                raise SystemError("Checking sizeof (%s) failed !" % type)

    for type in ('float', 'double', 'long double'):
        already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
                                                 headers=["Python.h"])
        res = config_cmd.check_type_size(type, expected=expected[type])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            # Only add the private define when Python did not already declare
            # it; 'long double' is deliberately excluded.
            if not already_declared and not type == 'long double':
                private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

        # Compute size of corresponding complex type: used to check that our
        # definition is binary compatible with C99 complex type (check done at
        # build time in npy_common.h)
        complex_def = "struct {%s __x; %s __y;}" % (type, type)
        res = config_cmd.check_type_size(complex_def,
                                         expected=[2 * x for x in expected[type]])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % complex_def)

    for type in ('Py_intptr_t', 'off_t'):
        res = config_cmd.check_type_size(type, headers=["Python.h"],
                                         library_dirs=[pythonlib_dir()],
                                         expected=expected[type])

        if res >= 0:
            private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

    # We check declaration AND type because that's how distutils does it.
    if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
        res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
                                         library_dirs=[pythonlib_dir()],
                                         expected=expected['PY_LONG_LONG'])
        if res >= 0:
            private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')

        res = config_cmd.check_type_size('long long',
                                         expected=expected['long long'])
        if res >= 0:
            #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % 'long long')

    if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
        raise RuntimeError(
            "Config wo CHAR_BIT is not supported"
            ", please contact the maintainers")

    return private_defines, public_defines
|
||||||
|
|
||||||
|
def check_mathlib(config_cmd):
    """Return the library list needed to link the C math library.

    Candidates are tried in order (MATHLIB env var first when set, then
    no library, then -lm, then -lcpml); the first that can link ``exp``
    wins. Raises EnvironmentError when none works.
    """
    candidates = [[], ['m'], ['cpml']]
    env_mathlib = os.environ.get('MATHLIB')
    if env_mathlib:
        candidates.insert(0, env_mathlib.split(','))
    for libs in candidates:
        if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
            return libs
    raise EnvironmentError("math library missing; rerun "
                           "setup.py after setting the "
                           "MATHLIB env variable")
|
||||||
|
|
||||||
|
def visibility_define(config):
    """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
    string)."""
    hide = '__attribute__((visibility("hidden")))'
    # Empty string when the compiler rejects the attribute.
    return hide if config.check_gcc_function_attribute(hide, 'hideme') else ''
|
||||||
|
|
||||||
|
def configuration(parent_package='',top_path=None):
|
||||||
|
from numpy.distutils.misc_util import Configuration, dot_join
|
||||||
|
from numpy.distutils.system_info import get_info, dict_append
|
||||||
|
|
||||||
|
config = Configuration('core', parent_package, top_path)
|
||||||
|
local_dir = config.local_path
|
||||||
|
codegen_dir = join(local_dir, 'code_generators')
|
||||||
|
|
||||||
|
if is_released(config):
|
||||||
|
warnings.simplefilter('error', MismatchCAPIWarning)
|
||||||
|
|
||||||
|
# Check whether we have a mismatch between the set C API VERSION and the
|
||||||
|
# actual C API VERSION
|
||||||
|
check_api_version(C_API_VERSION, codegen_dir)
|
||||||
|
|
||||||
|
generate_umath_py = join(codegen_dir, 'generate_umath.py')
|
||||||
|
n = dot_join(config.name, 'generate_umath')
|
||||||
|
generate_umath = npy_load_module('_'.join(n.split('.')),
|
||||||
|
generate_umath_py, ('.py', 'U', 1))
|
||||||
|
|
||||||
|
header_dir = 'include/numpy' # this is relative to config.path_in_package
|
||||||
|
|
||||||
|
cocache = CallOnceOnly()
|
||||||
|
|
||||||
|
    def generate_config_h(ext, build_dir):
        """Generate (or reuse) config.h for the build and return its path.

        Closure over `config`, `header_dir` and `cocache` from the enclosing
        configuration(). Also extends ext.libraries with the math libraries.
        """
        target = join(build_dir, header_dir, 'config.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        # Regenerate only when this setup file is newer than the target.
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(('MATHLIB', ','.join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            # Index [0] selects the private define lists for config.h.
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            # Windows checks
            if sys.platform == 'win32' or os.name == 'nt':
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Use relaxed stride checking
            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Use bogus stride debug aid when relaxed strides are enabled
            if NPY_RELAXED_STRIDES_DEBUG:
                moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))

            # Get long double representation
            rep = check_long_double_representation(config_cmd)
            moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))

            # Work around a compiler ICE by disabling optimization of the
            # affected right-shift loops when the probe detects it.
            if check_for_right_shift_internal_compiler_error(config_cmd):
                moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')
                moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')
                moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
                moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')

            # Py3K check
            if sys.version_info[0] >= 3:
                moredefs.append(('NPY_PY3K', 1))

            # Generate the config.h file from moredefs
            with open(target, 'w') as target_f:
                for d in moredefs:
                    # A bare string means a value-less #define; a pair means
                    # #define NAME VALUE.
                    if isinstance(d, str):
                        target_f.write('#define %s\n' % (d))
                    else:
                        target_f.write('#define %s %s\n' % (d[0], d[1]))

                # define inline to our keyword, or nothing
                target_f.write('#ifndef __cplusplus\n')
                if inline == 'inline':
                    target_f.write('/* #undef inline */\n')
                else:
                    target_f.write('#define inline %s\n' % inline)
                target_f.write('#endif\n')

                # add the guard to make sure config.h is never included directly,
                # but always through npy_config.h
                target_f.write(textwrap.dedent("""
                    #ifndef _NPY_NPY_CONFIG_H_
                    #error config.h should never be included directly, include npy_config.h instead
                    #endif
                """))

            log.info('File: %s' % target)
            with open(target) as target_f:
                log.info(target_f.read())
            log.info('EOF')
        else:
            # Target is up to date: recover MATHLIB from the existing header
            # instead of re-probing.
            mathlibs = []
            with open(target) as target_f:
                for line in target_f:
                    s = '#define MATHLIB'
                    if line.startswith(s):
                        value = line[len(s):].strip()
                        if value:
                            mathlibs.extend(value.split(','))

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, 'libraries'):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target
|
||||||
|
|
||||||
|
    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        # put common include directory in build_dir on search path
        # allows using code generation in headers headers
        config.add_include_dirs(join(build_dir, "src", "common"))
        config.add_include_dirs(join(build_dir, "src", "npymath"))

        target = join(build_dir, header_dir, '_numpyconfig.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        # Regenerate only when this setup file is newer than the target.
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(('NPY_NO_SIGNAL', 1))

            if is_npy_no_smp():
                moredefs.append(('NPY_NO_SMP', 1))
            else:
                moredefs.append(('NPY_NO_SMP', 0))

            mathlibs = check_mathlib(config_cmd)
            # Index [1] selects the public define lists for this header.
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            if NPY_RELAXED_STRIDES_DEBUG:
                moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
            moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))

            # Add moredefs to header
            with open(target, 'w') as target_f:
                for d in moredefs:
                    # A bare string means a value-less #define; a pair means
                    # #define NAME VALUE.
                    if isinstance(d, str):
                        target_f.write('#define %s\n' % (d))
                    else:
                        target_f.write('#define %s %s\n' % (d[0], d[1]))

                # Define __STDC_FORMAT_MACROS
                target_f.write(textwrap.dedent("""
                    #ifndef __STDC_FORMAT_MACROS
                    #define __STDC_FORMAT_MACROS 1
                    #endif
                """))

            # Dump the numpyconfig.h header to stdout
            log.info('File: %s' % target)
            with open(target) as target_f:
                log.info(target_f.read())
            log.info('EOF')
        config.add_data_files((header_dir, target))
        return target
|
||||||
|
|
||||||
|
    def generate_api_func(module_name):
        """Return a build hook running code_generators/<module_name>.py.

        The returned callable imports the generator module, writes the API
        header/doc files into build_dir, registers them as data files on
        `config`, and returns the generated header path.
        """
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            # Temporarily put the code generators on sys.path for __import__.
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
            finally:
                # Always restore sys.path, even if generation fails.
                del sys.path[0]
            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api
|
||||||
|
|
||||||
|
generate_numpy_api = generate_api_func('generate_numpy_api')
|
||||||
|
generate_ufunc_api = generate_api_func('generate_ufunc_api')
|
||||||
|
|
||||||
|
config.add_include_dirs(join(local_dir, "src", "common"))
|
||||||
|
config.add_include_dirs(join(local_dir, "src"))
|
||||||
|
config.add_include_dirs(join(local_dir))
|
||||||
|
|
||||||
|
config.add_data_dir('include/numpy')
|
||||||
|
config.add_include_dirs(join('src', 'npymath'))
|
||||||
|
config.add_include_dirs(join('src', 'multiarray'))
|
||||||
|
config.add_include_dirs(join('src', 'umath'))
|
||||||
|
config.add_include_dirs(join('src', 'npysort'))
|
||||||
|
|
||||||
|
config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process
|
||||||
|
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
|
||||||
|
if sys.platform[:3] == "aix":
|
||||||
|
config.add_define_macros([("_LARGE_FILES", None)])
|
||||||
|
else:
|
||||||
|
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
|
||||||
|
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
|
||||||
|
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
|
||||||
|
|
||||||
|
config.numpy_include_dirs.extend(config.paths('include'))
|
||||||
|
|
||||||
|
deps = [join('src', 'npymath', '_signbit.c'),
|
||||||
|
join('include', 'numpy', '*object.h'),
|
||||||
|
join(codegen_dir, 'genapi.py'),
|
||||||
|
]
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# npymath library #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
|
||||||
|
|
||||||
|
    def get_mathlib_info(*args):
        """Build hook: verify the toolchain and record math-library link flags.

        Mutates the enclosing `subst_dict` with posix/msvc math library
        strings; returns nothing.
        """
        # Another ugly hack: the mathlib info is known once build_src is run,
        # but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during npymath build
        config_cmd = config.get_config_cmd()

        # Check that the toolchain works, to fail early if it doesn't
        # (avoid late errors with MATHLIB which are confusing if the
        # compiler does not work).
        st = config_cmd.try_link('int main(void) { return 0;}')
        if not st:
            # rerun the failing command in verbose mode
            config_cmd.compiler.verbose = True
            config_cmd.try_link('int main(void) { return 0;}')
            raise RuntimeError("Broken toolchain: cannot link a simple C program")
        mlibs = check_mathlib(config_cmd)

        # Render the library list in both linker syntaxes.
        posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
        msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
        subst_dict["posix_mathlib"] = posix_mlib
        subst_dict["msvc_mathlib"] = msvc_mlib
|
||||||
|
|
||||||
|
npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),
|
||||||
|
join('src', 'npymath', 'npy_math.c'),
|
||||||
|
join('src', 'npymath', 'ieee754.c.src'),
|
||||||
|
join('src', 'npymath', 'npy_math_complex.c.src'),
|
||||||
|
join('src', 'npymath', 'halffloat.c')
|
||||||
|
]
|
||||||
|
|
||||||
|
# Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
|
||||||
|
# Intel and Clang also don't seem happy with /GL
|
||||||
|
is_msvc = (platform.platform().startswith('Windows') and
|
||||||
|
platform.python_compiler().startswith('MS'))
|
||||||
|
config.add_installed_library('npymath',
|
||||||
|
sources=npymath_sources + [get_mathlib_info],
|
||||||
|
install_dir='lib',
|
||||||
|
build_info={
|
||||||
|
'include_dirs' : [], # empty list required for creating npy_math_internal.h
|
||||||
|
'extra_compiler_args' : (['/GL-'] if is_msvc else []),
|
||||||
|
})
|
||||||
|
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
|
||||||
|
subst_dict)
|
||||||
|
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
|
||||||
|
subst_dict)
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# npysort library #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
# This library is created for the build but it is not installed
|
||||||
|
npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
|
||||||
|
join('src', 'npysort', 'quicksort.c.src'),
|
||||||
|
join('src', 'npysort', 'mergesort.c.src'),
|
||||||
|
join('src', 'npysort', 'timsort.c.src'),
|
||||||
|
join('src', 'npysort', 'heapsort.c.src'),
|
||||||
|
join('src', 'npysort', 'radixsort.c.src'),
|
||||||
|
join('src', 'common', 'npy_partition.h.src'),
|
||||||
|
join('src', 'npysort', 'selection.c.src'),
|
||||||
|
join('src', 'common', 'npy_binsearch.h.src'),
|
||||||
|
join('src', 'npysort', 'binsearch.c.src'),
|
||||||
|
]
|
||||||
|
config.add_library('npysort',
|
||||||
|
sources=npysort_sources,
|
||||||
|
include_dirs=[])
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# multiarray_tests module #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
config.add_extension('_multiarray_tests',
|
||||||
|
sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
|
||||||
|
join('src', 'common', 'mem_overlap.c')],
|
||||||
|
depends=[join('src', 'common', 'mem_overlap.h'),
|
||||||
|
join('src', 'common', 'npy_extint128.h')],
|
||||||
|
libraries=['npymath'])
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# _multiarray_umath module - common part #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
common_deps = [
|
||||||
|
join('src', 'common', 'array_assign.h'),
|
||||||
|
join('src', 'common', 'binop_override.h'),
|
||||||
|
join('src', 'common', 'cblasfuncs.h'),
|
||||||
|
join('src', 'common', 'lowlevel_strided_loops.h'),
|
||||||
|
join('src', 'common', 'mem_overlap.h'),
|
||||||
|
join('src', 'common', 'npy_cblas.h'),
|
||||||
|
join('src', 'common', 'npy_config.h'),
|
||||||
|
join('src', 'common', 'npy_ctypes.h'),
|
||||||
|
join('src', 'common', 'npy_extint128.h'),
|
||||||
|
join('src', 'common', 'npy_import.h'),
|
||||||
|
join('src', 'common', 'npy_longdouble.h'),
|
||||||
|
join('src', 'common', 'templ_common.h.src'),
|
||||||
|
join('src', 'common', 'ucsnarrow.h'),
|
||||||
|
join('src', 'common', 'ufunc_override.h'),
|
||||||
|
join('src', 'common', 'umathmodule.h'),
|
||||||
|
join('src', 'common', 'numpyos.h'),
|
||||||
|
]
|
||||||
|
|
||||||
|
common_src = [
|
||||||
|
join('src', 'common', 'array_assign.c'),
|
||||||
|
join('src', 'common', 'mem_overlap.c'),
|
||||||
|
join('src', 'common', 'npy_longdouble.c'),
|
||||||
|
join('src', 'common', 'templ_common.h.src'),
|
||||||
|
join('src', 'common', 'ucsnarrow.c'),
|
||||||
|
join('src', 'common', 'ufunc_override.c'),
|
||||||
|
join('src', 'common', 'numpyos.c'),
|
||||||
|
]
|
||||||
|
|
||||||
|
if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
|
||||||
|
blas_info = get_info('blas_ilp64_opt', 2)
|
||||||
|
else:
|
||||||
|
blas_info = get_info('blas_opt', 0)
|
||||||
|
|
||||||
|
have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
|
||||||
|
|
||||||
|
if have_blas:
|
||||||
|
extra_info = blas_info
|
||||||
|
# These files are also in MANIFEST.in so that they are always in
|
||||||
|
# the source distribution independently of HAVE_CBLAS.
|
||||||
|
common_src.extend([join('src', 'common', 'cblasfuncs.c'),
|
||||||
|
join('src', 'common', 'python_xerbla.c'),
|
||||||
|
])
|
||||||
|
if uses_accelerate_framework(blas_info):
|
||||||
|
common_src.extend(get_sgemv_fix())
|
||||||
|
else:
|
||||||
|
extra_info = {}
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# _multiarray_umath module - multiarray part #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
multiarray_deps = [
|
||||||
|
join('src', 'multiarray', 'arrayobject.h'),
|
||||||
|
join('src', 'multiarray', 'arraytypes.h'),
|
||||||
|
join('src', 'multiarray', 'arrayfunction_override.h'),
|
||||||
|
join('src', 'multiarray', 'npy_buffer.h'),
|
||||||
|
join('src', 'multiarray', 'calculation.h'),
|
||||||
|
join('src', 'multiarray', 'common.h'),
|
||||||
|
join('src', 'multiarray', 'convert_datatype.h'),
|
||||||
|
join('src', 'multiarray', 'convert.h'),
|
||||||
|
join('src', 'multiarray', 'conversion_utils.h'),
|
||||||
|
join('src', 'multiarray', 'ctors.h'),
|
||||||
|
join('src', 'multiarray', 'descriptor.h'),
|
||||||
|
join('src', 'multiarray', 'dragon4.h'),
|
||||||
|
join('src', 'multiarray', 'getset.h'),
|
||||||
|
join('src', 'multiarray', 'hashdescr.h'),
|
||||||
|
join('src', 'multiarray', 'iterators.h'),
|
||||||
|
join('src', 'multiarray', 'mapping.h'),
|
||||||
|
join('src', 'multiarray', 'methods.h'),
|
||||||
|
join('src', 'multiarray', 'multiarraymodule.h'),
|
||||||
|
join('src', 'multiarray', 'nditer_impl.h'),
|
||||||
|
join('src', 'multiarray', 'number.h'),
|
||||||
|
join('src', 'multiarray', 'refcount.h'),
|
||||||
|
join('src', 'multiarray', 'scalartypes.h'),
|
||||||
|
join('src', 'multiarray', 'sequence.h'),
|
||||||
|
join('src', 'multiarray', 'shape.h'),
|
||||||
|
join('src', 'multiarray', 'strfuncs.h'),
|
||||||
|
join('src', 'multiarray', 'typeinfo.h'),
|
||||||
|
join('src', 'multiarray', 'usertypes.h'),
|
||||||
|
join('src', 'multiarray', 'vdot.h'),
|
||||||
|
join('include', 'numpy', 'arrayobject.h'),
|
||||||
|
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
|
||||||
|
join('include', 'numpy', 'npy_endian.h'),
|
||||||
|
join('include', 'numpy', 'arrayscalars.h'),
|
||||||
|
join('include', 'numpy', 'noprefix.h'),
|
||||||
|
join('include', 'numpy', 'npy_interrupt.h'),
|
||||||
|
join('include', 'numpy', 'npy_3kcompat.h'),
|
||||||
|
join('include', 'numpy', 'npy_math.h'),
|
||||||
|
join('include', 'numpy', 'halffloat.h'),
|
||||||
|
join('include', 'numpy', 'npy_common.h'),
|
||||||
|
join('include', 'numpy', 'npy_os.h'),
|
||||||
|
join('include', 'numpy', 'utils.h'),
|
||||||
|
join('include', 'numpy', 'ndarrayobject.h'),
|
||||||
|
join('include', 'numpy', 'npy_cpu.h'),
|
||||||
|
join('include', 'numpy', 'numpyconfig.h'),
|
||||||
|
join('include', 'numpy', 'ndarraytypes.h'),
|
||||||
|
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
|
||||||
|
# add library sources as distuils does not consider libraries
|
||||||
|
# dependencies
|
||||||
|
] + npysort_sources + npymath_sources
|
||||||
|
|
||||||
|
multiarray_src = [
|
||||||
|
join('src', 'multiarray', 'alloc.c'),
|
||||||
|
join('src', 'multiarray', 'arrayobject.c'),
|
||||||
|
join('src', 'multiarray', 'arraytypes.c.src'),
|
||||||
|
join('src', 'multiarray', 'array_assign_scalar.c'),
|
||||||
|
join('src', 'multiarray', 'array_assign_array.c'),
|
||||||
|
join('src', 'multiarray', 'arrayfunction_override.c'),
|
||||||
|
join('src', 'multiarray', 'buffer.c'),
|
||||||
|
join('src', 'multiarray', 'calculation.c'),
|
||||||
|
join('src', 'multiarray', 'compiled_base.c'),
|
||||||
|
join('src', 'multiarray', 'common.c'),
|
||||||
|
join('src', 'multiarray', 'convert.c'),
|
||||||
|
join('src', 'multiarray', 'convert_datatype.c'),
|
||||||
|
join('src', 'multiarray', 'conversion_utils.c'),
|
||||||
|
join('src', 'multiarray', 'ctors.c'),
|
||||||
|
join('src', 'multiarray', 'datetime.c'),
|
||||||
|
join('src', 'multiarray', 'datetime_strings.c'),
|
||||||
|
join('src', 'multiarray', 'datetime_busday.c'),
|
||||||
|
join('src', 'multiarray', 'datetime_busdaycal.c'),
|
||||||
|
join('src', 'multiarray', 'descriptor.c'),
|
||||||
|
join('src', 'multiarray', 'dragon4.c'),
|
||||||
|
join('src', 'multiarray', 'dtype_transfer.c'),
|
||||||
|
join('src', 'multiarray', 'einsum.c.src'),
|
||||||
|
join('src', 'multiarray', 'flagsobject.c'),
|
||||||
|
join('src', 'multiarray', 'getset.c'),
|
||||||
|
join('src', 'multiarray', 'hashdescr.c'),
|
||||||
|
join('src', 'multiarray', 'item_selection.c'),
|
||||||
|
join('src', 'multiarray', 'iterators.c'),
|
||||||
|
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
|
||||||
|
join('src', 'multiarray', 'mapping.c'),
|
||||||
|
join('src', 'multiarray', 'methods.c'),
|
||||||
|
join('src', 'multiarray', 'multiarraymodule.c'),
|
||||||
|
join('src', 'multiarray', 'nditer_templ.c.src'),
|
||||||
|
join('src', 'multiarray', 'nditer_api.c'),
|
||||||
|
join('src', 'multiarray', 'nditer_constr.c'),
|
||||||
|
join('src', 'multiarray', 'nditer_pywrap.c'),
|
||||||
|
join('src', 'multiarray', 'number.c'),
|
||||||
|
join('src', 'multiarray', 'refcount.c'),
|
||||||
|
join('src', 'multiarray', 'sequence.c'),
|
||||||
|
join('src', 'multiarray', 'shape.c'),
|
||||||
|
join('src', 'multiarray', 'scalarapi.c'),
|
||||||
|
join('src', 'multiarray', 'scalartypes.c.src'),
|
||||||
|
join('src', 'multiarray', 'strfuncs.c'),
|
||||||
|
join('src', 'multiarray', 'temp_elide.c'),
|
||||||
|
join('src', 'multiarray', 'typeinfo.c'),
|
||||||
|
join('src', 'multiarray', 'usertypes.c'),
|
||||||
|
join('src', 'multiarray', 'vdot.c'),
|
||||||
|
]
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# _multiarray_umath module - umath part #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
def generate_umath_c(ext, build_dir):
|
||||||
|
target = join(build_dir, header_dir, '__umath_generated.c')
|
||||||
|
dir = os.path.dirname(target)
|
||||||
|
if not os.path.exists(dir):
|
||||||
|
os.makedirs(dir)
|
||||||
|
script = generate_umath_py
|
||||||
|
if newer(script, target):
|
||||||
|
with open(target, 'w') as f:
|
||||||
|
f.write(generate_umath.make_code(generate_umath.defdict,
|
||||||
|
generate_umath.__file__))
|
||||||
|
return []
|
||||||
|
|
||||||
|
umath_src = [
|
||||||
|
join('src', 'umath', 'umathmodule.c'),
|
||||||
|
join('src', 'umath', 'reduction.c'),
|
||||||
|
join('src', 'umath', 'funcs.inc.src'),
|
||||||
|
join('src', 'umath', 'simd.inc.src'),
|
||||||
|
join('src', 'umath', 'loops.h.src'),
|
||||||
|
join('src', 'umath', 'loops.c.src'),
|
||||||
|
join('src', 'umath', 'matmul.h.src'),
|
||||||
|
join('src', 'umath', 'matmul.c.src'),
|
||||||
|
join('src', 'umath', 'clip.h.src'),
|
||||||
|
join('src', 'umath', 'clip.c.src'),
|
||||||
|
join('src', 'umath', 'ufunc_object.c'),
|
||||||
|
join('src', 'umath', 'extobj.c'),
|
||||||
|
join('src', 'umath', 'cpuid.c'),
|
||||||
|
join('src', 'umath', 'scalarmath.c.src'),
|
||||||
|
join('src', 'umath', 'ufunc_type_resolution.c'),
|
||||||
|
join('src', 'umath', 'override.c'),
|
||||||
|
]
|
||||||
|
|
||||||
|
umath_deps = [
|
||||||
|
generate_umath_py,
|
||||||
|
join('include', 'numpy', 'npy_math.h'),
|
||||||
|
join('include', 'numpy', 'halffloat.h'),
|
||||||
|
join('src', 'multiarray', 'common.h'),
|
||||||
|
join('src', 'multiarray', 'number.h'),
|
||||||
|
join('src', 'common', 'templ_common.h.src'),
|
||||||
|
join('src', 'umath', 'simd.inc.src'),
|
||||||
|
join('src', 'umath', 'override.h'),
|
||||||
|
join(codegen_dir, 'generate_ufunc_api.py'),
|
||||||
|
]
|
||||||
|
|
||||||
|
config.add_extension('_multiarray_umath',
|
||||||
|
sources=multiarray_src + umath_src +
|
||||||
|
npymath_sources + common_src +
|
||||||
|
[generate_config_h,
|
||||||
|
generate_numpyconfig_h,
|
||||||
|
generate_numpy_api,
|
||||||
|
join(codegen_dir, 'generate_numpy_api.py'),
|
||||||
|
join('*.py'),
|
||||||
|
generate_umath_c,
|
||||||
|
generate_ufunc_api,
|
||||||
|
],
|
||||||
|
depends=deps + multiarray_deps + umath_deps +
|
||||||
|
common_deps,
|
||||||
|
libraries=['npymath', 'npysort'],
|
||||||
|
extra_info=extra_info)
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# umath_tests module #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
config.add_extension('_umath_tests',
|
||||||
|
sources=[join('src', 'umath', '_umath_tests.c.src')])
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# custom rational dtype module #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
config.add_extension('_rational_tests',
|
||||||
|
sources=[join('src', 'umath', '_rational_tests.c.src')])
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# struct_ufunc_test module #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
config.add_extension('_struct_ufunc_tests',
|
||||||
|
sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])
|
||||||
|
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
# operand_flag_tests module #
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
config.add_extension('_operand_flag_tests',
|
||||||
|
sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
|
||||||
|
|
||||||
|
config.add_data_dir('tests')
|
||||||
|
config.add_data_dir('tests/data')
|
||||||
|
|
||||||
|
config.make_svn_version_py()
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
from numpy.distutils.core import setup
|
||||||
|
setup(configuration=configuration)
|
@ -0,0 +1,457 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
# Code common to build tools
|
||||||
|
import sys
|
||||||
|
import warnings
|
||||||
|
import copy
|
||||||
|
import binascii
|
||||||
|
import textwrap
|
||||||
|
|
||||||
|
from numpy.distutils.misc_util import mingw32
|
||||||
|
|
||||||
|
|
||||||
|
#-------------------
|
||||||
|
# Versioning support
|
||||||
|
#-------------------
|
||||||
|
# How to change C_API_VERSION ?
|
||||||
|
# - increase C_API_VERSION value
|
||||||
|
# - record the hash for the new C API with the cversions.py script
|
||||||
|
# and add the hash to cversions.txt
|
||||||
|
# The hash values are used to remind developers when the C API number was not
|
||||||
|
# updated - generates a MismatchCAPIWarning warning which is turned into an
|
||||||
|
# exception for released version.
|
||||||
|
|
||||||
|
# Binary compatibility version number. This number is increased whenever the
|
||||||
|
# C-API is changed such that binary compatibility is broken, i.e. whenever a
|
||||||
|
# recompile of extension modules is needed.
|
||||||
|
C_ABI_VERSION = 0x01000009
|
||||||
|
|
||||||
|
# Minor API version. This number is increased whenever a change is made to the
|
||||||
|
# C-API -- whether it breaks binary compatibility or not. Some changes, such
|
||||||
|
# as adding a function pointer to the end of the function table, can be made
|
||||||
|
# without breaking binary compatibility. In this case, only the C_API_VERSION
|
||||||
|
# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
|
||||||
|
# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
|
||||||
|
#
|
||||||
|
# 0x00000008 - 1.7.x
|
||||||
|
# 0x00000009 - 1.8.x
|
||||||
|
# 0x00000009 - 1.9.x
|
||||||
|
# 0x0000000a - 1.10.x
|
||||||
|
# 0x0000000a - 1.11.x
|
||||||
|
# 0x0000000a - 1.12.x
|
||||||
|
# 0x0000000b - 1.13.x
|
||||||
|
# 0x0000000c - 1.14.x
|
||||||
|
# 0x0000000c - 1.15.x
|
||||||
|
# 0x0000000d - 1.16.x
|
||||||
|
C_API_VERSION = 0x0000000d
|
||||||
|
|
||||||
|
class MismatchCAPIWarning(Warning):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def is_released(config):
|
||||||
|
"""Return True if a released version of numpy is detected."""
|
||||||
|
from distutils.version import LooseVersion
|
||||||
|
|
||||||
|
v = config.get_version('../version.py')
|
||||||
|
if v is None:
|
||||||
|
raise ValueError("Could not get version")
|
||||||
|
pv = LooseVersion(vstring=v).version
|
||||||
|
if len(pv) > 3:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def get_api_versions(apiversion, codegen_dir):
|
||||||
|
"""
|
||||||
|
Return current C API checksum and the recorded checksum.
|
||||||
|
|
||||||
|
Return current C API checksum and the recorded checksum for the given
|
||||||
|
version of the C API version.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Compute the hash of the current API as defined in the .txt files in
|
||||||
|
# code_generators
|
||||||
|
sys.path.insert(0, codegen_dir)
|
||||||
|
try:
|
||||||
|
m = __import__('genapi')
|
||||||
|
numpy_api = __import__('numpy_api')
|
||||||
|
curapi_hash = m.fullapi_hash(numpy_api.full_api)
|
||||||
|
apis_hash = m.get_versions_hash()
|
||||||
|
finally:
|
||||||
|
del sys.path[0]
|
||||||
|
|
||||||
|
return curapi_hash, apis_hash[apiversion]
|
||||||
|
|
||||||
|
def check_api_version(apiversion, codegen_dir):
|
||||||
|
"""Emits a MismatchCAPIWarning if the C API version needs updating."""
|
||||||
|
curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
|
||||||
|
|
||||||
|
# If different hash, it means that the api .txt files in
|
||||||
|
# codegen_dir have been updated without the API version being
|
||||||
|
# updated. Any modification in those .txt files should be reflected
|
||||||
|
# in the api and eventually abi versions.
|
||||||
|
# To compute the checksum of the current API, use numpy/core/cversions.py
|
||||||
|
if not curapi_hash == api_hash:
|
||||||
|
msg = ("API mismatch detected, the C API version "
|
||||||
|
"numbers have to be updated. Current C api version is %d, "
|
||||||
|
"with checksum %s, but recorded checksum for C API version %d "
|
||||||
|
"in core/codegen_dir/cversions.txt is %s. If functions were "
|
||||||
|
"added in the C API, you have to update C_API_VERSION in %s."
|
||||||
|
)
|
||||||
|
warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
|
||||||
|
__file__),
|
||||||
|
MismatchCAPIWarning, stacklevel=2)
|
||||||
|
# Mandatory functions: if not found, fail the build
|
||||||
|
MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
|
||||||
|
"floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
|
||||||
|
"acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
|
||||||
|
|
||||||
|
# Standard functions which may not be available and for which we have a
|
||||||
|
# replacement implementation. Note that some of these are C99 functions.
|
||||||
|
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
|
||||||
|
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
|
||||||
|
"copysign", "nextafter", "ftello", "fseeko",
|
||||||
|
"strtoll", "strtoull", "cbrt", "strtold_l", "fallocate",
|
||||||
|
"backtrace", "madvise"]
|
||||||
|
|
||||||
|
|
||||||
|
OPTIONAL_HEADERS = [
|
||||||
|
# sse headers only enabled automatically on amd64/x32 builds
|
||||||
|
"xmmintrin.h", # SSE
|
||||||
|
"emmintrin.h", # SSE2
|
||||||
|
"immintrin.h", # AVX
|
||||||
|
"features.h", # for glibc version linux
|
||||||
|
"xlocale.h", # see GH#8367
|
||||||
|
"dlfcn.h", # dladdr
|
||||||
|
"sys/mman.h", #madvise
|
||||||
|
]
|
||||||
|
|
||||||
|
# optional gcc compiler builtins and their call arguments and optional a
|
||||||
|
# required header and definition name (HAVE_ prepended)
|
||||||
|
# call arguments are required as the compiler will do strict signature checking
|
||||||
|
OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
|
||||||
|
("__builtin_isinf", '5.'),
|
||||||
|
("__builtin_isfinite", '5.'),
|
||||||
|
("__builtin_bswap32", '5u'),
|
||||||
|
("__builtin_bswap64", '5u'),
|
||||||
|
("__builtin_expect", '5, 0'),
|
||||||
|
("__builtin_mul_overflow", '5, 5, (int*)5'),
|
||||||
|
# broken on OSX 10.11, make sure its not optimized away
|
||||||
|
("volatile int r = __builtin_cpu_supports", '"sse"',
|
||||||
|
"stdio.h", "__BUILTIN_CPU_SUPPORTS"),
|
||||||
|
("volatile int r = __builtin_cpu_supports", '"avx512f"',
|
||||||
|
"stdio.h", "__BUILTIN_CPU_SUPPORTS_AVX512F"),
|
||||||
|
# MMX only needed for icc, but some clangs don't have it
|
||||||
|
("_m_from_int64", '0', "emmintrin.h"),
|
||||||
|
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
|
||||||
|
("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
|
||||||
|
"xmmintrin.h"), # SSE
|
||||||
|
("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
|
||||||
|
("__builtin_prefetch", "(float*)0, 0, 3"),
|
||||||
|
# check that the linker can handle avx
|
||||||
|
("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"',
|
||||||
|
"stdio.h", "LINK_AVX"),
|
||||||
|
("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
|
||||||
|
"stdio.h", "LINK_AVX2"),
|
||||||
|
("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"',
|
||||||
|
"stdio.h", "LINK_AVX512F"),
|
||||||
|
("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
|
||||||
|
]
|
||||||
|
|
||||||
|
# function attributes
|
||||||
|
# tested via "int %s %s(void *);" % (attribute, name)
|
||||||
|
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
|
||||||
|
OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
|
||||||
|
'attribute_optimize_unroll_loops'),
|
||||||
|
('__attribute__((optimize("O3")))',
|
||||||
|
'attribute_optimize_opt_3'),
|
||||||
|
('__attribute__((nonnull (1)))',
|
||||||
|
'attribute_nonnull'),
|
||||||
|
('__attribute__((target ("avx")))',
|
||||||
|
'attribute_target_avx'),
|
||||||
|
('__attribute__((target ("avx2")))',
|
||||||
|
'attribute_target_avx2'),
|
||||||
|
('__attribute__((target ("avx512f")))',
|
||||||
|
'attribute_target_avx512f'),
|
||||||
|
]
|
||||||
|
|
||||||
|
# function attributes with intrinsics
|
||||||
|
# To ensure your compiler can compile avx intrinsics with just the attributes
|
||||||
|
# gcc 4.8.4 support attributes but not with intrisics
|
||||||
|
# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code)
|
||||||
|
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
|
||||||
|
OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))',
|
||||||
|
'attribute_target_avx2_with_intrinsics',
|
||||||
|
'__m256 temp = _mm256_set1_ps(1.0); temp = \
|
||||||
|
_mm256_fmadd_ps(temp, temp, temp)',
|
||||||
|
'immintrin.h'),
|
||||||
|
('__attribute__((target("avx512f")))',
|
||||||
|
'attribute_target_avx512f_with_intrinsics',
|
||||||
|
'__m512 temp = _mm512_set1_ps(1.0)',
|
||||||
|
'immintrin.h'),
|
||||||
|
]
|
||||||
|
|
||||||
|
# variable attributes tested via "int %s a" % attribute
|
||||||
|
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
|
||||||
|
|
||||||
|
# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
|
||||||
|
OPTIONAL_STDFUNCS_MAYBE = [
|
||||||
|
"expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
|
||||||
|
"ftello", "fseeko"
|
||||||
|
]
|
||||||
|
|
||||||
|
# C99 functions: float and long double versions
|
||||||
|
C99_FUNCS = [
|
||||||
|
"sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
|
||||||
|
"rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
|
||||||
|
"asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
|
||||||
|
"pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
|
||||||
|
"nextafter", "cbrt"
|
||||||
|
]
|
||||||
|
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
|
||||||
|
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
|
||||||
|
C99_COMPLEX_TYPES = [
|
||||||
|
'complex double', 'complex float', 'complex long double'
|
||||||
|
]
|
||||||
|
C99_COMPLEX_FUNCS = [
|
||||||
|
"cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
|
||||||
|
"catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
|
||||||
|
"cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
|
||||||
|
]
|
||||||
|
|
||||||
|
def fname2def(name):
|
||||||
|
return "HAVE_%s" % name.upper()
|
||||||
|
|
||||||
|
def sym2def(symbol):
|
||||||
|
define = symbol.replace(' ', '')
|
||||||
|
return define.upper()
|
||||||
|
|
||||||
|
def type2def(symbol):
|
||||||
|
define = symbol.replace(' ', '_')
|
||||||
|
return define.upper()
|
||||||
|
|
||||||
|
# Code to detect long double representation taken from MPFR m4 macro
|
||||||
|
def check_long_double_representation(cmd):
|
||||||
|
cmd._check_compiler()
|
||||||
|
body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
|
||||||
|
|
||||||
|
# Disable whole program optimization (the default on vs2015, with python 3.5+)
|
||||||
|
# which generates intermediary object files and prevents checking the
|
||||||
|
# float representation.
|
||||||
|
if sys.platform == "win32" and not mingw32():
|
||||||
|
try:
|
||||||
|
cmd.compiler.compile_options.remove("/GL")
|
||||||
|
except (AttributeError, ValueError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Disable multi-file interprocedural optimization in the Intel compiler on Linux
|
||||||
|
# which generates intermediary object files and prevents checking the
|
||||||
|
# float representation.
|
||||||
|
elif (sys.platform != "win32"
|
||||||
|
and cmd.compiler.compiler_type.startswith('intel')
|
||||||
|
and '-ipo' in cmd.compiler.cc_exe):
|
||||||
|
newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '')
|
||||||
|
cmd.compiler.set_executables(
|
||||||
|
compiler=newcompiler,
|
||||||
|
compiler_so=newcompiler,
|
||||||
|
compiler_cxx=newcompiler,
|
||||||
|
linker_exe=newcompiler,
|
||||||
|
linker_so=newcompiler + ' -shared'
|
||||||
|
)
|
||||||
|
|
||||||
|
# We need to use _compile because we need the object filename
|
||||||
|
src, obj = cmd._compile(body, None, None, 'c')
|
||||||
|
try:
|
||||||
|
ltype = long_double_representation(pyod(obj))
|
||||||
|
return ltype
|
||||||
|
except ValueError:
|
||||||
|
# try linking to support CC="gcc -flto" or icc -ipo
|
||||||
|
# struct needs to be volatile so it isn't optimized away
|
||||||
|
# additionally "clang -flto" requires the foo struct to be used
|
||||||
|
body = body.replace('struct', 'volatile struct')
|
||||||
|
body += "int main(void) { return foo.before[0]; }\n"
|
||||||
|
src, obj = cmd._compile(body, None, None, 'c')
|
||||||
|
cmd.temp_files.append("_configtest")
|
||||||
|
cmd.compiler.link_executable([obj], "_configtest")
|
||||||
|
ltype = long_double_representation(pyod("_configtest"))
|
||||||
|
return ltype
|
||||||
|
finally:
|
||||||
|
cmd._clean()
|
||||||
|
|
||||||
|
LONG_DOUBLE_REPRESENTATION_SRC = r"""
|
||||||
|
/* "before" is 16 bytes to ensure there's no padding between it and "x".
|
||||||
|
* We're not expecting any "long double" bigger than 16 bytes or with
|
||||||
|
* alignment requirements stricter than 16 bytes. */
|
||||||
|
typedef %(type)s test_type;
|
||||||
|
|
||||||
|
struct {
|
||||||
|
char before[16];
|
||||||
|
test_type x;
|
||||||
|
char after[8];
|
||||||
|
} foo = {
|
||||||
|
{ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
|
||||||
|
'\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
|
||||||
|
-123456789.0,
|
||||||
|
{ '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
|
||||||
|
};
|
||||||
|
"""
|
||||||
|
|
||||||
|
def pyod(filename):
|
||||||
|
"""Python implementation of the od UNIX utility (od -b, more exactly).
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
filename : str
|
||||||
|
name of the file to get the dump from.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
out : seq
|
||||||
|
list of lines of od output
|
||||||
|
|
||||||
|
Note
|
||||||
|
----
|
||||||
|
We only implement enough to get the necessary information for long double
|
||||||
|
representation, this is not intended as a compatible replacement for od.
|
||||||
|
"""
|
||||||
|
def _pyod2():
|
||||||
|
out = []
|
||||||
|
|
||||||
|
with open(filename, 'rb') as fid:
|
||||||
|
yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
|
||||||
|
for i in range(0, len(yo), 16):
|
||||||
|
line = ['%07d' % int(oct(i))]
|
||||||
|
line.extend(['%03d' % c for c in yo[i:i+16]])
|
||||||
|
out.append(" ".join(line))
|
||||||
|
return out
|
||||||
|
|
||||||
|
def _pyod3():
|
||||||
|
out = []
|
||||||
|
|
||||||
|
with open(filename, 'rb') as fid:
|
||||||
|
yo2 = [oct(o)[2:] for o in fid.read()]
|
||||||
|
for i in range(0, len(yo2), 16):
|
||||||
|
line = ['%07d' % int(oct(i)[2:])]
|
||||||
|
line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
|
||||||
|
out.append(" ".join(line))
|
||||||
|
return out
|
||||||
|
|
||||||
|
if sys.version_info[0] < 3:
|
||||||
|
return _pyod2()
|
||||||
|
else:
|
||||||
|
return _pyod3()
|
||||||
|
|
||||||
|
_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
|
||||||
|
'001', '043', '105', '147', '211', '253', '315', '357']
|
||||||
|
_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
|
||||||
|
|
||||||
|
_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
|
||||||
|
_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
|
||||||
|
_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
|
||||||
|
'031', '300', '000', '000']
|
||||||
|
_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
|
||||||
|
'031', '300', '000', '000', '000', '000', '000', '000']
|
||||||
|
_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
|
||||||
|
'242', '240', '000', '000', '000', '000']
|
||||||
|
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
|
||||||
|
'000', '000', '000', '000', '000', '000', '000', '000']
|
||||||
|
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
|
||||||
|
_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
|
||||||
|
['000'] * 8)
|
||||||
|
_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
|
||||||
|
['000'] * 8)
|
||||||
|
|
||||||
|
def long_double_representation(lines):
|
||||||
|
"""Given a binary dump as given by GNU od -b, look for long double
|
||||||
|
representation."""
|
||||||
|
|
||||||
|
# Read contains a list of 32 items, each item is a byte (in octal
|
||||||
|
# representation, as a string). We 'slide' over the output until read is of
|
||||||
|
# the form before_seq + content + after_sequence, where content is the long double
|
||||||
|
# representation:
|
||||||
|
# - content is 12 bytes: 80 bits Intel representation
|
||||||
|
# - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision
|
||||||
|
# - content is 8 bytes: same as double (not implemented yet)
|
||||||
|
read = [''] * 32
|
||||||
|
saw = None
|
||||||
|
for line in lines:
|
||||||
|
# we skip the first word, as od -b output an index at the beginning of
|
||||||
|
# each line
|
||||||
|
for w in line.split()[1:]:
|
||||||
|
read.pop(0)
|
||||||
|
read.append(w)
|
||||||
|
|
||||||
|
# If the end of read is equal to the after_sequence, read contains
|
||||||
|
# the long double
|
||||||
|
if read[-8:] == _AFTER_SEQ:
|
||||||
|
saw = copy.copy(read)
|
||||||
|
# if the content was 12 bytes, we only have 32 - 8 - 12 = 12
|
||||||
|
# "before" bytes. In other words the first 4 "before" bytes went
|
||||||
|
# past the sliding window.
|
||||||
|
if read[:12] == _BEFORE_SEQ[4:]:
|
||||||
|
if read[12:-8] == _INTEL_EXTENDED_12B:
|
||||||
|
return 'INTEL_EXTENDED_12_BYTES_LE'
|
||||||
|
if read[12:-8] == _MOTOROLA_EXTENDED_12B:
|
||||||
|
return 'MOTOROLA_EXTENDED_12_BYTES_BE'
|
||||||
|
# if the content was 16 bytes, we are left with 32-8-16 = 16
|
||||||
|
# "before" bytes, so 8 went past the sliding window.
|
||||||
|
elif read[:8] == _BEFORE_SEQ[8:]:
|
||||||
|
if read[8:-8] == _INTEL_EXTENDED_16B:
|
||||||
|
return 'INTEL_EXTENDED_16_BYTES_LE'
|
||||||
|
elif read[8:-8] == _IEEE_QUAD_PREC_BE:
|
||||||
|
return 'IEEE_QUAD_BE'
|
||||||
|
elif read[8:-8] == _IEEE_QUAD_PREC_LE:
|
||||||
|
return 'IEEE_QUAD_LE'
|
||||||
|
elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE:
|
||||||
|
return 'IBM_DOUBLE_DOUBLE_LE'
|
||||||
|
elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE:
|
||||||
|
return 'IBM_DOUBLE_DOUBLE_BE'
|
||||||
|
# if the content was 8 bytes, left with 32-8-8 = 16 bytes
|
||||||
|
elif read[:16] == _BEFORE_SEQ:
|
||||||
|
if read[16:-8] == _IEEE_DOUBLE_LE:
|
||||||
|
return 'IEEE_DOUBLE_LE'
|
||||||
|
elif read[16:-8] == _IEEE_DOUBLE_BE:
|
||||||
|
return 'IEEE_DOUBLE_BE'
|
||||||
|
|
||||||
|
if saw is not None:
|
||||||
|
raise ValueError("Unrecognized format (%s)" % saw)
|
||||||
|
else:
|
||||||
|
# We never detected the after_sequence
|
||||||
|
raise ValueError("Could not lock sequences (%s)" % saw)
|
||||||
|
|
||||||
|
|
||||||
|
def check_for_right_shift_internal_compiler_error(cmd):
|
||||||
|
"""
|
||||||
|
On our arm CI, this fails with an internal compilation error
|
||||||
|
|
||||||
|
The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:
|
||||||
|
|
||||||
|
<source>: In function 'right_shift':
|
||||||
|
<source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
|
||||||
|
ip1[i] = ip1[i] >> in2;
|
||||||
|
^
|
||||||
|
Please submit a full bug report,
|
||||||
|
with preprocessed source if appropriate.
|
||||||
|
See <http://gcc.gnu.org/bugs.html> for instructions.
|
||||||
|
Compiler returned: 1
|
||||||
|
|
||||||
|
This function returns True if this compiler bug is present, and we need to
|
||||||
|
turn off optimization for the function
|
||||||
|
"""
|
||||||
|
cmd._check_compiler()
|
||||||
|
has_optimize = cmd.try_compile(textwrap.dedent("""\
|
||||||
|
__attribute__((optimize("O3"))) void right_shift() {}
|
||||||
|
"""), None, None)
|
||||||
|
if not has_optimize:
|
||||||
|
return False
|
||||||
|
|
||||||
|
no_err = cmd.try_compile(textwrap.dedent("""\
|
||||||
|
typedef long the_type; /* fails also for unsigned and long long */
|
||||||
|
__attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
|
||||||
|
for (int i = 0; i < n; i++) {
|
||||||
|
if (in2 < (the_type)sizeof(the_type) * 8) {
|
||||||
|
ip1[i] = ip1[i] >> in2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""), None, None)
|
||||||
|
return not no_err
|
906
Restaurant/Marta/venv/Lib/site-packages/numpy/core/shape_base.py
Normal file
906
Restaurant/Marta/venv/Lib/site-packages/numpy/core/shape_base.py
Normal file
@ -0,0 +1,906 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
|
||||||
|
'stack', 'vstack']
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import operator
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from . import numeric as _nx
|
||||||
|
from . import overrides
|
||||||
|
from ._asarray import array, asanyarray
|
||||||
|
from .multiarray import normalize_axis_index
|
||||||
|
from . import fromnumeric as _from_nx
|
||||||
|
|
||||||
|
|
||||||
|
array_function_dispatch = functools.partial(
|
||||||
|
overrides.array_function_dispatch, module='numpy')
|
||||||
|
|
||||||
|
|
||||||
|
def _atleast_1d_dispatcher(*arys):
|
||||||
|
return arys
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_atleast_1d_dispatcher)
|
||||||
|
def atleast_1d(*arys):
|
||||||
|
"""
|
||||||
|
Convert inputs to arrays with at least one dimension.
|
||||||
|
|
||||||
|
Scalar inputs are converted to 1-dimensional arrays, whilst
|
||||||
|
higher-dimensional inputs are preserved.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
arys1, arys2, ... : array_like
|
||||||
|
One or more input arrays.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
ret : ndarray
|
||||||
|
An array, or list of arrays, each with ``a.ndim >= 1``.
|
||||||
|
Copies are made only if necessary.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
atleast_2d, atleast_3d
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> np.atleast_1d(1.0)
|
||||||
|
array([1.])
|
||||||
|
|
||||||
|
>>> x = np.arange(9.0).reshape(3,3)
|
||||||
|
>>> np.atleast_1d(x)
|
||||||
|
array([[0., 1., 2.],
|
||||||
|
[3., 4., 5.],
|
||||||
|
[6., 7., 8.]])
|
||||||
|
>>> np.atleast_1d(x) is x
|
||||||
|
True
|
||||||
|
|
||||||
|
>>> np.atleast_1d(1, [3, 4])
|
||||||
|
[array([1]), array([3, 4])]
|
||||||
|
|
||||||
|
"""
|
||||||
|
res = []
|
||||||
|
for ary in arys:
|
||||||
|
ary = asanyarray(ary)
|
||||||
|
if ary.ndim == 0:
|
||||||
|
result = ary.reshape(1)
|
||||||
|
else:
|
||||||
|
result = ary
|
||||||
|
res.append(result)
|
||||||
|
if len(res) == 1:
|
||||||
|
return res[0]
|
||||||
|
else:
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
def _atleast_2d_dispatcher(*arys):
|
||||||
|
return arys
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_atleast_2d_dispatcher)
|
||||||
|
def atleast_2d(*arys):
|
||||||
|
"""
|
||||||
|
View inputs as arrays with at least two dimensions.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
arys1, arys2, ... : array_like
|
||||||
|
One or more array-like sequences. Non-array inputs are converted
|
||||||
|
to arrays. Arrays that already have two or more dimensions are
|
||||||
|
preserved.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
res, res2, ... : ndarray
|
||||||
|
An array, or list of arrays, each with ``a.ndim >= 2``.
|
||||||
|
Copies are avoided where possible, and views with two or more
|
||||||
|
dimensions are returned.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
atleast_1d, atleast_3d
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> np.atleast_2d(3.0)
|
||||||
|
array([[3.]])
|
||||||
|
|
||||||
|
>>> x = np.arange(3.0)
|
||||||
|
>>> np.atleast_2d(x)
|
||||||
|
array([[0., 1., 2.]])
|
||||||
|
>>> np.atleast_2d(x).base is x
|
||||||
|
True
|
||||||
|
|
||||||
|
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
|
||||||
|
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
|
||||||
|
|
||||||
|
"""
|
||||||
|
res = []
|
||||||
|
for ary in arys:
|
||||||
|
ary = asanyarray(ary)
|
||||||
|
if ary.ndim == 0:
|
||||||
|
result = ary.reshape(1, 1)
|
||||||
|
elif ary.ndim == 1:
|
||||||
|
result = ary[_nx.newaxis, :]
|
||||||
|
else:
|
||||||
|
result = ary
|
||||||
|
res.append(result)
|
||||||
|
if len(res) == 1:
|
||||||
|
return res[0]
|
||||||
|
else:
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
def _atleast_3d_dispatcher(*arys):
|
||||||
|
return arys
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_atleast_3d_dispatcher)
|
||||||
|
def atleast_3d(*arys):
|
||||||
|
"""
|
||||||
|
View inputs as arrays with at least three dimensions.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
arys1, arys2, ... : array_like
|
||||||
|
One or more array-like sequences. Non-array inputs are converted to
|
||||||
|
arrays. Arrays that already have three or more dimensions are
|
||||||
|
preserved.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
res1, res2, ... : ndarray
|
||||||
|
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
|
||||||
|
avoided where possible, and views with three or more dimensions are
|
||||||
|
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
|
||||||
|
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
|
||||||
|
view of shape ``(M, N, 1)``.
|
||||||
|
|
||||||
|
See Also
|
||||||
|
--------
|
||||||
|
atleast_1d, atleast_2d
|
||||||
|
|
||||||
|
Examples
|
||||||
|
--------
|
||||||
|
>>> np.atleast_3d(3.0)
|
||||||
|
array([[[3.]]])
|
||||||
|
|
||||||
|
>>> x = np.arange(3.0)
|
||||||
|
>>> np.atleast_3d(x).shape
|
||||||
|
(1, 3, 1)
|
||||||
|
|
||||||
|
>>> x = np.arange(12.0).reshape(4,3)
|
||||||
|
>>> np.atleast_3d(x).shape
|
||||||
|
(4, 3, 1)
|
||||||
|
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
|
||||||
|
True
|
||||||
|
|
||||||
|
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
|
||||||
|
... print(arr, arr.shape) # doctest: +SKIP
|
||||||
|
...
|
||||||
|
[[[1]
|
||||||
|
[2]]] (1, 2, 1)
|
||||||
|
[[[1]
|
||||||
|
[2]]] (1, 2, 1)
|
||||||
|
[[[1 2]]] (1, 1, 2)
|
||||||
|
|
||||||
|
"""
|
||||||
|
res = []
|
||||||
|
for ary in arys:
|
||||||
|
ary = asanyarray(ary)
|
||||||
|
if ary.ndim == 0:
|
||||||
|
result = ary.reshape(1, 1, 1)
|
||||||
|
elif ary.ndim == 1:
|
||||||
|
result = ary[_nx.newaxis, :, _nx.newaxis]
|
||||||
|
elif ary.ndim == 2:
|
||||||
|
result = ary[:, :, _nx.newaxis]
|
||||||
|
else:
|
||||||
|
result = ary
|
||||||
|
res.append(result)
|
||||||
|
if len(res) == 1:
|
||||||
|
return res[0]
|
||||||
|
else:
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
|
||||||
|
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
|
||||||
|
warnings.warn('arrays to stack must be passed as a "sequence" type '
|
||||||
|
'such as list or tuple. Support for non-sequence '
|
||||||
|
'iterables such as generators is deprecated as of '
|
||||||
|
'NumPy 1.16 and will raise an error in the future.',
|
||||||
|
FutureWarning, stacklevel=stacklevel)
|
||||||
|
return ()
|
||||||
|
return arrays
|
||||||
|
|
||||||
|
|
||||||
|
def _vhstack_dispatcher(tup):
|
||||||
|
return _arrays_for_stack_dispatcher(tup)
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    Equivalent to concatenation along the first axis after reshaping
    1-D arrays of shape ``(N,)`` to ``(1, N)``. Rebuilds arrays divided
    by `vsplit`.

    This function makes the most sense for arrays with up to 3
    dimensions, e.g. pixel data with height (first axis), width (second
    axis) and r/g/b channels (third axis). The functions `concatenate`,
    `stack` and `block` provide more general stacking and concatenation
    operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first
        axis. 1-D arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least
        2-D.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    concatenate : Join a sequence of arrays along an existing axis.
    vsplit : Split array into a list of multiple sub-arrays vertically.
    block : Assemble arrays from blocks.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.vstack((a, b))
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.vstack((a, b))
    array([[1],
           [2],
           [3],
           [2],
           [3],
           [4]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # Dispatcher never runs when __array_function__ is disabled, so
        # emit the non-sequence warning here instead.
        _arrays_for_stack_dispatcher(tup, stacklevel=2)
    arrs = atleast_2d(*tup)
    if isinstance(arrs, list):
        return _nx.concatenate(arrs, 0)
    # atleast_2d returns a bare array when given a single input.
    return _nx.concatenate([arrs], 0)
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    Equivalent to concatenation along the second axis, except for 1-D
    arrays, which are concatenated along the first axis. Rebuilds arrays
    divided by `hsplit`.

    This function makes the most sense for arrays with up to 3
    dimensions, e.g. pixel data with height (first axis), width (second
    axis) and r/g/b channels (third axis). The functions `concatenate`,
    `stack` and `block` provide more general stacking and concatenation
    operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second
        axis, except 1-D arrays which can be any length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    concatenate : Join a sequence of arrays along an existing axis.
    hsplit : Split array along second axis.
    block : Assemble arrays from blocks.

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.hstack((a,b))
    array([1, 2, 3, 2, 3, 4])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.hstack((a,b))
    array([[1, 2],
           [2, 3],
           [3, 4]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # Dispatcher never runs when __array_function__ is disabled, so
        # emit the non-sequence warning here instead.
        _arrays_for_stack_dispatcher(tup, stacklevel=2)

    arrs = atleast_1d(*tup)
    if not isinstance(arrs, list):
        arrs = [arrs]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal".
    axis = 0 if arrs and arrs[0].ndim == 1 else 1
    return _nx.concatenate(arrs, axis)
|
||||||
|
|
||||||
|
|
||||||
|
def _stack_dispatcher(arrays, axis=None, out=None):
    """Dispatcher for np.stack: yield the input arrays plus `out`."""
    arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
    if out is None:
        # Typical case: only `arrays` is provided; avoid a copy.
        return arrays
    candidates = list(arrays)
    candidates.append(out)
    return candidates
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result: ``axis=0`` makes it the first dimension,
    ``axis=-1`` the last.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.

    axis : int, optional
        The axis in the result array along which the input arrays are
        stacked.

    out : ndarray, optional
        If provided, the destination to place the result. The shape must
        be correct, matching that of what stack would have returned if
        no out argument were specified.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    split : Split array into a list of multiple sub-arrays of equal size.
    block : Assemble arrays from blocks.

    Examples
    --------
    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)

    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)

    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 2],
           [2, 3],
           [3, 4]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # Dispatcher never runs when __array_function__ is disabled, so
        # emit the non-sequence warning here instead.
        _arrays_for_stack_dispatcher(arrays, stacklevel=2)

    arrays = [asanyarray(arr) for arr in arrays]
    if len(arrays) == 0:
        raise ValueError('need at least one array to stack')

    first_shape = arrays[0].shape
    if any(arr.shape != first_shape for arr in arrays):
        raise ValueError('all input arrays must have the same shape')

    result_ndim = arrays[0].ndim + 1
    axis = normalize_axis_index(axis, result_ndim)

    # Insert a length-1 axis at `axis` in every input, then concatenate
    # along it.
    expander = (slice(None),) * axis + (_nx.newaxis,)
    expanded = [arr[expander] for arr in arrays]
    return _nx.concatenate(expanded, axis=axis, out=out)
|
||||||
|
|
||||||
|
|
||||||
|
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
# `__wrapped__` is the undecorated implementation attached by
# array_function_dispatch; fall back to the public function when the
# attribute is absent (i.e. dispatch disabled).
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
|
||||||
|
|
||||||
|
|
||||||
|
def _block_format_index(index):
|
||||||
|
"""
|
||||||
|
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
|
||||||
|
"""
|
||||||
|
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
|
||||||
|
return 'arrays' + idx_str
|
||||||
|
|
||||||
|
|
||||||
|
def _block_check_depths_match(arrays, parent_index=[]):
    """
    Recursive function checking that the depths of nested lists in `arrays`
    all match. Mismatch raises a ValueError as described in the block
    docstring below.

    The entire index (rather than just the depth) needs to be calculated
    for each innermost list, in case an error needs to be raised, so that
    the index of the offending list can be printed as part of the error.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    parent_index : list of int
        The full index of `arrays` within the nested lists passed to
        `_block_check_depths_match` at the top of the recursion.
        NOTE: the mutable default is safe here because the list is never
        mutated in place, only copied via ``parent_index + [i]``.

    Returns
    -------
    first_index : list of int
        The full index of an element from the bottom of the nesting in
        `arrays`. If any element at the bottom is an empty list, this will
        refer to it, and the last index along the empty axis will be None.
    max_arr_ndim : int
        The maximum of the ndims of the arrays nested in `arrays`.
    final_size: int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.

    """
    if type(arrays) is tuple:
        # not strictly necessary, but saves us from:
        # - more than one way to do things - no point treating tuples like
        #   lists
        # - horribly confusing behaviour that results when tuples are
        #   treated like ndarray
        raise TypeError(
            '{} is a tuple. '
            'Only lists can be used to arrange blocks, and np.block does '
            'not allow implicit conversion from tuple to ndarray.'.format(
                _block_format_index(parent_index)
            )
        )
    elif type(arrays) is list and len(arrays) > 0:
        # Recurse into each element lazily; the generator keeps memory
        # usage flat over wide lists.
        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
                      for i, arr in enumerate(arrays))

        first_index, max_arr_ndim, final_size = next(idxs_ndims)
        for index, ndim, size in idxs_ndims:
            final_size += size
            if ndim > max_arr_ndim:
                max_arr_ndim = ndim
            if len(index) != len(first_index):
                raise ValueError(
                    "List depths are mismatched. First element was at depth "
                    "{}, but there is an element at depth {} ({})".format(
                        len(first_index),
                        len(index),
                        _block_format_index(index)
                    )
                )
            # propagate our flag that indicates an empty list at the bottom
            if index[-1] is None:
                first_index = index

        return first_index, max_arr_ndim, final_size
    elif type(arrays) is list and len(arrays) == 0:
        # We've 'bottomed out' on an empty list
        return parent_index + [None], 0, 0
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        size = _size(arrays)
        return parent_index, _ndim(arrays), size
|
||||||
|
|
||||||
|
|
||||||
|
def _atleast_nd(a, ndim):
    # Ensures `a` has at least `ndim` dimensions by prepending
    # ones to `a.shape` as necessary.
    # copy=False avoids copying when `a` already has enough dimensions;
    # subok=True preserves ndarray subclasses.
    return array(a, ndmin=ndim, copy=False, subok=True)
|
||||||
|
|
||||||
|
|
||||||
|
def _accumulate(values):
|
||||||
|
# Helper function because Python 2.7 doesn't have
|
||||||
|
# itertools.accumulate
|
||||||
|
value = 0
|
||||||
|
accumulated = []
|
||||||
|
for v in values:
|
||||||
|
value += v
|
||||||
|
accumulated.append(value)
|
||||||
|
return accumulated
|
||||||
|
|
||||||
|
|
||||||
|
def _concatenate_shapes(shapes, axis):
    """Given array shapes, return the resulting shape and slices prefixes.

    These help in nested concatenation.

    Returns
    -------
    shape: tuple of int
        This tuple satisfies:
        ```
        shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
        shape == concatenate(arrs, axis).shape
        ```

    slice_prefixes: tuple of (slice(start, end), )
        For a list of arrays being concatenated, this returns the slice
        in the larger array at axis that needs to be sliced into.

        For example, the following holds:
        ```
        ret = concatenate([a, b, c], axis)
        _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)

        ret[(slice(None),) * axis + sl_a] == a
        ret[(slice(None),) * axis + sl_b] == b
        ret[(slice(None),) * axis + sl_c] == c
        ```

        These are called slice prefixes since they are used in the recursive
        blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
        that was computed deeper in the recursion.

        These are returned as tuples to ensure that they can quickly be added
        to an existing slice tuple without creating a new tuple every time.

    """
    # Cache a result that will be reused.
    shape_at_axis = [shape[axis] for shape in shapes]

    # Take a shape, any shape
    first_shape = shapes[0]
    first_shape_pre = first_shape[:axis]
    first_shape_post = first_shape[axis+1:]

    # All shapes must agree on every axis except `axis` itself.
    if any(shape[:axis] != first_shape_pre or
           shape[axis+1:] != first_shape_post for shape in shapes):
        raise ValueError(
            'Mismatched array shapes in block along axis {}.'.format(axis))

    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])

    # Running offsets along `axis` delimit where each input lands.
    offsets_at_axis = _accumulate(shape_at_axis)
    slice_prefixes = [(slice(start, end),)
                      for start, end in zip([0] + offsets_at_axis,
                                            offsets_at_axis)]
    return shape, slice_prefixes
|
||||||
|
|
||||||
|
|
||||||
|
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : list of int
        The number of nested lists
    result_ndim : int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices: list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays: list of ndarray
        The data to assign to each slice of the full array

    """
    if depth < max_depth:
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        # This level of nesting concatenates along this axis.
        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
|
||||||
|
|
||||||
|
|
||||||
|
def _block(arrays, max_depth, result_ndim, depth=0):
    """
    Internal implementation of block based on repeated concatenation.
    `arrays` is the argument passed to block. `max_depth` is the depth
    of nested lists within `arrays` and `result_ndim` is the greatest of
    the dimensions of the arrays in `arrays` and the depth of the lists
    in `arrays` (see block docstring for details).
    """
    if depth == max_depth:
        # Bottom of the nesting: `arrays` is a scalar or an array (not a
        # list); pad it with leading length-1 axes up to result_ndim.
        return _atleast_nd(arrays, result_ndim)
    pieces = [_block(arr, max_depth, result_ndim, depth + 1)
              for arr in arrays]
    return _concatenate(pieces, axis=-(max_depth - depth))
|
||||||
|
|
||||||
|
|
||||||
|
def _block_dispatcher(arrays):
|
||||||
|
# Use type(...) is list to match the behavior of np.block(), which special
|
||||||
|
# cases list specifically rather than allowing for generic iterables or
|
||||||
|
# tuple. Also, we know that list.__array_function__ will never exist.
|
||||||
|
if type(arrays) is list:
|
||||||
|
for subarrays in arrays:
|
||||||
|
for subarray in _block_dispatcher(subarrays):
|
||||||
|
yield subarray
|
||||||
|
else:
|
||||||
|
yield arrays
|
||||||
|
|
||||||
|
|
||||||
|
@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`) along
    the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is reached.

    Blocks can be of any dimension, but will not be broadcasted using the
    normal rules. Instead, leading axes of size 1 are inserted, to make
    ``block.ndim`` the same for all blocks. This is primarily useful for
    working with scalars, and means that code like ``np.block([v, 1])`` is
    valid, where ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.

    .. versionadded:: 1.13.0

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).

        Elements shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks. Its dimensionality is the
        greatest of the dimensionality of all the inputs and the depth to
        which the input list is nested.

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays together.
    stack : Stack arrays in sequence along a new dimension.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    vsplit : Split array into a list of multiple sub-arrays vertically.

    Notes
    -----
    When called with only scalars, ``np.block`` is equivalent to an ndarray
    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    This function does not enforce that the blocks lie on a fixed grid;
    since concatenation happens along the last axis first, some layouts
    cannot be produced directly. Matlab's "square bracket stacking",
    ``[A, B, ...; p, q, ...]``, is equivalent to
    ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix:

    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[2., 0., 0., 0., 0.],
           [0., 2., 0., 0., 0.],
           [1., 1., 3., 0., 0.],
           [1., 1., 0., 3., 0.],
           [1., 1., 0., 0., 3.]])

    With a list of depth 1, `block` can be used as `hstack`:

    >>> np.block([1, 2, 3])    # hstack([1, 2, 3])
    array([1, 2, 3])

    With a list of depth 2, `block` can be used in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.block([[a], [b]])    # vstack([a, b])
    array([[1, 2, 3],
           [2, 3, 4]])

    It can also be used in place of `atleast_1d` and `atleast_2d`:

    >>> np.block([np.array(0)])      # atleast_1d
    array([0])
    >>> np.block([[np.array(0)]])    # atleast_2d
    array([[0]])

    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # Benchmarking (i7-7700HQ, dual channel RAM at 2400MHz) showed that for
    # final sizes around 256x256 and above the single-allocation slicing
    # strategy beats repeated concatenation (which needs ~2 copies for a 2D
    # array); dtype had little influence. The crossover can be re-tuned via
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # until a C version of the `_block_info_recursion` algorithm exists,
    # which would likely be faster than the python version.
    if list_ndim * final_size > (2 * 512 * 512):
        return _block_slicing(arrays, list_ndim, result_ndim)
    return _block_concatenate(arrays, list_ndim, result_ndim)
|
||||||
|
|
||||||
|
|
||||||
|
# These helper functions are mostly used for testing.
|
||||||
|
# They allow us to write tests that directly call `_block_slicing`
|
||||||
|
# or `_block_concatenate` without blocking large arrays to force the wisdom
|
||||||
|
# to trigger the desired path.
|
||||||
|
def _block_setup(arrays):
    """
    Returns
    (`arrays`, list_ndim, result_ndim, final_size)
    """
    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
    list_ndim = len(bottom_index)
    # A trailing None in the bottom index marks an empty innermost list.
    empty_at_bottom = bool(bottom_index) and bottom_index[-1] is None
    if empty_at_bottom:
        raise ValueError(
            'List at {} cannot be empty'.format(
                _block_format_index(bottom_index)
            )
        )
    result_ndim = max(arr_ndim, list_ndim)
    return arrays, list_ndim, result_ndim, final_size
|
||||||
|
|
||||||
|
|
||||||
|
def _block_slicing(arrays, list_ndim, result_ndim):
    """Assemble the block result by allocating once and assigning slices."""
    shape, slices, arrays = _block_info_recursion(
        arrays, list_ndim, result_ndim)
    dtype = _nx.result_type(*[arr.dtype for arr in arrays])

    # Test preferring F only in the case that all input arrays are F.
    all_f = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
    all_c = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
    order = 'F' if all_f and not all_c else 'C'

    result = _nx.empty(shape=shape, dtype=dtype, order=order)
    # Note: In a c implementation, the function
    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
    # guessing of the desired order.

    for idx, piece in zip(slices, arrays):
        result[(Ellipsis,) + idx] = piece
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _block_concatenate(arrays, list_ndim, result_ndim):
    """Assemble the block result via repeated concatenation."""
    result = _block(arrays, list_ndim, result_ndim)
    if list_ndim != 0:
        return result
    # Edge case: `arrays` was a single ndarray (depth 0), so _block
    # returned a view. Copy so np.block never aliases its input. This
    # might copy scalars or lists twice, but that isn't a likely usecase
    # for those interested in performance.
    return result.copy()
|
@ -0,0 +1,76 @@
|
|||||||
|
"""Provide class for testing in French locale
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import locale
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
# Fix: the original spelt this ``__ALL__``, which Python ignores, so the
# export declaration had no effect. ``__all__`` (lowercase) is the name
# the import machinery actually recognizes for ``from module import *``.
__all__ = ['CommaDecimalPointLocale']
|
||||||
|
|
||||||
|
|
||||||
|
def find_comma_decimal_point_locale():
    """See if the platform has a locale using a comma as decimal point.

    Tries a short list of French (and Finnish) locales and reports the
    first one the platform accepts. The locale that was active when the
    function was called is always restored before returning.

    Returns
    -------
    old_locale : str
        Locale in effect when the function was called.
    new_locale : {str, None}
        First comma-decimal locale found, None if none found.

    """
    if sys.platform == 'win32':
        candidates = ['FRENCH']
    else:
        candidates = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']

    old_locale = locale.getlocale(locale.LC_NUMERIC)
    found = None
    try:
        for candidate in candidates:
            try:
                locale.setlocale(locale.LC_NUMERIC, candidate)
            except locale.Error:
                continue
            found = candidate
            break
    finally:
        # Always restore the caller's locale, even if setlocale raised
        # something unexpected.
        locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
    return old_locale, found
|
||||||
|
|
||||||
|
|
||||||
|
class CommaDecimalPointLocale(object):
    """Sets LC_NUMERIC to a locale with comma as decimal point.

    Classes derived from this class have setup and teardown methods that run
    tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
    the decimal point instead of periods ('.'). On exit the locale is restored
    to the initial locale. It also serves as context manager with the same
    effect. If no such locale is available, the test is skipped.

    .. versionadded:: 1.15.0

    """
    # Evaluated once at class-creation time: cur_locale is the locale that
    # was active then; tst_locale is the first comma-decimal locale found
    # (None when the platform offers none).
    (cur_locale, tst_locale) = find_comma_decimal_point_locale()

    def setup(self):
        # Pytest-style setup: skip the test when no comma-decimal locale
        # exists, otherwise switch LC_NUMERIC to it.
        if self.tst_locale is None:
            pytest.skip("No French locale available")
        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)

    def teardown(self):
        # Restore the locale captured at class-creation time.
        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)

    def __enter__(self):
        # Context-manager entry mirrors setup().
        if self.tst_locale is None:
            pytest.skip("No French locale available")
        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)

    def __exit__(self, type, value, traceback):
        # Context-manager exit mirrors teardown().
        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
|
Binary file not shown.
Binary file not shown.
@ -0,0 +1,15 @@
|
|||||||
|
Steps to validate transcendental functions:
|
||||||
|
1) Add a file 'umath-validation-set-<ufuncname>', where ufuncname is the name of
the function in NumPy you want to validate
|
||||||
|
2) The file should contain 4 columns: dtype,input,expected output,ulperror
|
||||||
|
a. dtype: one of np.float16, np.float32, np.float64
|
||||||
|
b. input: floating point input to ufunc in hex. Example: 0x414570a4
|
||||||
|
represents 12.340000152587890625
|
||||||
|
c. expected output: floating point output for the corresponding input in hex.
This should be computed using a high(er) precision library and then rounded to
the same format as the input.
|
||||||
|
d. ulperror: expected maximum ulp error of the function. This
should be the same across all rows of the same dtype. Otherwise, the function is
tested for the maximum ulp error among all entries of that dtype.
|
||||||
|
3) Add file umath-validation-set-<ufuncname> to the test file test_umath_accuracy.py
|
||||||
|
which will then validate your ufunc.
|
@ -0,0 +1,707 @@
|
|||||||
|
dtype,input,output,ulperrortol
|
||||||
|
## +ve denormals ##
|
||||||
|
np.float32,0x004b4716,0x3f800000,2
|
||||||
|
np.float32,0x007b2490,0x3f800000,2
|
||||||
|
np.float32,0x007c99fa,0x3f800000,2
|
||||||
|
np.float32,0x00734a0c,0x3f800000,2
|
||||||
|
np.float32,0x0070de24,0x3f800000,2
|
||||||
|
np.float32,0x007fffff,0x3f800000,2
|
||||||
|
np.float32,0x00000001,0x3f800000,2
|
||||||
|
## -ve denormals ##
|
||||||
|
np.float32,0x80495d65,0x3f800000,2
|
||||||
|
np.float32,0x806894f6,0x3f800000,2
|
||||||
|
np.float32,0x80555a76,0x3f800000,2
|
||||||
|
np.float32,0x804e1fb8,0x3f800000,2
|
||||||
|
np.float32,0x80687de9,0x3f800000,2
|
||||||
|
np.float32,0x807fffff,0x3f800000,2
|
||||||
|
np.float32,0x80000001,0x3f800000,2
|
||||||
|
## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
|
||||||
|
np.float32,0x00000000,0x3f800000,2
|
||||||
|
np.float32,0x80000000,0x3f800000,2
|
||||||
|
np.float32,0x00800000,0x3f800000,2
|
||||||
|
np.float32,0x7f7fffff,0x3f5a5f96,2
|
||||||
|
np.float32,0x80800000,0x3f800000,2
|
||||||
|
np.float32,0xff7fffff,0x3f5a5f96,2
|
||||||
|
## 1.00f + 0x00000001 ##
|
||||||
|
np.float32,0x3f800000,0x3f0a5140,2
|
||||||
|
np.float32,0x3f800001,0x3f0a513f,2
|
||||||
|
np.float32,0x3f800002,0x3f0a513d,2
|
||||||
|
np.float32,0xc090a8b0,0xbe4332ce,2
|
||||||
|
np.float32,0x41ce3184,0x3f4d1de1,2
|
||||||
|
np.float32,0xc1d85848,0xbeaa8980,2
|
||||||
|
np.float32,0x402b8820,0xbf653aa3,2
|
||||||
|
np.float32,0x42b4e454,0xbf4a338b,2
|
||||||
|
np.float32,0x42a67a60,0x3c58202e,2
|
||||||
|
np.float32,0x41d92388,0xbed987c7,2
|
||||||
|
np.float32,0x422dd66c,0x3f5dcab3,2
|
||||||
|
np.float32,0xc28f5be6,0xbf5688d8,2
|
||||||
|
np.float32,0x41ab2674,0xbf53aa3b,2
|
||||||
|
np.float32,0xd0102756,0x3f45d12d,2
|
||||||
|
np.float32,0xcf99405e,0xbe9cf281,2
|
||||||
|
np.float32,0xcfd83a12,0x3eaae4ca,2
|
||||||
|
np.float32,0x4fb54db0,0xbf7b2894,2
|
||||||
|
np.float32,0xcfcca29d,0x3f752e4e,2
|
||||||
|
np.float32,0xceec2ac0,0xbf745303,2
|
||||||
|
np.float32,0xcfdca97f,0x3ef554a7,2
|
||||||
|
np.float32,0xcfe92b0a,0x3f4618f2,2
|
||||||
|
np.float32,0x5014b0eb,0x3ee933e6,2
|
||||||
|
np.float32,0xcfa7ee96,0xbeedeeb2,2
|
||||||
|
np.float32,0x754c09a0,0xbef298de,2
|
||||||
|
np.float32,0x77a731fb,0x3f24599f,2
|
||||||
|
np.float32,0x76de2494,0x3f79576c,2
|
||||||
|
np.float32,0xf74920dc,0xbf4d196e,2
|
||||||
|
np.float32,0x7707a312,0xbeb5cb8e,2
|
||||||
|
np.float32,0x75bf9790,0xbf7fd7fe,2
|
||||||
|
np.float32,0xf4ca7c40,0xbe15107d,2
|
||||||
|
np.float32,0x77e91899,0xbe8a968b,2
|
||||||
|
np.float32,0xf74c9820,0xbf7f9677,2
|
||||||
|
np.float32,0x7785ca29,0xbe6ef93b,2
|
||||||
|
np.float32,0x3f490fdb,0x3f3504f3,2
|
||||||
|
np.float32,0xbf490fdb,0x3f3504f3,2
|
||||||
|
np.float32,0x3fc90fdb,0xb33bbd2e,2
|
||||||
|
np.float32,0xbfc90fdb,0xb33bbd2e,2
|
||||||
|
np.float32,0x40490fdb,0xbf800000,2
|
||||||
|
np.float32,0xc0490fdb,0xbf800000,2
|
||||||
|
np.float32,0x3fc90fdb,0xb33bbd2e,2
|
||||||
|
np.float32,0xbfc90fdb,0xb33bbd2e,2
|
||||||
|
np.float32,0x40490fdb,0xbf800000,2
|
||||||
|
np.float32,0xc0490fdb,0xbf800000,2
|
||||||
|
np.float32,0x40c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc0c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x4016cbe4,0xbf3504f3,2
|
||||||
|
np.float32,0xc016cbe4,0xbf3504f3,2
|
||||||
|
np.float32,0x4096cbe4,0x324cde2e,2
|
||||||
|
np.float32,0xc096cbe4,0x324cde2e,2
|
||||||
|
np.float32,0x4116cbe4,0xbf800000,2
|
||||||
|
np.float32,0xc116cbe4,0xbf800000,2
|
||||||
|
np.float32,0x40490fdb,0xbf800000,2
|
||||||
|
np.float32,0xc0490fdb,0xbf800000,2
|
||||||
|
np.float32,0x40c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc0c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x41490fdb,0x3f800000,2
|
||||||
|
np.float32,0xc1490fdb,0x3f800000,2
|
||||||
|
np.float32,0x407b53d2,0xbf3504f1,2
|
||||||
|
np.float32,0xc07b53d2,0xbf3504f1,2
|
||||||
|
np.float32,0x40fb53d2,0xb4b5563d,2
|
||||||
|
np.float32,0xc0fb53d2,0xb4b5563d,2
|
||||||
|
np.float32,0x417b53d2,0xbf800000,2
|
||||||
|
np.float32,0xc17b53d2,0xbf800000,2
|
||||||
|
np.float32,0x4096cbe4,0x324cde2e,2
|
||||||
|
np.float32,0xc096cbe4,0x324cde2e,2
|
||||||
|
np.float32,0x4116cbe4,0xbf800000,2
|
||||||
|
np.float32,0xc116cbe4,0xbf800000,2
|
||||||
|
np.float32,0x4196cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc196cbe4,0x3f800000,2
|
||||||
|
np.float32,0x40afede0,0x3f3504f7,2
|
||||||
|
np.float32,0xc0afede0,0x3f3504f7,2
|
||||||
|
np.float32,0x412fede0,0x353222c4,2
|
||||||
|
np.float32,0xc12fede0,0x353222c4,2
|
||||||
|
np.float32,0x41afede0,0xbf800000,2
|
||||||
|
np.float32,0xc1afede0,0xbf800000,2
|
||||||
|
np.float32,0x40c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc0c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x41490fdb,0x3f800000,2
|
||||||
|
np.float32,0xc1490fdb,0x3f800000,2
|
||||||
|
np.float32,0x41c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc1c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x40e231d6,0x3f3504f3,2
|
||||||
|
np.float32,0xc0e231d6,0x3f3504f3,2
|
||||||
|
np.float32,0x416231d6,0xb319a6a2,2
|
||||||
|
np.float32,0xc16231d6,0xb319a6a2,2
|
||||||
|
np.float32,0x41e231d6,0xbf800000,2
|
||||||
|
np.float32,0xc1e231d6,0xbf800000,2
|
||||||
|
np.float32,0x40fb53d2,0xb4b5563d,2
|
||||||
|
np.float32,0xc0fb53d2,0xb4b5563d,2
|
||||||
|
np.float32,0x417b53d2,0xbf800000,2
|
||||||
|
np.float32,0xc17b53d2,0xbf800000,2
|
||||||
|
np.float32,0x41fb53d2,0x3f800000,2
|
||||||
|
np.float32,0xc1fb53d2,0x3f800000,2
|
||||||
|
np.float32,0x410a3ae7,0xbf3504fb,2
|
||||||
|
np.float32,0xc10a3ae7,0xbf3504fb,2
|
||||||
|
np.float32,0x418a3ae7,0x35b08908,2
|
||||||
|
np.float32,0xc18a3ae7,0x35b08908,2
|
||||||
|
np.float32,0x420a3ae7,0xbf800000,2
|
||||||
|
np.float32,0xc20a3ae7,0xbf800000,2
|
||||||
|
np.float32,0x4116cbe4,0xbf800000,2
|
||||||
|
np.float32,0xc116cbe4,0xbf800000,2
|
||||||
|
np.float32,0x4196cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc196cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4216cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc216cbe4,0x3f800000,2
|
||||||
|
np.float32,0x41235ce2,0xbf3504ef,2
|
||||||
|
np.float32,0xc1235ce2,0xbf3504ef,2
|
||||||
|
np.float32,0x41a35ce2,0xb53889b6,2
|
||||||
|
np.float32,0xc1a35ce2,0xb53889b6,2
|
||||||
|
np.float32,0x42235ce2,0xbf800000,2
|
||||||
|
np.float32,0xc2235ce2,0xbf800000,2
|
||||||
|
np.float32,0x412fede0,0x353222c4,2
|
||||||
|
np.float32,0xc12fede0,0x353222c4,2
|
||||||
|
np.float32,0x41afede0,0xbf800000,2
|
||||||
|
np.float32,0xc1afede0,0xbf800000,2
|
||||||
|
np.float32,0x422fede0,0x3f800000,2
|
||||||
|
np.float32,0xc22fede0,0x3f800000,2
|
||||||
|
np.float32,0x413c7edd,0x3f3504f4,2
|
||||||
|
np.float32,0xc13c7edd,0x3f3504f4,2
|
||||||
|
np.float32,0x41bc7edd,0x33800add,2
|
||||||
|
np.float32,0xc1bc7edd,0x33800add,2
|
||||||
|
np.float32,0x423c7edd,0xbf800000,2
|
||||||
|
np.float32,0xc23c7edd,0xbf800000,2
|
||||||
|
np.float32,0x41490fdb,0x3f800000,2
|
||||||
|
np.float32,0xc1490fdb,0x3f800000,2
|
||||||
|
np.float32,0x41c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc1c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x42490fdb,0x3f800000,2
|
||||||
|
np.float32,0xc2490fdb,0x3f800000,2
|
||||||
|
np.float32,0x4155a0d9,0x3f3504eb,2
|
||||||
|
np.float32,0xc155a0d9,0x3f3504eb,2
|
||||||
|
np.float32,0x41d5a0d9,0xb5b3bc81,2
|
||||||
|
np.float32,0xc1d5a0d9,0xb5b3bc81,2
|
||||||
|
np.float32,0x4255a0d9,0xbf800000,2
|
||||||
|
np.float32,0xc255a0d9,0xbf800000,2
|
||||||
|
np.float32,0x416231d6,0xb319a6a2,2
|
||||||
|
np.float32,0xc16231d6,0xb319a6a2,2
|
||||||
|
np.float32,0x41e231d6,0xbf800000,2
|
||||||
|
np.float32,0xc1e231d6,0xbf800000,2
|
||||||
|
np.float32,0x426231d6,0x3f800000,2
|
||||||
|
np.float32,0xc26231d6,0x3f800000,2
|
||||||
|
np.float32,0x416ec2d4,0xbf3504f7,2
|
||||||
|
np.float32,0xc16ec2d4,0xbf3504f7,2
|
||||||
|
np.float32,0x41eec2d4,0x353ef0a7,2
|
||||||
|
np.float32,0xc1eec2d4,0x353ef0a7,2
|
||||||
|
np.float32,0x426ec2d4,0xbf800000,2
|
||||||
|
np.float32,0xc26ec2d4,0xbf800000,2
|
||||||
|
np.float32,0x417b53d2,0xbf800000,2
|
||||||
|
np.float32,0xc17b53d2,0xbf800000,2
|
||||||
|
np.float32,0x41fb53d2,0x3f800000,2
|
||||||
|
np.float32,0xc1fb53d2,0x3f800000,2
|
||||||
|
np.float32,0x427b53d2,0x3f800000,2
|
||||||
|
np.float32,0xc27b53d2,0x3f800000,2
|
||||||
|
np.float32,0x4183f268,0xbf3504e7,2
|
||||||
|
np.float32,0xc183f268,0xbf3504e7,2
|
||||||
|
np.float32,0x4203f268,0xb6059a13,2
|
||||||
|
np.float32,0xc203f268,0xb6059a13,2
|
||||||
|
np.float32,0x4283f268,0xbf800000,2
|
||||||
|
np.float32,0xc283f268,0xbf800000,2
|
||||||
|
np.float32,0x418a3ae7,0x35b08908,2
|
||||||
|
np.float32,0xc18a3ae7,0x35b08908,2
|
||||||
|
np.float32,0x420a3ae7,0xbf800000,2
|
||||||
|
np.float32,0xc20a3ae7,0xbf800000,2
|
||||||
|
np.float32,0x428a3ae7,0x3f800000,2
|
||||||
|
np.float32,0xc28a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x41908365,0x3f3504f0,2
|
||||||
|
np.float32,0xc1908365,0x3f3504f0,2
|
||||||
|
np.float32,0x42108365,0xb512200d,2
|
||||||
|
np.float32,0xc2108365,0xb512200d,2
|
||||||
|
np.float32,0x42908365,0xbf800000,2
|
||||||
|
np.float32,0xc2908365,0xbf800000,2
|
||||||
|
np.float32,0x4196cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc196cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4216cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc216cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4296cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc296cbe4,0x3f800000,2
|
||||||
|
np.float32,0x419d1463,0x3f3504ef,2
|
||||||
|
np.float32,0xc19d1463,0x3f3504ef,2
|
||||||
|
np.float32,0x421d1463,0xb5455799,2
|
||||||
|
np.float32,0xc21d1463,0xb5455799,2
|
||||||
|
np.float32,0x429d1463,0xbf800000,2
|
||||||
|
np.float32,0xc29d1463,0xbf800000,2
|
||||||
|
np.float32,0x41a35ce2,0xb53889b6,2
|
||||||
|
np.float32,0xc1a35ce2,0xb53889b6,2
|
||||||
|
np.float32,0x42235ce2,0xbf800000,2
|
||||||
|
np.float32,0xc2235ce2,0xbf800000,2
|
||||||
|
np.float32,0x42a35ce2,0x3f800000,2
|
||||||
|
np.float32,0xc2a35ce2,0x3f800000,2
|
||||||
|
np.float32,0x41a9a561,0xbf3504ff,2
|
||||||
|
np.float32,0xc1a9a561,0xbf3504ff,2
|
||||||
|
np.float32,0x4229a561,0x360733d0,2
|
||||||
|
np.float32,0xc229a561,0x360733d0,2
|
||||||
|
np.float32,0x42a9a561,0xbf800000,2
|
||||||
|
np.float32,0xc2a9a561,0xbf800000,2
|
||||||
|
np.float32,0x41afede0,0xbf800000,2
|
||||||
|
np.float32,0xc1afede0,0xbf800000,2
|
||||||
|
np.float32,0x422fede0,0x3f800000,2
|
||||||
|
np.float32,0xc22fede0,0x3f800000,2
|
||||||
|
np.float32,0x42afede0,0x3f800000,2
|
||||||
|
np.float32,0xc2afede0,0x3f800000,2
|
||||||
|
np.float32,0x41b6365e,0xbf3504f6,2
|
||||||
|
np.float32,0xc1b6365e,0xbf3504f6,2
|
||||||
|
np.float32,0x4236365e,0x350bb91c,2
|
||||||
|
np.float32,0xc236365e,0x350bb91c,2
|
||||||
|
np.float32,0x42b6365e,0xbf800000,2
|
||||||
|
np.float32,0xc2b6365e,0xbf800000,2
|
||||||
|
np.float32,0x41bc7edd,0x33800add,2
|
||||||
|
np.float32,0xc1bc7edd,0x33800add,2
|
||||||
|
np.float32,0x423c7edd,0xbf800000,2
|
||||||
|
np.float32,0xc23c7edd,0xbf800000,2
|
||||||
|
np.float32,0x42bc7edd,0x3f800000,2
|
||||||
|
np.float32,0xc2bc7edd,0x3f800000,2
|
||||||
|
np.float32,0x41c2c75c,0x3f3504f8,2
|
||||||
|
np.float32,0xc1c2c75c,0x3f3504f8,2
|
||||||
|
np.float32,0x4242c75c,0x354bbe8a,2
|
||||||
|
np.float32,0xc242c75c,0x354bbe8a,2
|
||||||
|
np.float32,0x42c2c75c,0xbf800000,2
|
||||||
|
np.float32,0xc2c2c75c,0xbf800000,2
|
||||||
|
np.float32,0x41c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc1c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x42490fdb,0x3f800000,2
|
||||||
|
np.float32,0xc2490fdb,0x3f800000,2
|
||||||
|
np.float32,0x42c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc2c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x41cf585a,0x3f3504e7,2
|
||||||
|
np.float32,0xc1cf585a,0x3f3504e7,2
|
||||||
|
np.float32,0x424f585a,0xb608cd8c,2
|
||||||
|
np.float32,0xc24f585a,0xb608cd8c,2
|
||||||
|
np.float32,0x42cf585a,0xbf800000,2
|
||||||
|
np.float32,0xc2cf585a,0xbf800000,2
|
||||||
|
np.float32,0x41d5a0d9,0xb5b3bc81,2
|
||||||
|
np.float32,0xc1d5a0d9,0xb5b3bc81,2
|
||||||
|
np.float32,0x4255a0d9,0xbf800000,2
|
||||||
|
np.float32,0xc255a0d9,0xbf800000,2
|
||||||
|
np.float32,0x42d5a0d9,0x3f800000,2
|
||||||
|
np.float32,0xc2d5a0d9,0x3f800000,2
|
||||||
|
np.float32,0x41dbe958,0xbf350507,2
|
||||||
|
np.float32,0xc1dbe958,0xbf350507,2
|
||||||
|
np.float32,0x425be958,0x365eab75,2
|
||||||
|
np.float32,0xc25be958,0x365eab75,2
|
||||||
|
np.float32,0x42dbe958,0xbf800000,2
|
||||||
|
np.float32,0xc2dbe958,0xbf800000,2
|
||||||
|
np.float32,0x41e231d6,0xbf800000,2
|
||||||
|
np.float32,0xc1e231d6,0xbf800000,2
|
||||||
|
np.float32,0x426231d6,0x3f800000,2
|
||||||
|
np.float32,0xc26231d6,0x3f800000,2
|
||||||
|
np.float32,0x42e231d6,0x3f800000,2
|
||||||
|
np.float32,0xc2e231d6,0x3f800000,2
|
||||||
|
np.float32,0x41e87a55,0xbf3504ef,2
|
||||||
|
np.float32,0xc1e87a55,0xbf3504ef,2
|
||||||
|
np.float32,0x42687a55,0xb552257b,2
|
||||||
|
np.float32,0xc2687a55,0xb552257b,2
|
||||||
|
np.float32,0x42e87a55,0xbf800000,2
|
||||||
|
np.float32,0xc2e87a55,0xbf800000,2
|
||||||
|
np.float32,0x41eec2d4,0x353ef0a7,2
|
||||||
|
np.float32,0xc1eec2d4,0x353ef0a7,2
|
||||||
|
np.float32,0x426ec2d4,0xbf800000,2
|
||||||
|
np.float32,0xc26ec2d4,0xbf800000,2
|
||||||
|
np.float32,0x42eec2d4,0x3f800000,2
|
||||||
|
np.float32,0xc2eec2d4,0x3f800000,2
|
||||||
|
np.float32,0x41f50b53,0x3f3504ff,2
|
||||||
|
np.float32,0xc1f50b53,0x3f3504ff,2
|
||||||
|
np.float32,0x42750b53,0x360a6748,2
|
||||||
|
np.float32,0xc2750b53,0x360a6748,2
|
||||||
|
np.float32,0x42f50b53,0xbf800000,2
|
||||||
|
np.float32,0xc2f50b53,0xbf800000,2
|
||||||
|
np.float32,0x41fb53d2,0x3f800000,2
|
||||||
|
np.float32,0xc1fb53d2,0x3f800000,2
|
||||||
|
np.float32,0x427b53d2,0x3f800000,2
|
||||||
|
np.float32,0xc27b53d2,0x3f800000,2
|
||||||
|
np.float32,0x42fb53d2,0x3f800000,2
|
||||||
|
np.float32,0xc2fb53d2,0x3f800000,2
|
||||||
|
np.float32,0x4200ce28,0x3f3504f6,2
|
||||||
|
np.float32,0xc200ce28,0x3f3504f6,2
|
||||||
|
np.float32,0x4280ce28,0x34fdd672,2
|
||||||
|
np.float32,0xc280ce28,0x34fdd672,2
|
||||||
|
np.float32,0x4300ce28,0xbf800000,2
|
||||||
|
np.float32,0xc300ce28,0xbf800000,2
|
||||||
|
np.float32,0x4203f268,0xb6059a13,2
|
||||||
|
np.float32,0xc203f268,0xb6059a13,2
|
||||||
|
np.float32,0x4283f268,0xbf800000,2
|
||||||
|
np.float32,0xc283f268,0xbf800000,2
|
||||||
|
np.float32,0x4303f268,0x3f800000,2
|
||||||
|
np.float32,0xc303f268,0x3f800000,2
|
||||||
|
np.float32,0x420716a7,0xbf3504f8,2
|
||||||
|
np.float32,0xc20716a7,0xbf3504f8,2
|
||||||
|
np.float32,0x428716a7,0x35588c6d,2
|
||||||
|
np.float32,0xc28716a7,0x35588c6d,2
|
||||||
|
np.float32,0x430716a7,0xbf800000,2
|
||||||
|
np.float32,0xc30716a7,0xbf800000,2
|
||||||
|
np.float32,0x420a3ae7,0xbf800000,2
|
||||||
|
np.float32,0xc20a3ae7,0xbf800000,2
|
||||||
|
np.float32,0x428a3ae7,0x3f800000,2
|
||||||
|
np.float32,0xc28a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x430a3ae7,0x3f800000,2
|
||||||
|
np.float32,0xc30a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x420d5f26,0xbf3504e7,2
|
||||||
|
np.float32,0xc20d5f26,0xbf3504e7,2
|
||||||
|
np.float32,0x428d5f26,0xb60c0105,2
|
||||||
|
np.float32,0xc28d5f26,0xb60c0105,2
|
||||||
|
np.float32,0x430d5f26,0xbf800000,2
|
||||||
|
np.float32,0xc30d5f26,0xbf800000,2
|
||||||
|
np.float32,0x42108365,0xb512200d,2
|
||||||
|
np.float32,0xc2108365,0xb512200d,2
|
||||||
|
np.float32,0x42908365,0xbf800000,2
|
||||||
|
np.float32,0xc2908365,0xbf800000,2
|
||||||
|
np.float32,0x43108365,0x3f800000,2
|
||||||
|
np.float32,0xc3108365,0x3f800000,2
|
||||||
|
np.float32,0x4213a7a5,0x3f350507,2
|
||||||
|
np.float32,0xc213a7a5,0x3f350507,2
|
||||||
|
np.float32,0x4293a7a5,0x3661deee,2
|
||||||
|
np.float32,0xc293a7a5,0x3661deee,2
|
||||||
|
np.float32,0x4313a7a5,0xbf800000,2
|
||||||
|
np.float32,0xc313a7a5,0xbf800000,2
|
||||||
|
np.float32,0x4216cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc216cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4296cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc296cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4316cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc316cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4219f024,0x3f3504d8,2
|
||||||
|
np.float32,0xc219f024,0x3f3504d8,2
|
||||||
|
np.float32,0x4299f024,0xb69bde6c,2
|
||||||
|
np.float32,0xc299f024,0xb69bde6c,2
|
||||||
|
np.float32,0x4319f024,0xbf800000,2
|
||||||
|
np.float32,0xc319f024,0xbf800000,2
|
||||||
|
np.float32,0x421d1463,0xb5455799,2
|
||||||
|
np.float32,0xc21d1463,0xb5455799,2
|
||||||
|
np.float32,0x429d1463,0xbf800000,2
|
||||||
|
np.float32,0xc29d1463,0xbf800000,2
|
||||||
|
np.float32,0x431d1463,0x3f800000,2
|
||||||
|
np.float32,0xc31d1463,0x3f800000,2
|
||||||
|
np.float32,0x422038a3,0xbf350516,2
|
||||||
|
np.float32,0xc22038a3,0xbf350516,2
|
||||||
|
np.float32,0x42a038a3,0x36c6cd61,2
|
||||||
|
np.float32,0xc2a038a3,0x36c6cd61,2
|
||||||
|
np.float32,0x432038a3,0xbf800000,2
|
||||||
|
np.float32,0xc32038a3,0xbf800000,2
|
||||||
|
np.float32,0x42235ce2,0xbf800000,2
|
||||||
|
np.float32,0xc2235ce2,0xbf800000,2
|
||||||
|
np.float32,0x42a35ce2,0x3f800000,2
|
||||||
|
np.float32,0xc2a35ce2,0x3f800000,2
|
||||||
|
np.float32,0x43235ce2,0x3f800000,2
|
||||||
|
np.float32,0xc3235ce2,0x3f800000,2
|
||||||
|
np.float32,0x42268121,0xbf3504f6,2
|
||||||
|
np.float32,0xc2268121,0xbf3504f6,2
|
||||||
|
np.float32,0x42a68121,0x34e43aac,2
|
||||||
|
np.float32,0xc2a68121,0x34e43aac,2
|
||||||
|
np.float32,0x43268121,0xbf800000,2
|
||||||
|
np.float32,0xc3268121,0xbf800000,2
|
||||||
|
np.float32,0x4229a561,0x360733d0,2
|
||||||
|
np.float32,0xc229a561,0x360733d0,2
|
||||||
|
np.float32,0x42a9a561,0xbf800000,2
|
||||||
|
np.float32,0xc2a9a561,0xbf800000,2
|
||||||
|
np.float32,0x4329a561,0x3f800000,2
|
||||||
|
np.float32,0xc329a561,0x3f800000,2
|
||||||
|
np.float32,0x422cc9a0,0x3f3504f8,2
|
||||||
|
np.float32,0xc22cc9a0,0x3f3504f8,2
|
||||||
|
np.float32,0x42acc9a0,0x35655a50,2
|
||||||
|
np.float32,0xc2acc9a0,0x35655a50,2
|
||||||
|
np.float32,0x432cc9a0,0xbf800000,2
|
||||||
|
np.float32,0xc32cc9a0,0xbf800000,2
|
||||||
|
np.float32,0x422fede0,0x3f800000,2
|
||||||
|
np.float32,0xc22fede0,0x3f800000,2
|
||||||
|
np.float32,0x42afede0,0x3f800000,2
|
||||||
|
np.float32,0xc2afede0,0x3f800000,2
|
||||||
|
np.float32,0x432fede0,0x3f800000,2
|
||||||
|
np.float32,0xc32fede0,0x3f800000,2
|
||||||
|
np.float32,0x4233121f,0x3f3504e7,2
|
||||||
|
np.float32,0xc233121f,0x3f3504e7,2
|
||||||
|
np.float32,0x42b3121f,0xb60f347d,2
|
||||||
|
np.float32,0xc2b3121f,0xb60f347d,2
|
||||||
|
np.float32,0x4333121f,0xbf800000,2
|
||||||
|
np.float32,0xc333121f,0xbf800000,2
|
||||||
|
np.float32,0x4236365e,0x350bb91c,2
|
||||||
|
np.float32,0xc236365e,0x350bb91c,2
|
||||||
|
np.float32,0x42b6365e,0xbf800000,2
|
||||||
|
np.float32,0xc2b6365e,0xbf800000,2
|
||||||
|
np.float32,0x4336365e,0x3f800000,2
|
||||||
|
np.float32,0xc336365e,0x3f800000,2
|
||||||
|
np.float32,0x42395a9e,0xbf350507,2
|
||||||
|
np.float32,0xc2395a9e,0xbf350507,2
|
||||||
|
np.float32,0x42b95a9e,0x36651267,2
|
||||||
|
np.float32,0xc2b95a9e,0x36651267,2
|
||||||
|
np.float32,0x43395a9e,0xbf800000,2
|
||||||
|
np.float32,0xc3395a9e,0xbf800000,2
|
||||||
|
np.float32,0x423c7edd,0xbf800000,2
|
||||||
|
np.float32,0xc23c7edd,0xbf800000,2
|
||||||
|
np.float32,0x42bc7edd,0x3f800000,2
|
||||||
|
np.float32,0xc2bc7edd,0x3f800000,2
|
||||||
|
np.float32,0x433c7edd,0x3f800000,2
|
||||||
|
np.float32,0xc33c7edd,0x3f800000,2
|
||||||
|
np.float32,0x423fa31d,0xbf3504d7,2
|
||||||
|
np.float32,0xc23fa31d,0xbf3504d7,2
|
||||||
|
np.float32,0x42bfa31d,0xb69d7828,2
|
||||||
|
np.float32,0xc2bfa31d,0xb69d7828,2
|
||||||
|
np.float32,0x433fa31d,0xbf800000,2
|
||||||
|
np.float32,0xc33fa31d,0xbf800000,2
|
||||||
|
np.float32,0x4242c75c,0x354bbe8a,2
|
||||||
|
np.float32,0xc242c75c,0x354bbe8a,2
|
||||||
|
np.float32,0x42c2c75c,0xbf800000,2
|
||||||
|
np.float32,0xc2c2c75c,0xbf800000,2
|
||||||
|
np.float32,0x4342c75c,0x3f800000,2
|
||||||
|
np.float32,0xc342c75c,0x3f800000,2
|
||||||
|
np.float32,0x4245eb9c,0x3f350517,2
|
||||||
|
np.float32,0xc245eb9c,0x3f350517,2
|
||||||
|
np.float32,0x42c5eb9c,0x36c8671d,2
|
||||||
|
np.float32,0xc2c5eb9c,0x36c8671d,2
|
||||||
|
np.float32,0x4345eb9c,0xbf800000,2
|
||||||
|
np.float32,0xc345eb9c,0xbf800000,2
|
||||||
|
np.float32,0x42490fdb,0x3f800000,2
|
||||||
|
np.float32,0xc2490fdb,0x3f800000,2
|
||||||
|
np.float32,0x42c90fdb,0x3f800000,2
|
||||||
|
np.float32,0xc2c90fdb,0x3f800000,2
|
||||||
|
np.float32,0x43490fdb,0x3f800000,2
|
||||||
|
np.float32,0xc3490fdb,0x3f800000,2
|
||||||
|
np.float32,0x424c341a,0x3f3504f5,2
|
||||||
|
np.float32,0xc24c341a,0x3f3504f5,2
|
||||||
|
np.float32,0x42cc341a,0x34ca9ee6,2
|
||||||
|
np.float32,0xc2cc341a,0x34ca9ee6,2
|
||||||
|
np.float32,0x434c341a,0xbf800000,2
|
||||||
|
np.float32,0xc34c341a,0xbf800000,2
|
||||||
|
np.float32,0x424f585a,0xb608cd8c,2
|
||||||
|
np.float32,0xc24f585a,0xb608cd8c,2
|
||||||
|
np.float32,0x42cf585a,0xbf800000,2
|
||||||
|
np.float32,0xc2cf585a,0xbf800000,2
|
||||||
|
np.float32,0x434f585a,0x3f800000,2
|
||||||
|
np.float32,0xc34f585a,0x3f800000,2
|
||||||
|
np.float32,0x42527c99,0xbf3504f9,2
|
||||||
|
np.float32,0xc2527c99,0xbf3504f9,2
|
||||||
|
np.float32,0x42d27c99,0x35722833,2
|
||||||
|
np.float32,0xc2d27c99,0x35722833,2
|
||||||
|
np.float32,0x43527c99,0xbf800000,2
|
||||||
|
np.float32,0xc3527c99,0xbf800000,2
|
||||||
|
np.float32,0x4255a0d9,0xbf800000,2
|
||||||
|
np.float32,0xc255a0d9,0xbf800000,2
|
||||||
|
np.float32,0x42d5a0d9,0x3f800000,2
|
||||||
|
np.float32,0xc2d5a0d9,0x3f800000,2
|
||||||
|
np.float32,0x4355a0d9,0x3f800000,2
|
||||||
|
np.float32,0xc355a0d9,0x3f800000,2
|
||||||
|
np.float32,0x4258c518,0xbf3504e6,2
|
||||||
|
np.float32,0xc258c518,0xbf3504e6,2
|
||||||
|
np.float32,0x42d8c518,0xb61267f6,2
|
||||||
|
np.float32,0xc2d8c518,0xb61267f6,2
|
||||||
|
np.float32,0x4358c518,0xbf800000,2
|
||||||
|
np.float32,0xc358c518,0xbf800000,2
|
||||||
|
np.float32,0x425be958,0x365eab75,2
|
||||||
|
np.float32,0xc25be958,0x365eab75,2
|
||||||
|
np.float32,0x42dbe958,0xbf800000,2
|
||||||
|
np.float32,0xc2dbe958,0xbf800000,2
|
||||||
|
np.float32,0x435be958,0x3f800000,2
|
||||||
|
np.float32,0xc35be958,0x3f800000,2
|
||||||
|
np.float32,0x425f0d97,0x3f350508,2
|
||||||
|
np.float32,0xc25f0d97,0x3f350508,2
|
||||||
|
np.float32,0x42df0d97,0x366845e0,2
|
||||||
|
np.float32,0xc2df0d97,0x366845e0,2
|
||||||
|
np.float32,0x435f0d97,0xbf800000,2
|
||||||
|
np.float32,0xc35f0d97,0xbf800000,2
|
||||||
|
np.float32,0x426231d6,0x3f800000,2
|
||||||
|
np.float32,0xc26231d6,0x3f800000,2
|
||||||
|
np.float32,0x42e231d6,0x3f800000,2
|
||||||
|
np.float32,0xc2e231d6,0x3f800000,2
|
||||||
|
np.float32,0x436231d6,0x3f800000,2
|
||||||
|
np.float32,0xc36231d6,0x3f800000,2
|
||||||
|
np.float32,0x42655616,0x3f3504d7,2
|
||||||
|
np.float32,0xc2655616,0x3f3504d7,2
|
||||||
|
np.float32,0x42e55616,0xb69f11e5,2
|
||||||
|
np.float32,0xc2e55616,0xb69f11e5,2
|
||||||
|
np.float32,0x43655616,0xbf800000,2
|
||||||
|
np.float32,0xc3655616,0xbf800000,2
|
||||||
|
np.float32,0x42687a55,0xb552257b,2
|
||||||
|
np.float32,0xc2687a55,0xb552257b,2
|
||||||
|
np.float32,0x42e87a55,0xbf800000,2
|
||||||
|
np.float32,0xc2e87a55,0xbf800000,2
|
||||||
|
np.float32,0x43687a55,0x3f800000,2
|
||||||
|
np.float32,0xc3687a55,0x3f800000,2
|
||||||
|
np.float32,0x426b9e95,0xbf350517,2
|
||||||
|
np.float32,0xc26b9e95,0xbf350517,2
|
||||||
|
np.float32,0x42eb9e95,0x36ca00d9,2
|
||||||
|
np.float32,0xc2eb9e95,0x36ca00d9,2
|
||||||
|
np.float32,0x436b9e95,0xbf800000,2
|
||||||
|
np.float32,0xc36b9e95,0xbf800000,2
|
||||||
|
np.float32,0x426ec2d4,0xbf800000,2
|
||||||
|
np.float32,0xc26ec2d4,0xbf800000,2
|
||||||
|
np.float32,0x42eec2d4,0x3f800000,2
|
||||||
|
np.float32,0xc2eec2d4,0x3f800000,2
|
||||||
|
np.float32,0x436ec2d4,0x3f800000,2
|
||||||
|
np.float32,0xc36ec2d4,0x3f800000,2
|
||||||
|
np.float32,0x4271e713,0xbf3504f5,2
|
||||||
|
np.float32,0xc271e713,0xbf3504f5,2
|
||||||
|
np.float32,0x42f1e713,0x34b10321,2
|
||||||
|
np.float32,0xc2f1e713,0x34b10321,2
|
||||||
|
np.float32,0x4371e713,0xbf800000,2
|
||||||
|
np.float32,0xc371e713,0xbf800000,2
|
||||||
|
np.float32,0x42750b53,0x360a6748,2
|
||||||
|
np.float32,0xc2750b53,0x360a6748,2
|
||||||
|
np.float32,0x42f50b53,0xbf800000,2
|
||||||
|
np.float32,0xc2f50b53,0xbf800000,2
|
||||||
|
np.float32,0x43750b53,0x3f800000,2
|
||||||
|
np.float32,0xc3750b53,0x3f800000,2
|
||||||
|
np.float32,0x42782f92,0x3f3504f9,2
|
||||||
|
np.float32,0xc2782f92,0x3f3504f9,2
|
||||||
|
np.float32,0x42f82f92,0x357ef616,2
|
||||||
|
np.float32,0xc2f82f92,0x357ef616,2
|
||||||
|
np.float32,0x43782f92,0xbf800000,2
|
||||||
|
np.float32,0xc3782f92,0xbf800000,2
|
||||||
|
np.float32,0x427b53d2,0x3f800000,2
|
||||||
|
np.float32,0xc27b53d2,0x3f800000,2
|
||||||
|
np.float32,0x42fb53d2,0x3f800000,2
|
||||||
|
np.float32,0xc2fb53d2,0x3f800000,2
|
||||||
|
np.float32,0x437b53d2,0x3f800000,2
|
||||||
|
np.float32,0xc37b53d2,0x3f800000,2
|
||||||
|
np.float32,0x427e7811,0x3f3504e6,2
|
||||||
|
np.float32,0xc27e7811,0x3f3504e6,2
|
||||||
|
np.float32,0x42fe7811,0xb6159b6f,2
|
||||||
|
np.float32,0xc2fe7811,0xb6159b6f,2
|
||||||
|
np.float32,0x437e7811,0xbf800000,2
|
||||||
|
np.float32,0xc37e7811,0xbf800000,2
|
||||||
|
np.float32,0x4280ce28,0x34fdd672,2
|
||||||
|
np.float32,0xc280ce28,0x34fdd672,2
|
||||||
|
np.float32,0x4300ce28,0xbf800000,2
|
||||||
|
np.float32,0xc300ce28,0xbf800000,2
|
||||||
|
np.float32,0x4380ce28,0x3f800000,2
|
||||||
|
np.float32,0xc380ce28,0x3f800000,2
|
||||||
|
np.float32,0x42826048,0xbf350508,2
|
||||||
|
np.float32,0xc2826048,0xbf350508,2
|
||||||
|
np.float32,0x43026048,0x366b7958,2
|
||||||
|
np.float32,0xc3026048,0x366b7958,2
|
||||||
|
np.float32,0x43826048,0xbf800000,2
|
||||||
|
np.float32,0xc3826048,0xbf800000,2
|
||||||
|
np.float32,0x4283f268,0xbf800000,2
|
||||||
|
np.float32,0xc283f268,0xbf800000,2
|
||||||
|
np.float32,0x4303f268,0x3f800000,2
|
||||||
|
np.float32,0xc303f268,0x3f800000,2
|
||||||
|
np.float32,0x4383f268,0x3f800000,2
|
||||||
|
np.float32,0xc383f268,0x3f800000,2
|
||||||
|
np.float32,0x42858487,0xbf350504,2
|
||||||
|
np.float32,0xc2858487,0xbf350504,2
|
||||||
|
np.float32,0x43058487,0x363ea8be,2
|
||||||
|
np.float32,0xc3058487,0x363ea8be,2
|
||||||
|
np.float32,0x43858487,0xbf800000,2
|
||||||
|
np.float32,0xc3858487,0xbf800000,2
|
||||||
|
np.float32,0x428716a7,0x35588c6d,2
|
||||||
|
np.float32,0xc28716a7,0x35588c6d,2
|
||||||
|
np.float32,0x430716a7,0xbf800000,2
|
||||||
|
np.float32,0xc30716a7,0xbf800000,2
|
||||||
|
np.float32,0x438716a7,0x3f800000,2
|
||||||
|
np.float32,0xc38716a7,0x3f800000,2
|
||||||
|
np.float32,0x4288a8c7,0x3f350517,2
|
||||||
|
np.float32,0xc288a8c7,0x3f350517,2
|
||||||
|
np.float32,0x4308a8c7,0x36cb9a96,2
|
||||||
|
np.float32,0xc308a8c7,0x36cb9a96,2
|
||||||
|
np.float32,0x4388a8c7,0xbf800000,2
|
||||||
|
np.float32,0xc388a8c7,0xbf800000,2
|
||||||
|
np.float32,0x428a3ae7,0x3f800000,2
|
||||||
|
np.float32,0xc28a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x430a3ae7,0x3f800000,2
|
||||||
|
np.float32,0xc30a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x438a3ae7,0x3f800000,2
|
||||||
|
np.float32,0xc38a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x428bcd06,0x3f3504f5,2
|
||||||
|
np.float32,0xc28bcd06,0x3f3504f5,2
|
||||||
|
np.float32,0x430bcd06,0x3497675b,2
|
||||||
|
np.float32,0xc30bcd06,0x3497675b,2
|
||||||
|
np.float32,0x438bcd06,0xbf800000,2
|
||||||
|
np.float32,0xc38bcd06,0xbf800000,2
|
||||||
|
np.float32,0x428d5f26,0xb60c0105,2
|
||||||
|
np.float32,0xc28d5f26,0xb60c0105,2
|
||||||
|
np.float32,0x430d5f26,0xbf800000,2
|
||||||
|
np.float32,0xc30d5f26,0xbf800000,2
|
||||||
|
np.float32,0x438d5f26,0x3f800000,2
|
||||||
|
np.float32,0xc38d5f26,0x3f800000,2
|
||||||
|
np.float32,0x428ef146,0xbf350526,2
|
||||||
|
np.float32,0xc28ef146,0xbf350526,2
|
||||||
|
np.float32,0x430ef146,0x3710bc40,2
|
||||||
|
np.float32,0xc30ef146,0x3710bc40,2
|
||||||
|
np.float32,0x438ef146,0xbf800000,2
|
||||||
|
np.float32,0xc38ef146,0xbf800000,2
|
||||||
|
np.float32,0x42908365,0xbf800000,2
|
||||||
|
np.float32,0xc2908365,0xbf800000,2
|
||||||
|
np.float32,0x43108365,0x3f800000,2
|
||||||
|
np.float32,0xc3108365,0x3f800000,2
|
||||||
|
np.float32,0x43908365,0x3f800000,2
|
||||||
|
np.float32,0xc3908365,0x3f800000,2
|
||||||
|
np.float32,0x42921585,0xbf3504e6,2
|
||||||
|
np.float32,0xc2921585,0xbf3504e6,2
|
||||||
|
np.float32,0x43121585,0xb618cee8,2
|
||||||
|
np.float32,0xc3121585,0xb618cee8,2
|
||||||
|
np.float32,0x43921585,0xbf800000,2
|
||||||
|
np.float32,0xc3921585,0xbf800000,2
|
||||||
|
np.float32,0x4293a7a5,0x3661deee,2
|
||||||
|
np.float32,0xc293a7a5,0x3661deee,2
|
||||||
|
np.float32,0x4313a7a5,0xbf800000,2
|
||||||
|
np.float32,0xc313a7a5,0xbf800000,2
|
||||||
|
np.float32,0x4393a7a5,0x3f800000,2
|
||||||
|
np.float32,0xc393a7a5,0x3f800000,2
|
||||||
|
np.float32,0x429539c5,0x3f350536,2
|
||||||
|
np.float32,0xc29539c5,0x3f350536,2
|
||||||
|
np.float32,0x431539c5,0x373bab34,2
|
||||||
|
np.float32,0xc31539c5,0x373bab34,2
|
||||||
|
np.float32,0x439539c5,0xbf800000,2
|
||||||
|
np.float32,0xc39539c5,0xbf800000,2
|
||||||
|
np.float32,0x4296cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc296cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4316cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc316cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4396cbe4,0x3f800000,2
|
||||||
|
np.float32,0xc396cbe4,0x3f800000,2
|
||||||
|
np.float32,0x42985e04,0x3f3504d7,2
|
||||||
|
np.float32,0xc2985e04,0x3f3504d7,2
|
||||||
|
np.float32,0x43185e04,0xb6a2455d,2
|
||||||
|
np.float32,0xc3185e04,0xb6a2455d,2
|
||||||
|
np.float32,0x43985e04,0xbf800000,2
|
||||||
|
np.float32,0xc3985e04,0xbf800000,2
|
||||||
|
np.float32,0x4299f024,0xb69bde6c,2
|
||||||
|
np.float32,0xc299f024,0xb69bde6c,2
|
||||||
|
np.float32,0x4319f024,0xbf800000,2
|
||||||
|
np.float32,0xc319f024,0xbf800000,2
|
||||||
|
np.float32,0x4399f024,0x3f800000,2
|
||||||
|
np.float32,0xc399f024,0x3f800000,2
|
||||||
|
np.float32,0x429b8243,0xbf3504ea,2
|
||||||
|
np.float32,0xc29b8243,0xbf3504ea,2
|
||||||
|
np.float32,0x431b8243,0xb5cb2eb8,2
|
||||||
|
np.float32,0xc31b8243,0xb5cb2eb8,2
|
||||||
|
np.float32,0x439b8243,0xbf800000,2
|
||||||
|
np.float32,0xc39b8243,0xbf800000,2
|
||||||
|
np.float32,0x435b2047,0x3f3504c1,2
|
||||||
|
np.float32,0x42a038a2,0xb5e4ca7e,2
|
||||||
|
np.float32,0x432038a2,0xbf800000,2
|
||||||
|
np.float32,0x4345eb9b,0xbf800000,2
|
||||||
|
np.float32,0x42c5eb9b,0xb5de638c,2
|
||||||
|
np.float32,0x42eb9e94,0xb5d7fc9b,2
|
||||||
|
np.float32,0x4350ea79,0x3631dadb,2
|
||||||
|
np.float32,0x42dbe957,0xbf800000,2
|
||||||
|
np.float32,0x425be957,0xb505522a,2
|
||||||
|
np.float32,0x435be957,0x3f800000,2
|
||||||
|
np.float32,0x487fe5ab,0xba140185,2
|
||||||
|
np.float32,0x497fe5ab,0x3f7fffd5,2
|
||||||
|
np.float32,0x49ffe5ab,0x3f7fff55,2
|
||||||
|
np.float32,0x49ffeb37,0x3b9382f5,2
|
||||||
|
np.float32,0x497ff0c3,0x3b13049f,2
|
||||||
|
np.float32,0x49fff0c3,0xbf7fff57,2
|
||||||
|
np.float32,0x49fff64f,0xbb928618,2
|
||||||
|
np.float32,0x497ffbdb,0xbf7fffd6,2
|
||||||
|
np.float32,0x49fffbdb,0x3f7fff59,2
|
||||||
|
np.float32,0x48fffbdb,0xba9207c6,2
|
||||||
|
np.float32,0x4e736e56,0xbf800000,2
|
||||||
|
np.float32,0x4d4da377,0xbf800000,2
|
||||||
|
np.float32,0x4ece58c3,0xbf800000,2
|
||||||
|
np.float32,0x4ee0db9c,0xbf800000,2
|
||||||
|
np.float32,0x4dee7002,0x3f800000,2
|
||||||
|
np.float32,0x4ee86afc,0x38857a23,2
|
||||||
|
np.float32,0x4dca4f3f,0xbf800000,2
|
||||||
|
np.float32,0x4ecb48af,0xb95d1e10,2
|
||||||
|
np.float32,0x4e51e33f,0xbf800000,2
|
||||||
|
np.float32,0x4ef5f421,0xbf800000,2
|
||||||
|
np.float32,0x46027eb2,0x3e7d94c9,2
|
||||||
|
np.float32,0x4477baed,0xbe7f1824,2
|
||||||
|
np.float32,0x454b8024,0x3e7f5268,2
|
||||||
|
np.float32,0x455d2c09,0x3e7f40cb,2
|
||||||
|
np.float32,0x4768d3de,0xba14b4af,2
|
||||||
|
np.float32,0x46c1e7cd,0x3e7fb102,2
|
||||||
|
np.float32,0x44a52949,0xbe7dc9d5,2
|
||||||
|
np.float32,0x4454633a,0x3e7dbc7d,2
|
||||||
|
np.float32,0x4689810b,0x3e7eb02b,2
|
||||||
|
np.float32,0x473473cd,0xbe7eef6f,2
|
||||||
|
np.float32,0x44a5193f,0x3e7e1b1f,2
|
||||||
|
np.float32,0x46004b36,0x3e7dac59,2
|
||||||
|
np.float32,0x467f604b,0x3d7ffd3a,2
|
||||||
|
np.float32,0x45ea1805,0x3dffd2e0,2
|
||||||
|
np.float32,0x457b6af3,0x3dff7831,2
|
||||||
|
np.float32,0x44996159,0xbe7d85f4,2
|
||||||
|
np.float32,0x47883553,0xbb80584e,2
|
||||||
|
np.float32,0x44e19f0c,0xbdffcfe6,2
|
||||||
|
np.float32,0x472b3bf6,0xbe7f7a82,2
|
||||||
|
np.float32,0x4600bb4e,0x3a135e33,2
|
||||||
|
np.float32,0x449f4556,0x3e7e42e5,2
|
||||||
|
np.float32,0x474e9420,0x3dff77b2,2
|
||||||
|
np.float32,0x45cbdb23,0x3dff7240,2
|
||||||
|
np.float32,0x44222747,0x3dffb039,2
|
||||||
|
np.float32,0x4772e419,0xbdff74b8,2
|
@ -0,0 +1,135 @@
|
|||||||
|
dtype,input,output,ulperrortol
|
||||||
|
## +ve denormals ##
|
||||||
|
np.float32,0x004b4716,0x3f800000,3
|
||||||
|
np.float32,0x007b2490,0x3f800000,3
|
||||||
|
np.float32,0x007c99fa,0x3f800000,3
|
||||||
|
np.float32,0x00734a0c,0x3f800000,3
|
||||||
|
np.float32,0x0070de24,0x3f800000,3
|
||||||
|
np.float32,0x00495d65,0x3f800000,3
|
||||||
|
np.float32,0x006894f6,0x3f800000,3
|
||||||
|
np.float32,0x00555a76,0x3f800000,3
|
||||||
|
np.float32,0x004e1fb8,0x3f800000,3
|
||||||
|
np.float32,0x00687de9,0x3f800000,3
|
||||||
|
## -ve denormals ##
|
||||||
|
np.float32,0x805b59af,0x3f800000,3
|
||||||
|
np.float32,0x807ed8ed,0x3f800000,3
|
||||||
|
np.float32,0x807142ad,0x3f800000,3
|
||||||
|
np.float32,0x80772002,0x3f800000,3
|
||||||
|
np.float32,0x8062abcb,0x3f800000,3
|
||||||
|
np.float32,0x8045e31c,0x3f800000,3
|
||||||
|
np.float32,0x805f01c2,0x3f800000,3
|
||||||
|
np.float32,0x80506432,0x3f800000,3
|
||||||
|
np.float32,0x8060089d,0x3f800000,3
|
||||||
|
np.float32,0x8071292f,0x3f800000,3
|
||||||
|
## floats that output a denormal ##
|
||||||
|
np.float32,0xc2cf3fc1,0x00000001,3
|
||||||
|
np.float32,0xc2c79726,0x00000021,3
|
||||||
|
np.float32,0xc2cb295d,0x00000005,3
|
||||||
|
np.float32,0xc2b49e6b,0x00068c4c,3
|
||||||
|
np.float32,0xc2ca8116,0x00000008,3
|
||||||
|
np.float32,0xc2c23f82,0x000001d7,3
|
||||||
|
np.float32,0xc2cb69c0,0x00000005,3
|
||||||
|
np.float32,0xc2cc1f4d,0x00000003,3
|
||||||
|
np.float32,0xc2ae094e,0x00affc4c,3
|
||||||
|
np.float32,0xc2c86c44,0x00000015,3
|
||||||
|
## random floats between -87.0f and 88.0f ##
|
||||||
|
np.float32,0x4030d7e0,0x417d9a05,3
|
||||||
|
np.float32,0x426f60e8,0x6aa1be2c,3
|
||||||
|
np.float32,0x41a1b220,0x4e0efc11,3
|
||||||
|
np.float32,0xc20cc722,0x26159da7,3
|
||||||
|
np.float32,0x41c492bc,0x512ec79d,3
|
||||||
|
np.float32,0x40980210,0x42e73a0e,3
|
||||||
|
np.float32,0xbf1f7b80,0x3f094de3,3
|
||||||
|
np.float32,0x42a678a4,0x7b87a383,3
|
||||||
|
np.float32,0xc20f3cfd,0x25a1c304,3
|
||||||
|
np.float32,0x423ff34c,0x6216467f,3
|
||||||
|
np.float32,0x00000000,0x3f800000,3
|
||||||
|
## floats that cause an overflow ##
|
||||||
|
np.float32,0x7f06d8c1,0x7f800000,3
|
||||||
|
np.float32,0x7f451912,0x7f800000,3
|
||||||
|
np.float32,0x7ecceac3,0x7f800000,3
|
||||||
|
np.float32,0x7f643b45,0x7f800000,3
|
||||||
|
np.float32,0x7e910ea0,0x7f800000,3
|
||||||
|
np.float32,0x7eb4756b,0x7f800000,3
|
||||||
|
np.float32,0x7f4ec708,0x7f800000,3
|
||||||
|
np.float32,0x7f6b4551,0x7f800000,3
|
||||||
|
np.float32,0x7d8edbda,0x7f800000,3
|
||||||
|
np.float32,0x7f730718,0x7f800000,3
|
||||||
|
np.float32,0x42b17217,0x7f7fff84,3
|
||||||
|
np.float32,0x42b17218,0x7f800000,3
|
||||||
|
np.float32,0x42b17219,0x7f800000,3
|
||||||
|
np.float32,0xfef2b0bc,0x00000000,3
|
||||||
|
np.float32,0xff69f83e,0x00000000,3
|
||||||
|
np.float32,0xff4ecb12,0x00000000,3
|
||||||
|
np.float32,0xfeac6d86,0x00000000,3
|
||||||
|
np.float32,0xfde0cdb8,0x00000000,3
|
||||||
|
np.float32,0xff26aef4,0x00000000,3
|
||||||
|
np.float32,0xff6f9277,0x00000000,3
|
||||||
|
np.float32,0xff7adfc4,0x00000000,3
|
||||||
|
np.float32,0xff0ad40e,0x00000000,3
|
||||||
|
np.float32,0xff6fd8f3,0x00000000,3
|
||||||
|
np.float32,0xc2cff1b4,0x00000001,3
|
||||||
|
np.float32,0xc2cff1b5,0x00000000,3
|
||||||
|
np.float32,0xc2cff1b6,0x00000000,3
|
||||||
|
np.float32,0x7f800000,0x7f800000,3
|
||||||
|
np.float32,0xff800000,0x00000000,3
|
||||||
|
np.float32,0x4292f27c,0x7480000a,3
|
||||||
|
np.float32,0x42a920be,0x7c7fff94,3
|
||||||
|
np.float32,0x41c214c9,0x50ffffd9,3
|
||||||
|
np.float32,0x41abe686,0x4effffd9,3
|
||||||
|
np.float32,0x4287db5a,0x707fffd3,3
|
||||||
|
np.float32,0x41902cbb,0x4c800078,3
|
||||||
|
np.float32,0x42609466,0x67ffffeb,3
|
||||||
|
np.float32,0x41a65af5,0x4e7fffd1,3
|
||||||
|
np.float32,0x417f13ff,0x4affffc9,3
|
||||||
|
np.float32,0x426d0e6c,0x6a3504f2,3
|
||||||
|
np.float32,0x41bc8934,0x507fff51,3
|
||||||
|
np.float32,0x42a7bdde,0x7c0000d6,3
|
||||||
|
np.float32,0x4120cf66,0x46b504f6,3
|
||||||
|
np.float32,0x4244da8f,0x62ffff1a,3
|
||||||
|
np.float32,0x41a0cf69,0x4e000034,3
|
||||||
|
np.float32,0x41cd2bec,0x52000005,3
|
||||||
|
np.float32,0x42893e41,0x7100009e,3
|
||||||
|
np.float32,0x41b437e1,0x4fb50502,3
|
||||||
|
np.float32,0x41d8430f,0x5300001d,3
|
||||||
|
np.float32,0x4244da92,0x62ffffda,3
|
||||||
|
np.float32,0x41a0cf63,0x4dffffa9,3
|
||||||
|
np.float32,0x3eb17218,0x3fb504f3,3
|
||||||
|
np.float32,0x428729e8,0x703504dc,3
|
||||||
|
np.float32,0x41a0cf67,0x4e000014,3
|
||||||
|
np.float32,0x4252b77d,0x65800011,3
|
||||||
|
np.float32,0x41902cb9,0x4c800058,3
|
||||||
|
np.float32,0x42a0cf67,0x79800052,3
|
||||||
|
np.float32,0x4152b77b,0x48ffffe9,3
|
||||||
|
np.float32,0x41265af3,0x46ffffc8,3
|
||||||
|
np.float32,0x42187e0b,0x5affff9a,3
|
||||||
|
np.float32,0xc0d2b77c,0x3ab504f6,3
|
||||||
|
np.float32,0xc283b2ac,0x10000072,3
|
||||||
|
np.float32,0xc1cff1b4,0x2cb504f5,3
|
||||||
|
np.float32,0xc05dce9e,0x3d000000,3
|
||||||
|
np.float32,0xc28ec9d2,0x0bfffea5,3
|
||||||
|
np.float32,0xc23c893a,0x1d7fffde,3
|
||||||
|
np.float32,0xc2a920c0,0x027fff6c,3
|
||||||
|
np.float32,0xc1f9886f,0x2900002b,3
|
||||||
|
np.float32,0xc2c42920,0x000000b5,3
|
||||||
|
np.float32,0xc2893e41,0x0dfffec5,3
|
||||||
|
np.float32,0xc2c4da93,0x00000080,3
|
||||||
|
np.float32,0xc17f1401,0x3400000c,3
|
||||||
|
np.float32,0xc1902cb6,0x327fffaf,3
|
||||||
|
np.float32,0xc27c4e3b,0x11ffffc5,3
|
||||||
|
np.float32,0xc268e5c5,0x157ffe9d,3
|
||||||
|
np.float32,0xc2b4e953,0x0005a826,3
|
||||||
|
np.float32,0xc287db5a,0x0e800016,3
|
||||||
|
np.float32,0xc207db5a,0x2700000b,3
|
||||||
|
np.float32,0xc2b2d4fe,0x000ffff1,3
|
||||||
|
np.float32,0xc268e5c0,0x157fffdd,3
|
||||||
|
np.float32,0xc22920bd,0x2100003b,3
|
||||||
|
np.float32,0xc2902caf,0x0b80011e,3
|
||||||
|
np.float32,0xc1902cba,0x327fff2f,3
|
||||||
|
np.float32,0xc2ca6625,0x00000008,3
|
||||||
|
np.float32,0xc280ece8,0x10fffeb5,3
|
||||||
|
np.float32,0xc2918f94,0x0b0000ea,3
|
||||||
|
np.float32,0xc29b43d5,0x077ffffc,3
|
||||||
|
np.float32,0xc1e61ff7,0x2ab504f5,3
|
||||||
|
np.float32,0xc2867878,0x0effff15,3
|
||||||
|
np.float32,0xc2a2324a,0x04fffff4,3
|
@ -0,0 +1,118 @@
|
|||||||
|
dtype,input,output,ulperrortol
|
||||||
|
## +ve denormals ##
|
||||||
|
np.float32,0x004b4716,0xc2afbc1b,4
|
||||||
|
np.float32,0x007b2490,0xc2aec01e,4
|
||||||
|
np.float32,0x007c99fa,0xc2aeba17,4
|
||||||
|
np.float32,0x00734a0c,0xc2aee1dc,4
|
||||||
|
np.float32,0x0070de24,0xc2aeecba,4
|
||||||
|
np.float32,0x007fffff,0xc2aeac50,4
|
||||||
|
np.float32,0x00000001,0xc2ce8ed0,4
|
||||||
|
## -ve denormals ##
|
||||||
|
np.float32,0x80495d65,0xffc00000,4
|
||||||
|
np.float32,0x806894f6,0xffc00000,4
|
||||||
|
np.float32,0x80555a76,0xffc00000,4
|
||||||
|
np.float32,0x804e1fb8,0xffc00000,4
|
||||||
|
np.float32,0x80687de9,0xffc00000,4
|
||||||
|
np.float32,0x807fffff,0xffc00000,4
|
||||||
|
np.float32,0x80000001,0xffc00000,4
|
||||||
|
## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
|
||||||
|
np.float32,0x00000000,0xff800000,4
|
||||||
|
np.float32,0x80000000,0xff800000,4
|
||||||
|
np.float32,0x7f7fffff,0x42b17218,4
|
||||||
|
np.float32,0x80800000,0xffc00000,4
|
||||||
|
np.float32,0xff7fffff,0xffc00000,4
|
||||||
|
## 1.00f + 0x00000001 ##
|
||||||
|
np.float32,0x3f800000,0x00000000,4
|
||||||
|
np.float32,0x3f800001,0x33ffffff,4
|
||||||
|
np.float32,0x3f800002,0x347ffffe,4
|
||||||
|
np.float32,0x3f7fffff,0xb3800000,4
|
||||||
|
np.float32,0x3f7ffffe,0xb4000000,4
|
||||||
|
np.float32,0x3f7ffffd,0xb4400001,4
|
||||||
|
np.float32,0x402df853,0x3f7ffffe,4
|
||||||
|
np.float32,0x402df854,0x3f7fffff,4
|
||||||
|
np.float32,0x402df855,0x3f800000,4
|
||||||
|
np.float32,0x402df856,0x3f800001,4
|
||||||
|
np.float32,0x3ebc5ab0,0xbf800001,4
|
||||||
|
np.float32,0x3ebc5ab1,0xbf800000,4
|
||||||
|
np.float32,0x3ebc5ab2,0xbf800000,4
|
||||||
|
np.float32,0x3ebc5ab3,0xbf7ffffe,4
|
||||||
|
np.float32,0x423ef575,0x407768ab,4
|
||||||
|
np.float32,0x427b8c61,0x408485dd,4
|
||||||
|
np.float32,0x4211e9ee,0x406630b0,4
|
||||||
|
np.float32,0x424d5c41,0x407c0fed,4
|
||||||
|
np.float32,0x42be722a,0x4091cc91,4
|
||||||
|
np.float32,0x42b73d30,0x4090908b,4
|
||||||
|
np.float32,0x427e48e2,0x4084de7f,4
|
||||||
|
np.float32,0x428f759b,0x4088bba3,4
|
||||||
|
np.float32,0x41629069,0x4029a0cc,4
|
||||||
|
np.float32,0x4272c99d,0x40836379,4
|
||||||
|
np.float32,0x4d1b7458,0x4197463d,4
|
||||||
|
np.float32,0x4f10c594,0x41ace2b2,4
|
||||||
|
np.float32,0x4ea397c2,0x41a85171,4
|
||||||
|
np.float32,0x4fefa9d1,0x41b6769c,4
|
||||||
|
np.float32,0x4ebac6ab,0x41a960dc,4
|
||||||
|
np.float32,0x4f6efb42,0x41b0e535,4
|
||||||
|
np.float32,0x4e9ab8e7,0x41a7df44,4
|
||||||
|
np.float32,0x4e81b5d1,0x41a67625,4
|
||||||
|
np.float32,0x5014d9f2,0x41b832bd,4
|
||||||
|
np.float32,0x4f02175c,0x41ac07b8,4
|
||||||
|
np.float32,0x7f034f89,0x42b01c47,4
|
||||||
|
np.float32,0x7f56d00e,0x42b11849,4
|
||||||
|
np.float32,0x7f1cd5f6,0x42b0773a,4
|
||||||
|
np.float32,0x7e979174,0x42af02d7,4
|
||||||
|
np.float32,0x7f23369f,0x42b08ba2,4
|
||||||
|
np.float32,0x7f0637ae,0x42b0277d,4
|
||||||
|
np.float32,0x7efcb6e8,0x42b00897,4
|
||||||
|
np.float32,0x7f7907c8,0x42b163f6,4
|
||||||
|
np.float32,0x7e95c4c2,0x42aefcba,4
|
||||||
|
np.float32,0x7f4577b2,0x42b0ed2d,4
|
||||||
|
np.float32,0x3f49c92e,0xbe73ae84,4
|
||||||
|
np.float32,0x3f4a23d1,0xbe71e2f8,4
|
||||||
|
np.float32,0x3f4abb67,0xbe6ee430,4
|
||||||
|
np.float32,0x3f48169a,0xbe7c5532,4
|
||||||
|
np.float32,0x3f47f5fa,0xbe7cfc37,4
|
||||||
|
np.float32,0x3f488309,0xbe7a2ad8,4
|
||||||
|
np.float32,0x3f479df4,0xbe7ebf5f,4
|
||||||
|
np.float32,0x3f47cfff,0xbe7dbec9,4
|
||||||
|
np.float32,0x3f496704,0xbe75a125,4
|
||||||
|
np.float32,0x3f478ee8,0xbe7f0c92,4
|
||||||
|
np.float32,0x3f4a763b,0xbe7041ce,4
|
||||||
|
np.float32,0x3f47a108,0xbe7eaf94,4
|
||||||
|
np.float32,0x3f48136c,0xbe7c6578,4
|
||||||
|
np.float32,0x3f481c17,0xbe7c391c,4
|
||||||
|
np.float32,0x3f47cd28,0xbe7dcd56,4
|
||||||
|
np.float32,0x3f478be8,0xbe7f1bf7,4
|
||||||
|
np.float32,0x3f4c1f8e,0xbe67e367,4
|
||||||
|
np.float32,0x3f489b0c,0xbe79b03f,4
|
||||||
|
np.float32,0x3f4934cf,0xbe76a08a,4
|
||||||
|
np.float32,0x3f4954df,0xbe75fd6a,4
|
||||||
|
np.float32,0x3f47a3f5,0xbe7ea093,4
|
||||||
|
np.float32,0x3f4ba4fc,0xbe6a4b02,4
|
||||||
|
np.float32,0x3f47a0e1,0xbe7eb05c,4
|
||||||
|
np.float32,0x3f48c30a,0xbe78e42f,4
|
||||||
|
np.float32,0x3f48cab8,0xbe78bd05,4
|
||||||
|
np.float32,0x3f4b0569,0xbe6d6ea4,4
|
||||||
|
np.float32,0x3f47de32,0xbe7d7607,4
|
||||||
|
np.float32,0x3f477328,0xbe7f9b00,4
|
||||||
|
np.float32,0x3f496dab,0xbe757f52,4
|
||||||
|
np.float32,0x3f47662c,0xbe7fddac,4
|
||||||
|
np.float32,0x3f48ddd8,0xbe785b80,4
|
||||||
|
np.float32,0x3f481866,0xbe7c4bff,4
|
||||||
|
np.float32,0x3f48b119,0xbe793fb6,4
|
||||||
|
np.float32,0x3f48c7e8,0xbe78cb5c,4
|
||||||
|
np.float32,0x3f4985f6,0xbe7503da,4
|
||||||
|
np.float32,0x3f483fdf,0xbe7b8212,4
|
||||||
|
np.float32,0x3f4b1c76,0xbe6cfa67,4
|
||||||
|
np.float32,0x3f480b2e,0xbe7c8fa8,4
|
||||||
|
np.float32,0x3f48745f,0xbe7a75bf,4
|
||||||
|
np.float32,0x3f485bda,0xbe7af308,4
|
||||||
|
np.float32,0x3f47a660,0xbe7e942c,4
|
||||||
|
np.float32,0x3f47d4d5,0xbe7da600,4
|
||||||
|
np.float32,0x3f4b0a26,0xbe6d56be,4
|
||||||
|
np.float32,0x3f4a4883,0xbe712924,4
|
||||||
|
np.float32,0x3f4769e7,0xbe7fca84,4
|
||||||
|
np.float32,0x3f499702,0xbe74ad3f,4
|
||||||
|
np.float32,0x3f494ab1,0xbe763131,4
|
||||||
|
np.float32,0x3f476b69,0xbe7fc2c6,4
|
||||||
|
np.float32,0x3f4884e8,0xbe7a214a,4
|
||||||
|
np.float32,0x3f486945,0xbe7aae76,4
|
@ -0,0 +1,707 @@
|
|||||||
|
dtype,input,output,ulperrortol
|
||||||
|
## +ve denormals ##
|
||||||
|
np.float32,0x004b4716,0x004b4716,2
|
||||||
|
np.float32,0x007b2490,0x007b2490,2
|
||||||
|
np.float32,0x007c99fa,0x007c99fa,2
|
||||||
|
np.float32,0x00734a0c,0x00734a0c,2
|
||||||
|
np.float32,0x0070de24,0x0070de24,2
|
||||||
|
np.float32,0x007fffff,0x007fffff,2
|
||||||
|
np.float32,0x00000001,0x00000001,2
|
||||||
|
## -ve denormals ##
|
||||||
|
np.float32,0x80495d65,0x80495d65,2
|
||||||
|
np.float32,0x806894f6,0x806894f6,2
|
||||||
|
np.float32,0x80555a76,0x80555a76,2
|
||||||
|
np.float32,0x804e1fb8,0x804e1fb8,2
|
||||||
|
np.float32,0x80687de9,0x80687de9,2
|
||||||
|
np.float32,0x807fffff,0x807fffff,2
|
||||||
|
np.float32,0x80000001,0x80000001,2
|
||||||
|
## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
|
||||||
|
np.float32,0x00000000,0x00000000,2
|
||||||
|
np.float32,0x80000000,0x80000000,2
|
||||||
|
np.float32,0x00800000,0x00800000,2
|
||||||
|
np.float32,0x7f7fffff,0xbf0599b3,2
|
||||||
|
np.float32,0x80800000,0x80800000,2
|
||||||
|
np.float32,0xff7fffff,0x3f0599b3,2
|
||||||
|
## 1.00f ##
|
||||||
|
np.float32,0x3f800000,0x3f576aa4,2
|
||||||
|
np.float32,0x3f800001,0x3f576aa6,2
|
||||||
|
np.float32,0x3f800002,0x3f576aa7,2
|
||||||
|
np.float32,0xc090a8b0,0x3f7b4e48,2
|
||||||
|
np.float32,0x41ce3184,0x3f192d43,2
|
||||||
|
np.float32,0xc1d85848,0xbf7161cb,2
|
||||||
|
np.float32,0x402b8820,0x3ee3f29f,2
|
||||||
|
np.float32,0x42b4e454,0x3f1d0151,2
|
||||||
|
np.float32,0x42a67a60,0x3f7ffa4c,2
|
||||||
|
np.float32,0x41d92388,0x3f67beef,2
|
||||||
|
np.float32,0x422dd66c,0xbeffb0c1,2
|
||||||
|
np.float32,0xc28f5be6,0xbf0bae79,2
|
||||||
|
np.float32,0x41ab2674,0x3f0ffe2b,2
|
||||||
|
np.float32,0xd0102756,0x3f227e8a,2
|
||||||
|
np.float32,0xcf99405e,0x3f73ad00,2
|
||||||
|
np.float32,0xcfd83a12,0xbf7151a7,2
|
||||||
|
np.float32,0x4fb54db0,0xbe46354b,2
|
||||||
|
np.float32,0xcfcca29d,0xbe9345e6,2
|
||||||
|
np.float32,0xceec2ac0,0x3e98dc89,2
|
||||||
|
np.float32,0xcfdca97f,0xbf60b2b4,2
|
||||||
|
np.float32,0xcfe92b0a,0xbf222705,2
|
||||||
|
np.float32,0x5014b0eb,0x3f63e75c,2
|
||||||
|
np.float32,0xcfa7ee96,0x3f62ada4,2
|
||||||
|
np.float32,0x754c09a0,0xbf617056,2
|
||||||
|
np.float32,0x77a731fb,0x3f44472b,2
|
||||||
|
np.float32,0x76de2494,0xbe680739,2
|
||||||
|
np.float32,0xf74920dc,0xbf193338,2
|
||||||
|
np.float32,0x7707a312,0xbf6f51b1,2
|
||||||
|
np.float32,0x75bf9790,0xbd0f1a47,2
|
||||||
|
np.float32,0xf4ca7c40,0xbf7d45e7,2
|
||||||
|
np.float32,0x77e91899,0x3f767181,2
|
||||||
|
np.float32,0xf74c9820,0xbd685b75,2
|
||||||
|
np.float32,0x7785ca29,0x3f78ee61,2
|
||||||
|
np.float32,0x3f490fdb,0x3f3504f3,2
|
||||||
|
np.float32,0xbf490fdb,0xbf3504f3,2
|
||||||
|
np.float32,0x3fc90fdb,0x3f800000,2
|
||||||
|
np.float32,0xbfc90fdb,0xbf800000,2
|
||||||
|
np.float32,0x40490fdb,0xb3bbbd2e,2
|
||||||
|
np.float32,0xc0490fdb,0x33bbbd2e,2
|
||||||
|
np.float32,0x3fc90fdb,0x3f800000,2
|
||||||
|
np.float32,0xbfc90fdb,0xbf800000,2
|
||||||
|
np.float32,0x40490fdb,0xb3bbbd2e,2
|
||||||
|
np.float32,0xc0490fdb,0x33bbbd2e,2
|
||||||
|
np.float32,0x40c90fdb,0x343bbd2e,2
|
||||||
|
np.float32,0xc0c90fdb,0xb43bbd2e,2
|
||||||
|
np.float32,0x4016cbe4,0x3f3504f3,2
|
||||||
|
np.float32,0xc016cbe4,0xbf3504f3,2
|
||||||
|
np.float32,0x4096cbe4,0xbf800000,2
|
||||||
|
np.float32,0xc096cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4116cbe4,0xb2ccde2e,2
|
||||||
|
np.float32,0xc116cbe4,0x32ccde2e,2
|
||||||
|
np.float32,0x40490fdb,0xb3bbbd2e,2
|
||||||
|
np.float32,0xc0490fdb,0x33bbbd2e,2
|
||||||
|
np.float32,0x40c90fdb,0x343bbd2e,2
|
||||||
|
np.float32,0xc0c90fdb,0xb43bbd2e,2
|
||||||
|
np.float32,0x41490fdb,0x34bbbd2e,2
|
||||||
|
np.float32,0xc1490fdb,0xb4bbbd2e,2
|
||||||
|
np.float32,0x407b53d2,0xbf3504f5,2
|
||||||
|
np.float32,0xc07b53d2,0x3f3504f5,2
|
||||||
|
np.float32,0x40fb53d2,0x3f800000,2
|
||||||
|
np.float32,0xc0fb53d2,0xbf800000,2
|
||||||
|
np.float32,0x417b53d2,0xb535563d,2
|
||||||
|
np.float32,0xc17b53d2,0x3535563d,2
|
||||||
|
np.float32,0x4096cbe4,0xbf800000,2
|
||||||
|
np.float32,0xc096cbe4,0x3f800000,2
|
||||||
|
np.float32,0x4116cbe4,0xb2ccde2e,2
|
||||||
|
np.float32,0xc116cbe4,0x32ccde2e,2
|
||||||
|
np.float32,0x4196cbe4,0x334cde2e,2
|
||||||
|
np.float32,0xc196cbe4,0xb34cde2e,2
|
||||||
|
np.float32,0x40afede0,0xbf3504ef,2
|
||||||
|
np.float32,0xc0afede0,0x3f3504ef,2
|
||||||
|
np.float32,0x412fede0,0xbf800000,2
|
||||||
|
np.float32,0xc12fede0,0x3f800000,2
|
||||||
|
np.float32,0x41afede0,0xb5b222c4,2
|
||||||
|
np.float32,0xc1afede0,0x35b222c4,2
|
||||||
|
np.float32,0x40c90fdb,0x343bbd2e,2
|
||||||
|
np.float32,0xc0c90fdb,0xb43bbd2e,2
|
||||||
|
np.float32,0x41490fdb,0x34bbbd2e,2
|
||||||
|
np.float32,0xc1490fdb,0xb4bbbd2e,2
|
||||||
|
np.float32,0x41c90fdb,0x353bbd2e,2
|
||||||
|
np.float32,0xc1c90fdb,0xb53bbd2e,2
|
||||||
|
np.float32,0x40e231d6,0x3f3504f3,2
|
||||||
|
np.float32,0xc0e231d6,0xbf3504f3,2
|
||||||
|
np.float32,0x416231d6,0x3f800000,2
|
||||||
|
np.float32,0xc16231d6,0xbf800000,2
|
||||||
|
np.float32,0x41e231d6,0xb399a6a2,2
|
||||||
|
np.float32,0xc1e231d6,0x3399a6a2,2
|
||||||
|
np.float32,0x40fb53d2,0x3f800000,2
|
||||||
|
np.float32,0xc0fb53d2,0xbf800000,2
|
||||||
|
np.float32,0x417b53d2,0xb535563d,2
|
||||||
|
np.float32,0xc17b53d2,0x3535563d,2
|
||||||
|
np.float32,0x41fb53d2,0x35b5563d,2
|
||||||
|
np.float32,0xc1fb53d2,0xb5b5563d,2
|
||||||
|
np.float32,0x410a3ae7,0x3f3504eb,2
|
||||||
|
np.float32,0xc10a3ae7,0xbf3504eb,2
|
||||||
|
np.float32,0x418a3ae7,0xbf800000,2
|
||||||
|
np.float32,0xc18a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x420a3ae7,0xb6308908,2
|
||||||
|
np.float32,0xc20a3ae7,0x36308908,2
|
||||||
|
np.float32,0x4116cbe4,0xb2ccde2e,2
|
||||||
|
np.float32,0xc116cbe4,0x32ccde2e,2
|
||||||
|
np.float32,0x4196cbe4,0x334cde2e,2
|
||||||
|
np.float32,0xc196cbe4,0xb34cde2e,2
|
||||||
|
np.float32,0x4216cbe4,0x33ccde2e,2
|
||||||
|
np.float32,0xc216cbe4,0xb3ccde2e,2
|
||||||
|
np.float32,0x41235ce2,0xbf3504f7,2
|
||||||
|
np.float32,0xc1235ce2,0x3f3504f7,2
|
||||||
|
np.float32,0x41a35ce2,0x3f800000,2
|
||||||
|
np.float32,0xc1a35ce2,0xbf800000,2
|
||||||
|
np.float32,0x42235ce2,0xb5b889b6,2
|
||||||
|
np.float32,0xc2235ce2,0x35b889b6,2
|
||||||
|
np.float32,0x412fede0,0xbf800000,2
|
||||||
|
np.float32,0xc12fede0,0x3f800000,2
|
||||||
|
np.float32,0x41afede0,0xb5b222c4,2
|
||||||
|
np.float32,0xc1afede0,0x35b222c4,2
|
||||||
|
np.float32,0x422fede0,0x363222c4,2
|
||||||
|
np.float32,0xc22fede0,0xb63222c4,2
|
||||||
|
np.float32,0x413c7edd,0xbf3504f3,2
|
||||||
|
np.float32,0xc13c7edd,0x3f3504f3,2
|
||||||
|
np.float32,0x41bc7edd,0xbf800000,2
|
||||||
|
np.float32,0xc1bc7edd,0x3f800000,2
|
||||||
|
np.float32,0x423c7edd,0xb4000add,2
|
||||||
|
np.float32,0xc23c7edd,0x34000add,2
|
||||||
|
np.float32,0x41490fdb,0x34bbbd2e,2
|
||||||
|
np.float32,0xc1490fdb,0xb4bbbd2e,2
|
||||||
|
np.float32,0x41c90fdb,0x353bbd2e,2
|
||||||
|
np.float32,0xc1c90fdb,0xb53bbd2e,2
|
||||||
|
np.float32,0x42490fdb,0x35bbbd2e,2
|
||||||
|
np.float32,0xc2490fdb,0xb5bbbd2e,2
|
||||||
|
np.float32,0x4155a0d9,0x3f3504fb,2
|
||||||
|
np.float32,0xc155a0d9,0xbf3504fb,2
|
||||||
|
np.float32,0x41d5a0d9,0x3f800000,2
|
||||||
|
np.float32,0xc1d5a0d9,0xbf800000,2
|
||||||
|
np.float32,0x4255a0d9,0xb633bc81,2
|
||||||
|
np.float32,0xc255a0d9,0x3633bc81,2
|
||||||
|
np.float32,0x416231d6,0x3f800000,2
|
||||||
|
np.float32,0xc16231d6,0xbf800000,2
|
||||||
|
np.float32,0x41e231d6,0xb399a6a2,2
|
||||||
|
np.float32,0xc1e231d6,0x3399a6a2,2
|
||||||
|
np.float32,0x426231d6,0x3419a6a2,2
|
||||||
|
np.float32,0xc26231d6,0xb419a6a2,2
|
||||||
|
np.float32,0x416ec2d4,0x3f3504ef,2
|
||||||
|
np.float32,0xc16ec2d4,0xbf3504ef,2
|
||||||
|
np.float32,0x41eec2d4,0xbf800000,2
|
||||||
|
np.float32,0xc1eec2d4,0x3f800000,2
|
||||||
|
np.float32,0x426ec2d4,0xb5bef0a7,2
|
||||||
|
np.float32,0xc26ec2d4,0x35bef0a7,2
|
||||||
|
np.float32,0x417b53d2,0xb535563d,2
|
||||||
|
np.float32,0xc17b53d2,0x3535563d,2
|
||||||
|
np.float32,0x41fb53d2,0x35b5563d,2
|
||||||
|
np.float32,0xc1fb53d2,0xb5b5563d,2
|
||||||
|
np.float32,0x427b53d2,0x3635563d,2
|
||||||
|
np.float32,0xc27b53d2,0xb635563d,2
|
||||||
|
np.float32,0x4183f268,0xbf3504ff,2
|
||||||
|
np.float32,0xc183f268,0x3f3504ff,2
|
||||||
|
np.float32,0x4203f268,0x3f800000,2
|
||||||
|
np.float32,0xc203f268,0xbf800000,2
|
||||||
|
np.float32,0x4283f268,0xb6859a13,2
|
||||||
|
np.float32,0xc283f268,0x36859a13,2
|
||||||
|
np.float32,0x418a3ae7,0xbf800000,2
|
||||||
|
np.float32,0xc18a3ae7,0x3f800000,2
|
||||||
|
np.float32,0x420a3ae7,0xb6308908,2
|
||||||
|
np.float32,0xc20a3ae7,0x36308908,2
|
||||||
|
np.float32,0x428a3ae7,0x36b08908,2
|
||||||
|
np.float32,0xc28a3ae7,0xb6b08908,2
|
||||||
|
np.float32,0x41908365,0xbf3504f6,2
|
||||||
|
np.float32,0xc1908365,0x3f3504f6,2
|
||||||
|
np.float32,0x42108365,0xbf800000,2
|
||||||
|
np.float32,0xc2108365,0x3f800000,2
|
||||||
|
np.float32,0x42908365,0x3592200d,2
|
||||||
|
np.float32,0xc2908365,0xb592200d,2
|
||||||
|
np.float32,0x4196cbe4,0x334cde2e,2
|
||||||
|
np.float32,0xc196cbe4,0xb34cde2e,2
|
||||||
|
np.float32,0x4216cbe4,0x33ccde2e,2
|
||||||
|
np.float32,0xc216cbe4,0xb3ccde2e,2
|
||||||
|
np.float32,0x4296cbe4,0x344cde2e,2
|
||||||
|
np.float32,0xc296cbe4,0xb44cde2e,2
|
||||||
|
np.float32,0x419d1463,0x3f3504f8,2
|
||||||
|
np.float32,0xc19d1463,0xbf3504f8,2
|
||||||
|
np.float32,0x421d1463,0x3f800000,2
|
||||||
|
np.float32,0xc21d1463,0xbf800000,2
|
||||||
|
np.float32,0x429d1463,0xb5c55799,2
|
||||||
|
np.float32,0xc29d1463,0x35c55799,2
|
||||||
|
np.float32,0x41a35ce2,0x3f800000,2
|
||||||
|
np.float32,0xc1a35ce2,0xbf800000,2
|
||||||
|
np.float32,0x42235ce2,0xb5b889b6,2
|
||||||
|
np.float32,0xc2235ce2,0x35b889b6,2
|
||||||
|
np.float32,0x42a35ce2,0x363889b6,2
|
||||||
|
np.float32,0xc2a35ce2,0xb63889b6,2
|
||||||
|
np.float32,0x41a9a561,0x3f3504e7,2
|
||||||
|
np.float32,0xc1a9a561,0xbf3504e7,2
|
||||||
|
np.float32,0x4229a561,0xbf800000,2
|
||||||
|
np.float32,0xc229a561,0x3f800000,2
|
||||||
|
np.float32,0x42a9a561,0xb68733d0,2
|
||||||
|
np.float32,0xc2a9a561,0x368733d0,2
|
||||||
|
np.float32,0x41afede0,0xb5b222c4,2
|
||||||
|
np.float32,0xc1afede0,0x35b222c4,2
|
||||||
|
np.float32,0x422fede0,0x363222c4,2
|
||||||
|
np.float32,0xc22fede0,0xb63222c4,2
|
||||||
|
np.float32,0x42afede0,0x36b222c4,2
|
||||||
|
np.float32,0xc2afede0,0xb6b222c4,2
|
||||||
|
np.float32,0x41b6365e,0xbf3504f0,2
|
||||||
|
np.float32,0xc1b6365e,0x3f3504f0,2
|
||||||
|
np.float32,0x4236365e,0x3f800000,2
|
||||||
|
np.float32,0xc236365e,0xbf800000,2
|
||||||
|
np.float32,0x42b6365e,0x358bb91c,2
|
||||||
|
np.float32,0xc2b6365e,0xb58bb91c,2
|
||||||
|
np.float32,0x41bc7edd,0xbf800000,2
|
||||||
|
np.float32,0xc1bc7edd,0x3f800000,2
|
||||||
|
np.float32,0x423c7edd,0xb4000add,2
|
||||||
|
np.float32,0xc23c7edd,0x34000add,2
|
||||||
|
np.float32,0x42bc7edd,0x34800add,2
|
||||||
|
np.float32,0xc2bc7edd,0xb4800add,2
|
||||||
|
np.float32,0x41c2c75c,0xbf3504ef,2
|
||||||
|
np.float32,0xc1c2c75c,0x3f3504ef,2
|
||||||
|
np.float32,0x4242c75c,0xbf800000,2
|
||||||
|
np.float32,0xc242c75c,0x3f800000,2
|
||||||
|
np.float32,0x42c2c75c,0xb5cbbe8a,2
|
||||||
|
np.float32,0xc2c2c75c,0x35cbbe8a,2
|
||||||
|
np.float32,0x41c90fdb,0x353bbd2e,2
|
||||||
|
np.float32,0xc1c90fdb,0xb53bbd2e,2
|
||||||
|
np.float32,0x42490fdb,0x35bbbd2e,2
|
||||||
|
np.float32,0xc2490fdb,0xb5bbbd2e,2
|
||||||
|
np.float32,0x42c90fdb,0x363bbd2e,2
|
||||||
|
np.float32,0xc2c90fdb,0xb63bbd2e,2
|
||||||
|
np.float32,0x41cf585a,0x3f3504ff,2
|
||||||
|
np.float32,0xc1cf585a,0xbf3504ff,2
|
||||||
|
np.float32,0x424f585a,0x3f800000,2
|
||||||
|
np.float32,0xc24f585a,0xbf800000,2
|
||||||
|
np.float32,0x42cf585a,0xb688cd8c,2
|
||||||
|
np.float32,0xc2cf585a,0x3688cd8c,2
|
||||||
|
np.float32,0x41d5a0d9,0x3f800000,2
|
||||||
|
np.float32,0xc1d5a0d9,0xbf800000,2
|
||||||
|
np.float32,0x4255a0d9,0xb633bc81,2
|
||||||
|
np.float32,0xc255a0d9,0x3633bc81,2
|
||||||
|
np.float32,0x42d5a0d9,0x36b3bc81,2
|
||||||
|
np.float32,0xc2d5a0d9,0xb6b3bc81,2
|
||||||
|
np.float32,0x41dbe958,0x3f3504e0,2
|
||||||
|
np.float32,0xc1dbe958,0xbf3504e0,2
|
||||||
|
np.float32,0x425be958,0xbf800000,2
|
||||||
|
np.float32,0xc25be958,0x3f800000,2
|
||||||
|
np.float32,0x42dbe958,0xb6deab75,2
|
||||||
|
np.float32,0xc2dbe958,0x36deab75,2
|
||||||
|
np.float32,0x41e231d6,0xb399a6a2,2
|
||||||
|
np.float32,0xc1e231d6,0x3399a6a2,2
|
||||||
|
np.float32,0x426231d6,0x3419a6a2,2
|
||||||
|
np.float32,0xc26231d6,0xb419a6a2,2
|
||||||
|
np.float32,0x42e231d6,0x3499a6a2,2
|
||||||
|
np.float32,0xc2e231d6,0xb499a6a2,2
|
||||||
|
np.float32,0x41e87a55,0xbf3504f8,2
|
||||||
|
np.float32,0xc1e87a55,0x3f3504f8,2
|
||||||
|
np.float32,0x42687a55,0x3f800000,2
|
||||||
|
np.float32,0xc2687a55,0xbf800000,2
|
||||||
|
np.float32,0x42e87a55,0xb5d2257b,2
|
||||||
|
np.float32,0xc2e87a55,0x35d2257b,2
|
||||||
|
np.float32,0x41eec2d4,0xbf800000,2
|
||||||
|
np.float32,0xc1eec2d4,0x3f800000,2
|
||||||
|
np.float32,0x426ec2d4,0xb5bef0a7,2
|
||||||
|
np.float32,0xc26ec2d4,0x35bef0a7,2
|
||||||
|
np.float32,0x42eec2d4,0x363ef0a7,2
|
||||||
|
np.float32,0xc2eec2d4,0xb63ef0a7,2
|
||||||
|
np.float32,0x41f50b53,0xbf3504e7,2
|
||||||
|
np.float32,0xc1f50b53,0x3f3504e7,2
|
||||||
|
np.float32,0x42750b53,0xbf800000,2
|
||||||
|
np.float32,0xc2750b53,0x3f800000,2
|
||||||
|
np.float32,0x42f50b53,0xb68a6748,2
|
||||||
|
np.float32,0xc2f50b53,0x368a6748,2
|
||||||
|
np.float32,0x41fb53d2,0x35b5563d,2
|
||||||
|
np.float32,0xc1fb53d2,0xb5b5563d,2
|
||||||
|
np.float32,0x427b53d2,0x3635563d,2
|
||||||
|
np.float32,0xc27b53d2,0xb635563d,2
|
||||||
|
np.float32,0x42fb53d2,0x36b5563d,2
|
||||||
|
np.float32,0xc2fb53d2,0xb6b5563d,2
|
||||||
|
np.float32,0x4200ce28,0x3f3504f0,2
|
||||||
|
np.float32,0xc200ce28,0xbf3504f0,2
|
||||||
|
np.float32,0x4280ce28,0x3f800000,2
|
||||||
|
np.float32,0xc280ce28,0xbf800000,2
|
||||||
|
np.float32,0x4300ce28,0x357dd672,2
|
||||||
|
np.float32,0xc300ce28,0xb57dd672,2
|
||||||
|
np.float32,0x4203f268,0x3f800000,2
|
||||||
|
np.float32,0xc203f268,0xbf800000,2
|
||||||
|
np.float32,0x4283f268,0xb6859a13,2
|
||||||
|
np.float32,0xc283f268,0x36859a13,2
|
||||||
|
np.float32,0x4303f268,0x37059a13,2
|
||||||
|
np.float32,0xc303f268,0xb7059a13,2
|
||||||
|
np.float32,0x420716a7,0x3f3504ee,2
|
||||||
|
np.float32,0xc20716a7,0xbf3504ee,2
|
||||||
|
np.float32,0x428716a7,0xbf800000,2
|
||||||
|
np.float32,0xc28716a7,0x3f800000,2
|
||||||
|
np.float32,0x430716a7,0xb5d88c6d,2
|
||||||
|
np.float32,0xc30716a7,0x35d88c6d,2
|
||||||
|
np.float32,0x420a3ae7,0xb6308908,2
|
||||||
|
np.float32,0xc20a3ae7,0x36308908,2
|
||||||
|
np.float32,0x428a3ae7,0x36b08908,2
|
||||||
|
np.float32,0xc28a3ae7,0xb6b08908,2
|
||||||
|
np.float32,0x430a3ae7,0x37308908,2
|
||||||
|
np.float32,0xc30a3ae7,0xb7308908,2
|
||||||
|
np.float32,0x420d5f26,0xbf350500,2
|
||||||
|
np.float32,0xc20d5f26,0x3f350500,2
|
||||||
|
np.float32,0x428d5f26,0x3f800000,2
|
||||||
|
np.float32,0xc28d5f26,0xbf800000,2
|
||||||
|
np.float32,0x430d5f26,0xb68c0105,2
|
||||||
|
np.float32,0xc30d5f26,0x368c0105,2
|
||||||
|
np.float32,0x42108365,0xbf800000,2
|
||||||
|
np.float32,0xc2108365,0x3f800000,2
|
||||||
|
np.float32,0x42908365,0x3592200d,2
|
||||||
|
np.float32,0xc2908365,0xb592200d,2
|
||||||
|
np.float32,0x43108365,0xb612200d,2
|
||||||
|
np.float32,0xc3108365,0x3612200d,2
|
||||||
|
np.float32,0x4213a7a5,0xbf3504df,2
|
||||||
|
np.float32,0xc213a7a5,0x3f3504df,2
|
||||||
|
np.float32,0x4293a7a5,0xbf800000,2
|
||||||
|
np.float32,0xc293a7a5,0x3f800000,2
|
||||||
|
np.float32,0x4313a7a5,0xb6e1deee,2
|
||||||
|
np.float32,0xc313a7a5,0x36e1deee,2
|
||||||
|
np.float32,0x4216cbe4,0x33ccde2e,2
|
||||||
|
np.float32,0xc216cbe4,0xb3ccde2e,2
|
||||||
|
np.float32,0x4296cbe4,0x344cde2e,2
|
||||||
|
np.float32,0xc296cbe4,0xb44cde2e,2
|
||||||
|
np.float32,0x4316cbe4,0x34ccde2e,2
|
||||||
|
np.float32,0xc316cbe4,0xb4ccde2e,2
|
||||||
|
np.float32,0x4219f024,0x3f35050f,2
|
||||||
|
np.float32,0xc219f024,0xbf35050f,2
|
||||||
|
np.float32,0x4299f024,0x3f800000,2
|
||||||
|
np.float32,0xc299f024,0xbf800000,2
|
||||||
|
np.float32,0x4319f024,0xb71bde6c,2
|
||||||
|
np.float32,0xc319f024,0x371bde6c,2
|
||||||
|
np.float32,0x421d1463,0x3f800000,2
|
||||||
|
np.float32,0xc21d1463,0xbf800000,2
|
||||||
|
np.float32,0x429d1463,0xb5c55799,2
|
||||||
|
np.float32,0xc29d1463,0x35c55799,2
|
||||||
|
np.float32,0x431d1463,0x36455799,2
|
||||||
|
np.float32,0xc31d1463,0xb6455799,2
|
||||||
|
np.float32,0x422038a3,0x3f3504d0,2
|
||||||
|
np.float32,0xc22038a3,0xbf3504d0,2
|
||||||
|
np.float32,0x42a038a3,0xbf800000,2
|
||||||
|
np.float32,0xc2a038a3,0x3f800000,2
|
||||||
|
np.float32,0x432038a3,0xb746cd61,2
|
||||||
|
np.float32,0xc32038a3,0x3746cd61,2
|
||||||
|
np.float32,0x42235ce2,0xb5b889b6,2
|
||||||
|
np.float32,0xc2235ce2,0x35b889b6,2
|
||||||
|
np.float32,0x42a35ce2,0x363889b6,2
|
||||||
|
np.float32,0xc2a35ce2,0xb63889b6,2
|
||||||
|
np.float32,0x43235ce2,0x36b889b6,2
|
||||||
|
np.float32,0xc3235ce2,0xb6b889b6,2
|
||||||
|
np.float32,0x42268121,0xbf3504f1,2
|
||||||
|
np.float32,0xc2268121,0x3f3504f1,2
|
||||||
|
np.float32,0x42a68121,0x3f800000,2
|
||||||
|
np.float32,0xc2a68121,0xbf800000,2
|
||||||
|
np.float32,0x43268121,0x35643aac,2
|
||||||
|
np.float32,0xc3268121,0xb5643aac,2
|
||||||
|
np.float32,0x4229a561,0xbf800000,2
|
||||||
|
np.float32,0xc229a561,0x3f800000,2
|
||||||
|
np.float32,0x42a9a561,0xb68733d0,2
|
||||||
|
np.float32,0xc2a9a561,0x368733d0,2
|
||||||
|
np.float32,0x4329a561,0x370733d0,2
|
||||||
|
np.float32,0xc329a561,0xb70733d0,2
|
||||||
|
np.float32,0x422cc9a0,0xbf3504ee,2
|
||||||
|
np.float32,0xc22cc9a0,0x3f3504ee,2
|
||||||
|
np.float32,0x42acc9a0,0xbf800000,2
|
||||||
|
np.float32,0xc2acc9a0,0x3f800000,2
|
||||||
|
np.float32,0x432cc9a0,0xb5e55a50,2
|
||||||
|
np.float32,0xc32cc9a0,0x35e55a50,2
|
||||||
|
np.float32,0x422fede0,0x363222c4,2
|
||||||
|
np.float32,0xc22fede0,0xb63222c4,2
|
||||||
|
np.float32,0x42afede0,0x36b222c4,2
|
||||||
|
np.float32,0xc2afede0,0xb6b222c4,2
|
||||||
|
np.float32,0x432fede0,0x373222c4,2
|
||||||
|
np.float32,0xc32fede0,0xb73222c4,2
|
||||||
|
np.float32,0x4233121f,0x3f350500,2
|
||||||
|
np.float32,0xc233121f,0xbf350500,2
|
||||||
|
np.float32,0x42b3121f,0x3f800000,2
|
||||||
|
np.float32,0xc2b3121f,0xbf800000,2
|
||||||
|
np.float32,0x4333121f,0xb68f347d,2
|
||||||
|
np.float32,0xc333121f,0x368f347d,2
|
||||||
|
np.float32,0x4236365e,0x3f800000,2
|
||||||
|
np.float32,0xc236365e,0xbf800000,2
|
||||||
|
np.float32,0x42b6365e,0x358bb91c,2
|
||||||
|
np.float32,0xc2b6365e,0xb58bb91c,2
|
||||||
|
np.float32,0x4336365e,0xb60bb91c,2
|
||||||
|
np.float32,0xc336365e,0x360bb91c,2
|
||||||
|
np.float32,0x42395a9e,0x3f3504df,2
|
||||||
|
np.float32,0xc2395a9e,0xbf3504df,2
|
||||||
|
np.float32,0x42b95a9e,0xbf800000,2
|
||||||
|
np.float32,0xc2b95a9e,0x3f800000,2
|
||||||
|
np.float32,0x43395a9e,0xb6e51267,2
|
||||||
|
np.float32,0xc3395a9e,0x36e51267,2
|
||||||
|
np.float32,0x423c7edd,0xb4000add,2
|
||||||
|
np.float32,0xc23c7edd,0x34000add,2
|
||||||
|
np.float32,0x42bc7edd,0x34800add,2
|
||||||
|
np.float32,0xc2bc7edd,0xb4800add,2
|
||||||
|
np.float32,0x433c7edd,0x35000add,2
|
||||||
|
np.float32,0xc33c7edd,0xb5000add,2
|
||||||
|
np.float32,0x423fa31d,0xbf35050f,2
|
||||||
|
np.float32,0xc23fa31d,0x3f35050f,2
|
||||||
|
np.float32,0x42bfa31d,0x3f800000,2
|
||||||
|
np.float32,0xc2bfa31d,0xbf800000,2
|
||||||
|
np.float32,0x433fa31d,0xb71d7828,2
|
||||||
|
np.float32,0xc33fa31d,0x371d7828,2
|
||||||
|
np.float32,0x4242c75c,0xbf800000,2
|
||||||
|
np.float32,0xc242c75c,0x3f800000,2
|
||||||
|
np.float32,0x42c2c75c,0xb5cbbe8a,2
|
||||||
|
np.float32,0xc2c2c75c,0x35cbbe8a,2
|
||||||
|
np.float32,0x4342c75c,0x364bbe8a,2
|
||||||
|
np.float32,0xc342c75c,0xb64bbe8a,2
|
||||||
|
np.float32,0x4245eb9c,0xbf3504d0,2
|
||||||
|
np.float32,0xc245eb9c,0x3f3504d0,2
|
||||||
|
np.float32,0x42c5eb9c,0xbf800000,2
|
||||||
|
np.float32,0xc2c5eb9c,0x3f800000,2
|
||||||
|
np.float32,0x4345eb9c,0xb748671d,2
|
||||||
|
np.float32,0xc345eb9c,0x3748671d,2
|
||||||
|
np.float32,0x42490fdb,0x35bbbd2e,2
|
||||||
|
np.float32,0xc2490fdb,0xb5bbbd2e,2
|
||||||
|
np.float32,0x42c90fdb,0x363bbd2e,2
|
||||||
|
np.float32,0xc2c90fdb,0xb63bbd2e,2
|
||||||
|
np.float32,0x43490fdb,0x36bbbd2e,2
|
||||||
|
np.float32,0xc3490fdb,0xb6bbbd2e,2
|
||||||
|
np.float32,0x424c341a,0x3f3504f1,2
|
||||||
|
np.float32,0xc24c341a,0xbf3504f1,2
|
||||||
|
np.float32,0x42cc341a,0x3f800000,2
|
||||||
|
np.float32,0xc2cc341a,0xbf800000,2
|
||||||
|
np.float32,0x434c341a,0x354a9ee6,2
|
||||||
|
np.float32,0xc34c341a,0xb54a9ee6,2
|
||||||
|
np.float32,0x424f585a,0x3f800000,2
|
||||||
|
np.float32,0xc24f585a,0xbf800000,2
|
||||||
|
np.float32,0x42cf585a,0xb688cd8c,2
|
||||||
|
np.float32,0xc2cf585a,0x3688cd8c,2
|
||||||
|
np.float32,0x434f585a,0x3708cd8c,2
|
||||||
|
np.float32,0xc34f585a,0xb708cd8c,2
|
||||||
|
np.float32,0x42527c99,0x3f3504ee,2
|
||||||
|
np.float32,0xc2527c99,0xbf3504ee,2
|
||||||
|
np.float32,0x42d27c99,0xbf800000,2
|
||||||
|
np.float32,0xc2d27c99,0x3f800000,2
|
||||||
|
np.float32,0x43527c99,0xb5f22833,2
|
||||||
|
np.float32,0xc3527c99,0x35f22833,2
|
||||||
|
np.float32,0x4255a0d9,0xb633bc81,2
|
||||||
|
np.float32,0xc255a0d9,0x3633bc81,2
|
||||||
|
np.float32,0x42d5a0d9,0x36b3bc81,2
|
||||||
|
np.float32,0xc2d5a0d9,0xb6b3bc81,2
|
||||||
|
np.float32,0x4355a0d9,0x3733bc81,2
|
||||||
|
np.float32,0xc355a0d9,0xb733bc81,2
|
||||||
|
np.float32,0x4258c518,0xbf350500,2
|
||||||
|
np.float32,0xc258c518,0x3f350500,2
|
||||||
|
np.float32,0x42d8c518,0x3f800000,2
|
||||||
|
np.float32,0xc2d8c518,0xbf800000,2
|
||||||
|
np.float32,0x4358c518,0xb69267f6,2
|
||||||
|
np.float32,0xc358c518,0x369267f6,2
|
||||||
|
np.float32,0x425be958,0xbf800000,2
|
||||||
|
np.float32,0xc25be958,0x3f800000,2
|
||||||
|
np.float32,0x42dbe958,0xb6deab75,2
|
||||||
|
np.float32,0xc2dbe958,0x36deab75,2
|
||||||
|
np.float32,0x435be958,0x375eab75,2
|
||||||
|
np.float32,0xc35be958,0xb75eab75,2
|
||||||
|
np.float32,0x425f0d97,0xbf3504df,2
|
||||||
|
np.float32,0xc25f0d97,0x3f3504df,2
|
||||||
|
np.float32,0x42df0d97,0xbf800000,2
|
||||||
|
np.float32,0xc2df0d97,0x3f800000,2
|
||||||
|
np.float32,0x435f0d97,0xb6e845e0,2
|
||||||
|
np.float32,0xc35f0d97,0x36e845e0,2
|
||||||
|
np.float32,0x426231d6,0x3419a6a2,2
|
||||||
|
np.float32,0xc26231d6,0xb419a6a2,2
|
||||||
|
np.float32,0x42e231d6,0x3499a6a2,2
|
||||||
|
np.float32,0xc2e231d6,0xb499a6a2,2
|
||||||
|
np.float32,0x436231d6,0x3519a6a2,2
|
||||||
|
np.float32,0xc36231d6,0xb519a6a2,2
|
||||||
|
np.float32,0x42655616,0x3f35050f,2
|
||||||
|
np.float32,0xc2655616,0xbf35050f,2
|
||||||
|
np.float32,0x42e55616,0x3f800000,2
|
||||||
|
np.float32,0xc2e55616,0xbf800000,2
|
||||||
|
np.float32,0x43655616,0xb71f11e5,2
|
||||||
|
np.float32,0xc3655616,0x371f11e5,2
|
||||||
|
np.float32,0x42687a55,0x3f800000,2
|
||||||
|
np.float32,0xc2687a55,0xbf800000,2
|
||||||
|
np.float32,0x42e87a55,0xb5d2257b,2
|
||||||
|
np.float32,0xc2e87a55,0x35d2257b,2
|
||||||
|
np.float32,0x43687a55,0x3652257b,2
|
||||||
|
np.float32,0xc3687a55,0xb652257b,2
|
||||||
|
np.float32,0x426b9e95,0x3f3504cf,2
|
||||||
|
np.float32,0xc26b9e95,0xbf3504cf,2
|
||||||
|
np.float32,0x42eb9e95,0xbf800000,2
|
||||||
|
np.float32,0xc2eb9e95,0x3f800000,2
|
||||||
|
np.float32,0x436b9e95,0xb74a00d9,2
|
||||||
|
np.float32,0xc36b9e95,0x374a00d9,2
|
||||||
|
np.float32,0x426ec2d4,0xb5bef0a7,2
|
||||||
|
np.float32,0xc26ec2d4,0x35bef0a7,2
|
||||||
|
np.float32,0x42eec2d4,0x363ef0a7,2
|
||||||
|
np.float32,0xc2eec2d4,0xb63ef0a7,2
|
||||||
|
np.float32,0x436ec2d4,0x36bef0a7,2
|
||||||
|
np.float32,0xc36ec2d4,0xb6bef0a7,2
|
||||||
|
np.float32,0x4271e713,0xbf3504f1,2
|
||||||
|
np.float32,0xc271e713,0x3f3504f1,2
|
||||||
|
np.float32,0x42f1e713,0x3f800000,2
|
||||||
|
np.float32,0xc2f1e713,0xbf800000,2
|
||||||
|
np.float32,0x4371e713,0x35310321,2
|
||||||
|
np.float32,0xc371e713,0xb5310321,2
|
||||||
|
np.float32,0x42750b53,0xbf800000,2
|
||||||
|
np.float32,0xc2750b53,0x3f800000,2
|
||||||
|
np.float32,0x42f50b53,0xb68a6748,2
|
||||||
|
np.float32,0xc2f50b53,0x368a6748,2
|
||||||
|
np.float32,0x43750b53,0x370a6748,2
|
||||||
|
np.float32,0xc3750b53,0xb70a6748,2
|
||||||
|
np.float32,0x42782f92,0xbf3504ee,2
|
||||||
|
np.float32,0xc2782f92,0x3f3504ee,2
|
||||||
|
np.float32,0x42f82f92,0xbf800000,2
|
||||||
|
np.float32,0xc2f82f92,0x3f800000,2
|
||||||
|
np.float32,0x43782f92,0xb5fef616,2
|
||||||
|
np.float32,0xc3782f92,0x35fef616,2
|
||||||
|
np.float32,0x427b53d2,0x3635563d,2
|
||||||
|
np.float32,0xc27b53d2,0xb635563d,2
|
||||||
|
np.float32,0x42fb53d2,0x36b5563d,2
|
||||||
|
np.float32,0xc2fb53d2,0xb6b5563d,2
|
||||||
|
np.float32,0x437b53d2,0x3735563d,2
|
||||||
|
np.float32,0xc37b53d2,0xb735563d,2
|
||||||
|
np.float32,0x427e7811,0x3f350500,2
|
||||||
|
np.float32,0xc27e7811,0xbf350500,2
|
||||||
|
np.float32,0x42fe7811,0x3f800000,2
|
||||||
|
np.float32,0xc2fe7811,0xbf800000,2
|
||||||
|
np.float32,0x437e7811,0xb6959b6f,2
|
||||||
|
np.float32,0xc37e7811,0x36959b6f,2
|
||||||
|
np.float32,0x4280ce28,0x3f800000,2
|
||||||
|
np.float32,0xc280ce28,0xbf800000,2
|
||||||
|
np.float32,0x4300ce28,0x357dd672,2
|
||||||
|
np.float32,0xc300ce28,0xb57dd672,2
|
||||||
|
np.float32,0x4380ce28,0xb5fdd672,2
|
||||||
|
np.float32,0xc380ce28,0x35fdd672,2
|
||||||
|
np.float32,0x42826048,0x3f3504de,2
|
||||||
|
np.float32,0xc2826048,0xbf3504de,2
|
||||||
|
np.float32,0x43026048,0xbf800000,2
|
||||||
|
np.float32,0xc3026048,0x3f800000,2
|
||||||
|
np.float32,0x43826048,0xb6eb7958,2
|
||||||
|
np.float32,0xc3826048,0x36eb7958,2
|
||||||
|
np.float32,0x4283f268,0xb6859a13,2
|
||||||
|
np.float32,0xc283f268,0x36859a13,2
|
||||||
|
np.float32,0x4303f268,0x37059a13,2
|
||||||
|
np.float32,0xc303f268,0xb7059a13,2
|
||||||
|
np.float32,0x4383f268,0x37859a13,2
|
||||||
|
np.float32,0xc383f268,0xb7859a13,2
|
||||||
|
np.float32,0x42858487,0xbf3504e2,2
|
||||||
|
np.float32,0xc2858487,0x3f3504e2,2
|
||||||
|
np.float32,0x43058487,0x3f800000,2
|
||||||
|
np.float32,0xc3058487,0xbf800000,2
|
||||||
|
np.float32,0x43858487,0x36bea8be,2
|
||||||
|
np.float32,0xc3858487,0xb6bea8be,2
|
||||||
|
np.float32,0x428716a7,0xbf800000,2
|
||||||
|
np.float32,0xc28716a7,0x3f800000,2
|
||||||
|
np.float32,0x430716a7,0xb5d88c6d,2
|
||||||
|
np.float32,0xc30716a7,0x35d88c6d,2
|
||||||
|
np.float32,0x438716a7,0x36588c6d,2
|
||||||
|
np.float32,0xc38716a7,0xb6588c6d,2
|
||||||
|
np.float32,0x4288a8c7,0xbf3504cf,2
|
||||||
|
np.float32,0xc288a8c7,0x3f3504cf,2
|
||||||
|
np.float32,0x4308a8c7,0xbf800000,2
|
||||||
|
np.float32,0xc308a8c7,0x3f800000,2
|
||||||
|
np.float32,0x4388a8c7,0xb74b9a96,2
|
||||||
|
np.float32,0xc388a8c7,0x374b9a96,2
|
||||||
|
np.float32,0x428a3ae7,0x36b08908,2
|
||||||
|
np.float32,0xc28a3ae7,0xb6b08908,2
|
||||||
|
np.float32,0x430a3ae7,0x37308908,2
|
||||||
|
np.float32,0xc30a3ae7,0xb7308908,2
|
||||||
|
np.float32,0x438a3ae7,0x37b08908,2
|
||||||
|
np.float32,0xc38a3ae7,0xb7b08908,2
|
||||||
|
np.float32,0x428bcd06,0x3f3504f2,2
|
||||||
|
np.float32,0xc28bcd06,0xbf3504f2,2
|
||||||
|
np.float32,0x430bcd06,0x3f800000,2
|
||||||
|
np.float32,0xc30bcd06,0xbf800000,2
|
||||||
|
np.float32,0x438bcd06,0x3517675b,2
|
||||||
|
np.float32,0xc38bcd06,0xb517675b,2
|
||||||
|
np.float32,0x428d5f26,0x3f800000,2
|
||||||
|
np.float32,0xc28d5f26,0xbf800000,2
|
||||||
|
np.float32,0x430d5f26,0xb68c0105,2
|
||||||
|
np.float32,0xc30d5f26,0x368c0105,2
|
||||||
|
np.float32,0x438d5f26,0x370c0105,2
|
||||||
|
np.float32,0xc38d5f26,0xb70c0105,2
|
||||||
|
np.float32,0x428ef146,0x3f3504c0,2
|
||||||
|
np.float32,0xc28ef146,0xbf3504c0,2
|
||||||
|
np.float32,0x430ef146,0xbf800000,2
|
||||||
|
np.float32,0xc30ef146,0x3f800000,2
|
||||||
|
np.float32,0x438ef146,0xb790bc40,2
|
||||||
|
np.float32,0xc38ef146,0x3790bc40,2
|
||||||
|
np.float32,0x42908365,0x3592200d,2
|
||||||
|
np.float32,0xc2908365,0xb592200d,2
|
||||||
|
np.float32,0x43108365,0xb612200d,2
|
||||||
|
np.float32,0xc3108365,0x3612200d,2
|
||||||
|
np.float32,0x43908365,0xb692200d,2
|
||||||
|
np.float32,0xc3908365,0x3692200d,2
|
||||||
|
np.float32,0x42921585,0xbf350501,2
|
||||||
|
np.float32,0xc2921585,0x3f350501,2
|
||||||
|
np.float32,0x43121585,0x3f800000,2
|
||||||
|
np.float32,0xc3121585,0xbf800000,2
|
||||||
|
np.float32,0x43921585,0xb698cee8,2
|
||||||
|
np.float32,0xc3921585,0x3698cee8,2
|
||||||
|
np.float32,0x4293a7a5,0xbf800000,2
|
||||||
|
np.float32,0xc293a7a5,0x3f800000,2
|
||||||
|
np.float32,0x4313a7a5,0xb6e1deee,2
|
||||||
|
np.float32,0xc313a7a5,0x36e1deee,2
|
||||||
|
np.float32,0x4393a7a5,0x3761deee,2
|
||||||
|
np.float32,0xc393a7a5,0xb761deee,2
|
||||||
|
np.float32,0x429539c5,0xbf3504b1,2
|
||||||
|
np.float32,0xc29539c5,0x3f3504b1,2
|
||||||
|
np.float32,0x431539c5,0xbf800000,2
|
||||||
|
np.float32,0xc31539c5,0x3f800000,2
|
||||||
|
np.float32,0x439539c5,0xb7bbab34,2
|
||||||
|
np.float32,0xc39539c5,0x37bbab34,2
|
||||||
|
np.float32,0x4296cbe4,0x344cde2e,2
|
||||||
|
np.float32,0xc296cbe4,0xb44cde2e,2
|
||||||
|
np.float32,0x4316cbe4,0x34ccde2e,2
|
||||||
|
np.float32,0xc316cbe4,0xb4ccde2e,2
|
||||||
|
np.float32,0x4396cbe4,0x354cde2e,2
|
||||||
|
np.float32,0xc396cbe4,0xb54cde2e,2
|
||||||
|
np.float32,0x42985e04,0x3f350510,2
|
||||||
|
np.float32,0xc2985e04,0xbf350510,2
|
||||||
|
np.float32,0x43185e04,0x3f800000,2
|
||||||
|
np.float32,0xc3185e04,0xbf800000,2
|
||||||
|
np.float32,0x43985e04,0xb722455d,2
|
||||||
|
np.float32,0xc3985e04,0x3722455d,2
|
||||||
|
np.float32,0x4299f024,0x3f800000,2
|
||||||
|
np.float32,0xc299f024,0xbf800000,2
|
||||||
|
np.float32,0x4319f024,0xb71bde6c,2
|
||||||
|
np.float32,0xc319f024,0x371bde6c,2
|
||||||
|
np.float32,0x4399f024,0x379bde6c,2
|
||||||
|
np.float32,0xc399f024,0xb79bde6c,2
|
||||||
|
np.float32,0x429b8243,0x3f3504fc,2
|
||||||
|
np.float32,0xc29b8243,0xbf3504fc,2
|
||||||
|
np.float32,0x431b8243,0xbf800000,2
|
||||||
|
np.float32,0xc31b8243,0x3f800000,2
|
||||||
|
np.float32,0x439b8243,0x364b2eb8,2
|
||||||
|
np.float32,0xc39b8243,0xb64b2eb8,2
|
||||||
|
np.float32,0x435b2047,0xbf350525,2
|
||||||
|
np.float32,0x42a038a2,0xbf800000,2
|
||||||
|
np.float32,0x432038a2,0x3664ca7e,2
|
||||||
|
np.float32,0x4345eb9b,0x365e638c,2
|
||||||
|
np.float32,0x42c5eb9b,0xbf800000,2
|
||||||
|
np.float32,0x42eb9e94,0xbf800000,2
|
||||||
|
np.float32,0x4350ea79,0x3f800000,2
|
||||||
|
np.float32,0x42dbe957,0x3585522a,2
|
||||||
|
np.float32,0x425be957,0xbf800000,2
|
||||||
|
np.float32,0x435be957,0xb605522a,2
|
||||||
|
np.float32,0x487fe5ab,0xbf7ffffd,2
|
||||||
|
np.float32,0x497fe5ab,0xbb14017d,2
|
||||||
|
np.float32,0x49ffe5ab,0xbb940164,2
|
||||||
|
np.float32,0x49ffeb37,0x3f7fff56,2
|
||||||
|
np.float32,0x497ff0c3,0x3f7fffd6,2
|
||||||
|
np.float32,0x49fff0c3,0x3b930487,2
|
||||||
|
np.float32,0x49fff64f,0xbf7fff58,2
|
||||||
|
np.float32,0x497ffbdb,0x3b1207c0,2
|
||||||
|
np.float32,0x49fffbdb,0xbb9207a9,2
|
||||||
|
np.float32,0x48fffbdb,0xbf7ffff6,2
|
||||||
|
np.float32,0x4e736e56,0x397fa7f2,2
|
||||||
|
np.float32,0x4d4da377,0xb57c64bc,2
|
||||||
|
np.float32,0x4ece58c3,0xb80846c8,2
|
||||||
|
np.float32,0x4ee0db9c,0x394c4786,2
|
||||||
|
np.float32,0x4dee7002,0x381bce96,2
|
||||||
|
np.float32,0x4ee86afc,0x3f800000,2
|
||||||
|
np.float32,0x4dca4f3f,0xb8e25111,2
|
||||||
|
np.float32,0x4ecb48af,0xbf800000,2
|
||||||
|
np.float32,0x4e51e33f,0xb8a4fa6f,2
|
||||||
|
np.float32,0x4ef5f421,0x387ca7df,2
|
||||||
|
np.float32,0x476362a2,0xbd7ff911,2
|
||||||
|
np.float32,0x464c99a4,0x3e7f4d41,2
|
||||||
|
np.float32,0x4471f73d,0x3e7fe1b0,2
|
||||||
|
np.float32,0x445a6752,0x3e7ef367,2
|
||||||
|
np.float32,0x474fa400,0x3e7f9fcd,2
|
||||||
|
np.float32,0x47c9e70e,0xbb4bba09,2
|
||||||
|
np.float32,0x45c1e72f,0xbe7fc7af,2
|
||||||
|
np.float32,0x4558c91d,0x3e7e9f31,2
|
||||||
|
np.float32,0x43784f94,0xbdff6654,2
|
||||||
|
np.float32,0x466e8500,0xbe7ea0a3,2
|
||||||
|
np.float32,0x468e1c25,0x3e7e22fb,2
|
||||||
|
np.float32,0x47d28adc,0xbe7d5e6b,2
|
||||||
|
np.float32,0x44ea6cfc,0x3dff70c3,2
|
||||||
|
np.float32,0x4605126c,0x3e7f89ef,2
|
||||||
|
np.float32,0x4788b3c6,0xbb87d853,2
|
||||||
|
np.float32,0x4531b042,0x3dffd163,2
|
||||||
|
np.float32,0x47e46c29,0xbe7def2b,2
|
||||||
|
np.float32,0x47c10e07,0xbdff63d4,2
|
||||||
|
np.float32,0x43f1f71d,0x3dfff387,2
|
||||||
|
np.float32,0x47c3e38c,0x3e7f0b2f,2
|
||||||
|
np.float32,0x462c3fa5,0xbd7fe13d,2
|
||||||
|
np.float32,0x441c5354,0xbdff76b4,2
|
||||||
|
np.float32,0x44908b69,0x3e7dcf0d,2
|
||||||
|
np.float32,0x478813ad,0xbe7e9d80,2
|
||||||
|
np.float32,0x441c4351,0x3dff937b,2
|
@ -0,0 +1,42 @@
|
|||||||
|
"""
|
||||||
|
Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
|
||||||
|
"""
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
|
||||||
|
|
||||||
|
class TestArrayMemoryError:
    """Exercise the __str__ helpers of ``_ArrayMemoryError``."""

    def test_str(self):
        exc = _ArrayMemoryError((1023,), np.dtype(np.uint8))
        str(exc)  # merely not crashing is the requirement here

    # Testing these helpers directly is easier than parsing the full repr.
    def test__size_to_string(self):
        """ Test e._size_to_string """
        to_string = _ArrayMemoryError._size_to_string
        KIB = 1024

        assert to_string(0) == '0 bytes'
        assert to_string(1) == '1 bytes'
        assert to_string(1023) == '1023 bytes'
        assert to_string(KIB) == '1.00 KiB'
        assert to_string(KIB + 1) == '1.00 KiB'
        assert to_string(10 * KIB) == '10.0 KiB'
        assert to_string(int(999.4 * KIB)) == '999. KiB'
        assert to_string(int(1023.4 * KIB)) == '1023. KiB'
        assert to_string(int(1023.5 * KIB)) == '1.00 MiB'
        assert to_string(KIB * KIB) == '1.00 MiB'

        # 1023.9999 MiB should round up to 1 GiB
        assert to_string(int(KIB**3 * 0.9999)) == '1.00 GiB'
        assert to_string(KIB**6) == '1.00 EiB'
        # Larger than sys.maxsize; adding larger prefices isn't going to
        # help anyway.
        assert to_string(KIB**6 * 123456) == '123456. EiB'

    def test__total_size(self):
        """ Test e._total_size """
        # 1 element x 1 byte
        exc = _ArrayMemoryError((1,), np.dtype(np.uint8))
        assert exc._total_size == 1

        # 8 elements x (8 bytes x 16 sub-elements) = 1024 bytes
        exc = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
        assert exc._total_size == 1024
|
@ -0,0 +1,56 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
from numpy.testing import assert_
|
||||||
|
|
||||||
|
import numbers
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from numpy.core.numerictypes import sctypes
|
||||||
|
|
||||||
|
class TestABC(object):
|
||||||
|
def test_abstract(self):
|
||||||
|
assert_(issubclass(np.number, numbers.Number))
|
||||||
|
|
||||||
|
assert_(issubclass(np.inexact, numbers.Complex))
|
||||||
|
assert_(issubclass(np.complexfloating, numbers.Complex))
|
||||||
|
assert_(issubclass(np.floating, numbers.Real))
|
||||||
|
|
||||||
|
assert_(issubclass(np.integer, numbers.Integral))
|
||||||
|
assert_(issubclass(np.signedinteger, numbers.Integral))
|
||||||
|
assert_(issubclass(np.unsignedinteger, numbers.Integral))
|
||||||
|
|
||||||
|
def test_floats(self):
|
||||||
|
for t in sctypes['float']:
|
||||||
|
assert_(isinstance(t(), numbers.Real),
|
||||||
|
"{0} is not instance of Real".format(t.__name__))
|
||||||
|
assert_(issubclass(t, numbers.Real),
|
||||||
|
"{0} is not subclass of Real".format(t.__name__))
|
||||||
|
assert_(not isinstance(t(), numbers.Rational),
|
||||||
|
"{0} is instance of Rational".format(t.__name__))
|
||||||
|
assert_(not issubclass(t, numbers.Rational),
|
||||||
|
"{0} is subclass of Rational".format(t.__name__))
|
||||||
|
|
||||||
|
def test_complex(self):
|
||||||
|
for t in sctypes['complex']:
|
||||||
|
assert_(isinstance(t(), numbers.Complex),
|
||||||
|
"{0} is not instance of Complex".format(t.__name__))
|
||||||
|
assert_(issubclass(t, numbers.Complex),
|
||||||
|
"{0} is not subclass of Complex".format(t.__name__))
|
||||||
|
assert_(not isinstance(t(), numbers.Real),
|
||||||
|
"{0} is instance of Real".format(t.__name__))
|
||||||
|
assert_(not issubclass(t, numbers.Real),
|
||||||
|
"{0} is subclass of Real".format(t.__name__))
|
||||||
|
|
||||||
|
def test_int(self):
|
||||||
|
for t in sctypes['int']:
|
||||||
|
assert_(isinstance(t(), numbers.Integral),
|
||||||
|
"{0} is not instance of Integral".format(t.__name__))
|
||||||
|
assert_(issubclass(t, numbers.Integral),
|
||||||
|
"{0} is not subclass of Integral".format(t.__name__))
|
||||||
|
|
||||||
|
def test_uint(self):
|
||||||
|
for t in sctypes['uint']:
|
||||||
|
assert_(isinstance(t(), numbers.Integral),
|
||||||
|
"{0} is not instance of Integral".format(t.__name__))
|
||||||
|
assert_(issubclass(t, numbers.Integral),
|
||||||
|
"{0} is not subclass of Integral".format(t.__name__))
|
@ -0,0 +1,526 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pytest
|
||||||
|
from numpy.testing import (
|
||||||
|
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
|
||||||
|
HAS_REFCOUNT
|
||||||
|
)
|
||||||
|
|
||||||
|
# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set.
|
||||||
|
NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous
|
||||||
|
|
||||||
|
|
||||||
|
def test_array_array():
    """np.array() construction from the full zoo of supported inputs."""
    obj_type = type(object)
    ones11 = np.ones((1, 1), np.float64)
    ndarray_type = type(ones11)

    # Constructing from an existing ndarray.
    assert_equal(np.array(ones11, dtype=np.float64), ones11)
    if HAS_REFCOUNT:
        refs_before = sys.getrefcount(ndarray_type)
        np.array(ones11)
        assert_equal(refs_before, sys.getrefcount(ndarray_type))

    # Constructing from None yields NaN.
    assert_equal(np.array(None, dtype=np.float64),
                 np.array(np.nan, dtype=np.float64))
    if HAS_REFCOUNT:
        refs_before = sys.getrefcount(obj_type)
        np.array(None, dtype=np.float64)
        assert_equal(refs_before, sys.getrefcount(obj_type))

    # Constructing from a python scalar.
    assert_equal(np.array(1.0, dtype=np.float64),
                 np.ones((), dtype=np.float64))
    if HAS_REFCOUNT:
        refs_before = sys.getrefcount(np.float64)
        np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
        assert_equal(refs_before, sys.getrefcount(np.float64))

    # Constructing from str.
    S2 = np.dtype((str, 2))
    S3 = np.dtype((str, 3))
    S5 = np.dtype((str, 5))
    assert_equal(np.array("1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array("1.0").dtype, S3)
    assert_equal(np.array("1.0", dtype=str).dtype, S3)
    assert_equal(np.array("1.0", dtype=S2), np.array("1."))
    assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5))

    # Constructing from unicode (name only exists on Python 2).
    _unicode = globals().get("unicode")
    if _unicode:
        U2 = np.dtype((_unicode, 2))
        U3 = np.dtype((_unicode, 3))
        U5 = np.dtype((_unicode, 5))
        assert_equal(np.array(_unicode("1.0"), dtype=np.float64),
                     np.ones((), dtype=np.float64))
        assert_equal(np.array(_unicode("1.0")).dtype, U3)
        assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3)
        assert_equal(np.array(_unicode("1.0"), dtype=U2),
                     np.array(_unicode("1.")))
        assert_equal(np.array(_unicode("1"), dtype=U5),
                     np.ones((), dtype=U5))

    builtins = getattr(__builtins__, '__dict__', __builtins__)
    assert_(hasattr(builtins, 'get'))

    # Constructing from buffer (Python 2 only).
    _buffer = builtins.get("buffer")
    if _buffer and sys.version_info[:3] >= (2, 7, 5):
        # This test fails for earlier versions of Python.
        # Evidently a bug got fixed in 2.7.5.
        dat = np.array(_buffer('1.0'), dtype=np.float64)
        assert_equal(dat, [49.0, 46.0, 48.0])
        assert_(dat.dtype.type is np.float64)

        dat = np.array(_buffer(b'1.0'))
        assert_equal(dat, [49, 46, 48])
        assert_(dat.dtype.type is np.uint8)

    # Constructing from memoryview, the modern buffer protocol.
    _memoryview = builtins.get("memoryview")
    if _memoryview:
        dat = np.array(_memoryview(b'1.0'), dtype=np.float64)
        assert_equal(dat, [49.0, 46.0, 48.0])
        assert_(dat.dtype.type is np.float64)

        dat = np.array(_memoryview(b'1.0'))
        assert_equal(dat, [49, 46, 48])
        assert_(dat.dtype.type is np.uint8)

    # Constructing via __array_interface__.
    a = np.array(100.0, dtype=np.float64)
    o = type("o", (object,),
             dict(__array_interface__=a.__array_interface__))
    assert_equal(np.array(o, dtype=np.float64), a)

    # Constructing via __array_struct__.
    a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
                 dtype=[('f0', int), ('f1', float), ('f2', str)])
    o = type("o", (object,),
             dict(__array_struct__=a.__array_struct__))
    ## wasn't what I expected... is np.array(o) supposed to equal a ?
    ## instead we get a array([...], dtype=">V18")
    assert_equal(bytes(np.array(o).data), bytes(a.data))

    # Constructing via __array__.
    o = type("o", (object,),
             dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
    assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))

    # Nesting up to the dimension limit is accepted without error...
    nested = 1.5
    for _ in range(np.MAXDIMS):
        nested = [nested]

    # no error
    np.array(nested)

    # ...but one level deeper exceeds the recursion limit.
    assert_raises(ValueError, np.array, [nested], dtype=np.float64)

    # Lists of None / floats, in every 1-D / 2-D arrangement.
    assert_equal(np.array([None] * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([[None]] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array([1.0] * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))

    # Same arrangements, but built from tuples.
    assert_equal(np.array((None,) * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,)] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array((1.0,) * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))
|
||||||
|
|
||||||
|
|
||||||
|
def test_fastCopyAndTranspose():
    """fastCopyAndTranspose must return an owning copy of the transpose."""
    # Same contract checked for 0-D, 1-D and 2-D inputs.
    for original in (np.array(2),
                     np.array([3, 2, 7, 0]),
                     np.arange(6).reshape(2, 3)):
        transposed = np.fastCopyAndTranspose(original)
        assert_equal(transposed, original.T)
        assert_(transposed.flags.owndata)
|
||||||
|
|
||||||
|
def test_array_astype():
    """ndarray.astype: casting, copy/order/subok flags, string widths."""
    arr = np.arange(6, dtype='f4').reshape(2, 3)
    # Default behavior: allows unsafe casts, keeps memory layout,
    # always copies.
    out = arr.astype('i4')
    assert_equal(arr, out)
    assert_equal(out.dtype, np.dtype('i4'))
    assert_equal(arr.strides, out.strides)

    out = arr.T.astype('i4')
    assert_equal(arr.T, out)
    assert_equal(out.dtype, np.dtype('i4'))
    assert_equal(arr.T.strides, out.strides)

    out = arr.astype('f4')
    assert_equal(arr, out)
    assert_(not (arr is out))

    # copy=False parameter can sometimes skip a copy
    out = arr.astype('f4', copy=False)
    assert_(arr is out)

    # order parameter allows overriding of the memory layout,
    # forcing a copy if the layout is wrong
    out = arr.astype('f4', order='F', copy=False)
    assert_equal(arr, out)
    assert_(not (arr is out))
    assert_(out.flags.f_contiguous)

    out = arr.astype('f4', order='C', copy=False)
    assert_equal(arr, out)
    assert_(arr is out)
    assert_(out.flags.c_contiguous)

    # casting parameter allows catching bad casts
    out = arr.astype('c8', casting='safe')
    assert_equal(arr, out)
    assert_equal(out.dtype, np.dtype('c8'))

    assert_raises(TypeError, arr.astype, 'i4', casting='safe')

    # subok=False passes through a non-subclassed array
    out = arr.astype('f4', subok=0, copy=False)
    assert_(arr is out)

    class MyNDArray(np.ndarray):
        pass

    arr = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)

    # subok=True passes through a subclass
    out = arr.astype('f4', subok=True, copy=False)
    assert_(arr is out)

    # subok=True is default, and creates a subtype on a cast
    out = arr.astype('i4', copy=False)
    assert_equal(arr, out)
    assert_equal(type(out), MyNDArray)

    # subok=False never returns a subclass
    out = arr.astype('f4', subok=False, copy=False)
    assert_equal(arr, out)
    assert_(not (arr is out))
    assert_(type(out) is not MyNDArray)

    # Converting from string object to fixed-length string must not
    # truncate.
    arr = np.array([b'a'*100], dtype='O')
    out = arr.astype('S')
    assert_equal(arr, out)
    assert_equal(out.dtype, np.dtype('S100'))
    arr = np.array([u'a'*100], dtype='O')
    out = arr.astype('U')
    assert_equal(arr, out)
    assert_equal(out.dtype, np.dtype('U100'))

    # Same check for strings shorter than 64 characters.
    arr = np.array([b'a'*10], dtype='O')
    out = arr.astype('S')
    assert_equal(arr, out)
    assert_equal(out.dtype, np.dtype('S10'))
    arr = np.array([u'a'*10], dtype='O')
    out = arr.astype('U')
    assert_equal(arr, out)
    assert_equal(out.dtype, np.dtype('U10'))

    # Huge python ints survive the cast to bytes/unicode, scalar...
    arr = np.array(123456789012345678901234567890, dtype='O').astype('S')
    assert_array_equal(arr, np.array(b'1234567890' * 3, dtype='S30'))
    arr = np.array(123456789012345678901234567890, dtype='O').astype('U')
    assert_array_equal(arr, np.array(u'1234567890' * 3, dtype='U30'))

    # ...1-element array...
    arr = np.array([123456789012345678901234567890], dtype='O').astype('S')
    assert_array_equal(arr, np.array(b'1234567890' * 3, dtype='S30'))
    arr = np.array([123456789012345678901234567890], dtype='O').astype('U')
    assert_array_equal(arr, np.array(u'1234567890' * 3, dtype='U30'))

    # ...and direct construction with a string dtype.
    arr = np.array(123456789012345678901234567890, dtype='S')
    assert_array_equal(arr, np.array(b'1234567890' * 3, dtype='S30'))
    arr = np.array(123456789012345678901234567890, dtype='U')
    assert_array_equal(arr, np.array(u'1234567890' * 3, dtype='U30'))

    # Reinterpreting a 2-char unicode buffer as uint32 gives 2 elements.
    arr = np.array(u'a\u0140', dtype='U')
    out = np.ndarray(buffer=arr, dtype='uint32', shape=2)
    assert_(out.size == 2)

    # int -> too-narrow string is unsafe, for arrays and scalars alike.
    arr = np.array([1000], dtype='i4')
    assert_raises(TypeError, arr.astype, 'S1', casting='safe')

    arr = np.array(1000, dtype='i4')
    assert_raises(TypeError, arr.astype, 'U1', casting='safe')
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("t",
        np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float'])
def test_array_astype_warning(t):
    """Casting complex down to any real/integer type emits ComplexWarning."""
    src = np.array(10, dtype=np.complex_)
    assert_warns(np.ComplexWarning, src.astype, t)
|
||||||
|
|
||||||
|
def test_copyto_fromscalar():
    """np.copyto broadcasting a scalar into an array, plain and masked."""
    target = np.arange(6, dtype='f4').reshape(2, 3)

    # Unmasked scalar fill, directly and through a transposed view.
    np.copyto(target, 1.5)
    assert_equal(target, 1.5)
    np.copyto(target.T, 2.5)
    assert_equal(target, 2.5)

    # Masked fill via `where=`, again directly and through the transpose.
    mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(target, 3.5, where=mask)
    assert_equal(target, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(target.T, 4.5, where=mask)
    assert_equal(target, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
|
||||||
|
|
||||||
|
def test_copyto():
    """np.copyto array-to-array: casting rules, masks, overlap handling."""
    dst = np.arange(6, dtype='i4').reshape(2, 3)

    # Plain copy.
    np.copyto(dst, [[3, 1, 5], [6, 2, 1]])
    assert_equal(dst, [[3, 1, 5], [6, 2, 1]])

    # Overlapping source and destination must still work.
    np.copyto(dst[:, :2], dst[::-1, 1::-1])
    assert_equal(dst, [[2, 6, 5], [1, 3, 1]])

    # Default casting is 'same_kind', so a float scalar into i4 raises.
    assert_raises(TypeError, np.copyto, dst, 1.5)

    # 'unsafe' casting forces the copy, truncating 1.5 to 1.
    np.copyto(dst, 1.5, casting='unsafe')
    assert_equal(dst, 1)

    # Boolean-list mask.
    np.copyto(dst, 3, where=[True, False, True])
    assert_equal(dst, [[3, 1, 3], [3, 1, 3]])

    # The casting rule is still enforced when a mask is present.
    assert_raises(TypeError, np.copyto, dst, 3.5, where=[True, False, True])

    # Masks given as integer 0's and 1's are accepted too.
    np.copyto(dst, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
    assert_equal(dst, [[3, 4, 4], [4, 1, 3]])

    # Overlapping copy combined with a mask.
    np.copyto(dst[:, :2], dst[::-1, 1::-1], where=[[0, 1], [1, 1]])
    assert_equal(dst, [[3, 4, 4], [4, 3, 3]])

    # The destination must be an ndarray, not a plain list.
    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
|
||||||
|
|
||||||
|
def test_copyto_permut():
|
||||||
|
# test explicit overflow case
|
||||||
|
pad = 500
|
||||||
|
l = [True] * pad + [True, True, True, True]
|
||||||
|
r = np.zeros(len(l)-pad)
|
||||||
|
d = np.ones(len(l)-pad)
|
||||||
|
mask = np.array(l)[pad:]
|
||||||
|
np.copyto(r, d, where=mask[::-1])
|
||||||
|
|
||||||
|
# test all permutation of possible masks, 9 should be sufficient for
|
||||||
|
# current 4 byte unrolled code
|
||||||
|
power = 9
|
||||||
|
d = np.ones(power)
|
||||||
|
for i in range(2**power):
|
||||||
|
r = np.zeros(power)
|
||||||
|
l = [(i & x) != 0 for x in range(power)]
|
||||||
|
mask = np.array(l)
|
||||||
|
np.copyto(r, d, where=mask)
|
||||||
|
assert_array_equal(r == 1, l)
|
||||||
|
assert_equal(r.sum(), sum(l))
|
||||||
|
|
||||||
|
r = np.zeros(power)
|
||||||
|
np.copyto(r, d, where=mask[::-1])
|
||||||
|
assert_array_equal(r == 1, l[::-1])
|
||||||
|
assert_equal(r.sum(), sum(l))
|
||||||
|
|
||||||
|
r = np.zeros(power)
|
||||||
|
np.copyto(r[::2], d[::2], where=mask[::2])
|
||||||
|
assert_array_equal(r[::2] == 1, l[::2])
|
||||||
|
assert_equal(r[::2].sum(), sum(l[::2]))
|
||||||
|
|
||||||
|
r = np.zeros(power)
|
||||||
|
np.copyto(r[::2], d[::2], where=mask[::-2])
|
||||||
|
assert_array_equal(r[::2] == 1, l[::-2])
|
||||||
|
assert_equal(r[::2].sum(), sum(l[::-2]))
|
||||||
|
|
||||||
|
for c in [0xFF, 0x7F, 0x02, 0x10]:
|
||||||
|
r = np.zeros(power)
|
||||||
|
mask = np.array(l)
|
||||||
|
imask = np.array(l).view(np.uint8)
|
||||||
|
imask[mask != 0] = c
|
||||||
|
np.copyto(r, d, where=mask)
|
||||||
|
assert_array_equal(r == 1, l)
|
||||||
|
assert_equal(r.sum(), sum(l))
|
||||||
|
|
||||||
|
r = np.zeros(power)
|
||||||
|
np.copyto(r, d, where=True)
|
||||||
|
assert_equal(r.sum(), r.size)
|
||||||
|
r = np.ones(power)
|
||||||
|
d = np.zeros(power)
|
||||||
|
np.copyto(r, d, where=False)
|
||||||
|
assert_equal(r.sum(), r.size)
|
||||||
|
|
||||||
|
def test_copy_order():
|
||||||
|
a = np.arange(24).reshape(2, 1, 3, 4)
|
||||||
|
b = a.copy(order='F')
|
||||||
|
c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
|
||||||
|
|
||||||
|
def check_copy_result(x, y, ccontig, fcontig, strides=False):
|
||||||
|
assert_(not (x is y))
|
||||||
|
assert_equal(x, y)
|
||||||
|
assert_equal(res.flags.c_contiguous, ccontig)
|
||||||
|
assert_equal(res.flags.f_contiguous, fcontig)
|
||||||
|
# This check is impossible only because
|
||||||
|
# NPY_RELAXED_STRIDES_CHECKING changes the strides actively
|
||||||
|
if not NPY_RELAXED_STRIDES_CHECKING:
|
||||||
|
if strides:
|
||||||
|
assert_equal(x.strides, y.strides)
|
||||||
|
else:
|
||||||
|
assert_(x.strides != y.strides)
|
||||||
|
|
||||||
|
# Validate the initial state of a, b, and c
|
||||||
|
assert_(a.flags.c_contiguous)
|
||||||
|
assert_(not a.flags.f_contiguous)
|
||||||
|
assert_(not b.flags.c_contiguous)
|
||||||
|
assert_(b.flags.f_contiguous)
|
||||||
|
assert_(not c.flags.c_contiguous)
|
||||||
|
assert_(not c.flags.f_contiguous)
|
||||||
|
|
||||||
|
# Copy with order='C'
|
||||||
|
res = a.copy(order='C')
|
||||||
|
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
||||||
|
res = b.copy(order='C')
|
||||||
|
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
|
||||||
|
res = c.copy(order='C')
|
||||||
|
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
|
||||||
|
res = np.copy(a, order='C')
|
||||||
|
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
||||||
|
res = np.copy(b, order='C')
|
||||||
|
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
|
||||||
|
res = np.copy(c, order='C')
|
||||||
|
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
|
||||||
|
|
||||||
|
# Copy with order='F'
|
||||||
|
res = a.copy(order='F')
|
||||||
|
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
|
||||||
|
res = b.copy(order='F')
|
||||||
|
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
||||||
|
res = c.copy(order='F')
|
||||||
|
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
|
||||||
|
res = np.copy(a, order='F')
|
||||||
|
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
|
||||||
|
res = np.copy(b, order='F')
|
||||||
|
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
||||||
|
res = np.copy(c, order='F')
|
||||||
|
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
|
||||||
|
|
||||||
|
# Copy with order='K'
|
||||||
|
res = a.copy(order='K')
|
||||||
|
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
||||||
|
res = b.copy(order='K')
|
||||||
|
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
||||||
|
res = c.copy(order='K')
|
||||||
|
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
|
||||||
|
res = np.copy(a, order='K')
|
||||||
|
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
||||||
|
res = np.copy(b, order='K')
|
||||||
|
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
||||||
|
res = np.copy(c, order='K')
|
||||||
|
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
|
||||||
|
|
||||||
|
def test_contiguous_flags():
|
||||||
|
a = np.ones((4, 4, 1))[::2,:,:]
|
||||||
|
if NPY_RELAXED_STRIDES_CHECKING:
|
||||||
|
a.strides = a.strides[:2] + (-123,)
|
||||||
|
b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
|
||||||
|
|
||||||
|
def check_contig(a, ccontig, fcontig):
|
||||||
|
assert_(a.flags.c_contiguous == ccontig)
|
||||||
|
assert_(a.flags.f_contiguous == fcontig)
|
||||||
|
|
||||||
|
# Check if new arrays are correct:
|
||||||
|
check_contig(a, False, False)
|
||||||
|
check_contig(b, False, False)
|
||||||
|
if NPY_RELAXED_STRIDES_CHECKING:
|
||||||
|
check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
|
||||||
|
check_contig(np.array([[[1], [2]]], order='F'), True, True)
|
||||||
|
else:
|
||||||
|
check_contig(np.empty((2, 2, 0, 2, 2)), True, False)
|
||||||
|
check_contig(np.array([[[1], [2]]], order='F'), False, True)
|
||||||
|
check_contig(np.empty((2, 2)), True, False)
|
||||||
|
check_contig(np.empty((2, 2), order='F'), False, True)
|
||||||
|
|
||||||
|
# Check that np.array creates correct contiguous flags:
|
||||||
|
check_contig(np.array(a, copy=False), False, False)
|
||||||
|
check_contig(np.array(a, copy=False, order='C'), True, False)
|
||||||
|
check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
|
||||||
|
|
||||||
|
if NPY_RELAXED_STRIDES_CHECKING:
|
||||||
|
# Check slicing update of flags and :
|
||||||
|
check_contig(a[0], True, True)
|
||||||
|
check_contig(a[None, ::4, ..., None], True, True)
|
||||||
|
check_contig(b[0, 0, ...], False, True)
|
||||||
|
check_contig(b[:,:, 0:0,:,:], True, True)
|
||||||
|
else:
|
||||||
|
# Check slicing update of flags:
|
||||||
|
check_contig(a[0], True, False)
|
||||||
|
# Would be nice if this was C-Contiguous:
|
||||||
|
check_contig(a[None, 0, ..., None], False, False)
|
||||||
|
check_contig(b[0, 0, 0, ...], False, True)
|
||||||
|
|
||||||
|
# Test ravel and squeeze.
|
||||||
|
check_contig(a.ravel(), True, True)
|
||||||
|
check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
|
||||||
|
|
||||||
|
def test_broadcast_arrays():
|
||||||
|
# Test user defined dtypes
|
||||||
|
a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
|
||||||
|
b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
|
||||||
|
result = np.broadcast_arrays(a, b)
|
||||||
|
assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
|
||||||
|
assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
|
@ -0,0 +1,888 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import gc
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from numpy.testing import (
|
||||||
|
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
|
||||||
|
assert_raises_regex,
|
||||||
|
)
|
||||||
|
import textwrap
|
||||||
|
|
||||||
|
class TestArrayRepr(object):
|
||||||
|
def test_nan_inf(self):
|
||||||
|
x = np.array([np.nan, np.inf])
|
||||||
|
assert_equal(repr(x), 'array([nan, inf])')
|
||||||
|
|
||||||
|
def test_subclass(self):
|
||||||
|
class sub(np.ndarray): pass
|
||||||
|
|
||||||
|
# one dimensional
|
||||||
|
x1d = np.array([1, 2]).view(sub)
|
||||||
|
assert_equal(repr(x1d), 'sub([1, 2])')
|
||||||
|
|
||||||
|
# two dimensional
|
||||||
|
x2d = np.array([[1, 2], [3, 4]]).view(sub)
|
||||||
|
assert_equal(repr(x2d),
|
||||||
|
'sub([[1, 2],\n'
|
||||||
|
' [3, 4]])')
|
||||||
|
|
||||||
|
# two dimensional with flexible dtype
|
||||||
|
xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
|
||||||
|
assert_equal(repr(xstruct),
|
||||||
|
"sub([[(1,), (1,)],\n"
|
||||||
|
" [(1,), (1,)]], dtype=[('a', '<i4')])"
|
||||||
|
)
|
||||||
|
|
||||||
|
@pytest.mark.xfail(reason="See gh-10544")
|
||||||
|
def test_object_subclass(self):
|
||||||
|
class sub(np.ndarray):
|
||||||
|
def __new__(cls, inp):
|
||||||
|
obj = np.asarray(inp).view(cls)
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def __getitem__(self, ind):
|
||||||
|
ret = super(sub, self).__getitem__(ind)
|
||||||
|
return sub(ret)
|
||||||
|
|
||||||
|
# test that object + subclass is OK:
|
||||||
|
x = sub([None, None])
|
||||||
|
assert_equal(repr(x), 'sub([None, None], dtype=object)')
|
||||||
|
assert_equal(str(x), '[None None]')
|
||||||
|
|
||||||
|
x = sub([None, sub([None, None])])
|
||||||
|
assert_equal(repr(x),
|
||||||
|
'sub([None, sub([None, None], dtype=object)], dtype=object)')
|
||||||
|
assert_equal(str(x), '[None sub([None, None], dtype=object)]')
|
||||||
|
|
||||||
|
def test_0d_object_subclass(self):
|
||||||
|
# make sure that subclasses which return 0ds instead
|
||||||
|
# of scalars don't cause infinite recursion in str
|
||||||
|
class sub(np.ndarray):
|
||||||
|
def __new__(cls, inp):
|
||||||
|
obj = np.asarray(inp).view(cls)
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def __getitem__(self, ind):
|
||||||
|
ret = super(sub, self).__getitem__(ind)
|
||||||
|
return sub(ret)
|
||||||
|
|
||||||
|
x = sub(1)
|
||||||
|
assert_equal(repr(x), 'sub(1)')
|
||||||
|
assert_equal(str(x), '1')
|
||||||
|
|
||||||
|
x = sub([1, 1])
|
||||||
|
assert_equal(repr(x), 'sub([1, 1])')
|
||||||
|
assert_equal(str(x), '[1 1]')
|
||||||
|
|
||||||
|
# check it works properly with object arrays too
|
||||||
|
x = sub(None)
|
||||||
|
assert_equal(repr(x), 'sub(None, dtype=object)')
|
||||||
|
assert_equal(str(x), 'None')
|
||||||
|
|
||||||
|
# plus recursive object arrays (even depth > 1)
|
||||||
|
y = sub(None)
|
||||||
|
x[()] = y
|
||||||
|
y[()] = x
|
||||||
|
assert_equal(repr(x),
|
||||||
|
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
|
||||||
|
assert_equal(str(x), '...')
|
||||||
|
x[()] = 0 # resolve circular references for garbage collector
|
||||||
|
|
||||||
|
# nested 0d-subclass-object
|
||||||
|
x = sub(None)
|
||||||
|
x[()] = sub(None)
|
||||||
|
assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
|
||||||
|
assert_equal(str(x), 'None')
|
||||||
|
|
||||||
|
# gh-10663
|
||||||
|
class DuckCounter(np.ndarray):
|
||||||
|
def __getitem__(self, item):
|
||||||
|
result = super(DuckCounter, self).__getitem__(item)
|
||||||
|
if not isinstance(result, DuckCounter):
|
||||||
|
result = result[...].view(DuckCounter)
|
||||||
|
return result
|
||||||
|
|
||||||
|
def to_string(self):
|
||||||
|
return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
if self.shape == ():
|
||||||
|
return self.to_string()
|
||||||
|
else:
|
||||||
|
fmt = {'all': lambda x: x.to_string()}
|
||||||
|
return np.array2string(self, formatter=fmt)
|
||||||
|
|
||||||
|
dc = np.arange(5).view(DuckCounter)
|
||||||
|
assert_equal(str(dc), "[zero one two many many]")
|
||||||
|
assert_equal(str(dc[0]), "zero")
|
||||||
|
|
||||||
|
def test_self_containing(self):
|
||||||
|
arr0d = np.array(None)
|
||||||
|
arr0d[()] = arr0d
|
||||||
|
assert_equal(repr(arr0d),
|
||||||
|
'array(array(..., dtype=object), dtype=object)')
|
||||||
|
arr0d[()] = 0 # resolve recursion for garbage collector
|
||||||
|
|
||||||
|
arr1d = np.array([None, None])
|
||||||
|
arr1d[1] = arr1d
|
||||||
|
assert_equal(repr(arr1d),
|
||||||
|
'array([None, array(..., dtype=object)], dtype=object)')
|
||||||
|
arr1d[1] = 0 # resolve recursion for garbage collector
|
||||||
|
|
||||||
|
first = np.array(None)
|
||||||
|
second = np.array(None)
|
||||||
|
first[()] = second
|
||||||
|
second[()] = first
|
||||||
|
assert_equal(repr(first),
|
||||||
|
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
|
||||||
|
first[()] = 0 # resolve circular references for garbage collector
|
||||||
|
|
||||||
|
def test_containing_list(self):
|
||||||
|
# printing square brackets directly would be ambiguuous
|
||||||
|
arr1d = np.array([None, None])
|
||||||
|
arr1d[0] = [1, 2]
|
||||||
|
arr1d[1] = [3]
|
||||||
|
assert_equal(repr(arr1d),
|
||||||
|
'array([list([1, 2]), list([3])], dtype=object)')
|
||||||
|
|
||||||
|
def test_void_scalar_recursion(self):
|
||||||
|
# gh-9345
|
||||||
|
repr(np.void(b'test')) # RecursionError ?
|
||||||
|
|
||||||
|
def test_fieldless_structured(self):
|
||||||
|
# gh-10366
|
||||||
|
no_fields = np.dtype([])
|
||||||
|
arr_no_fields = np.empty(4, dtype=no_fields)
|
||||||
|
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
|
||||||
|
|
||||||
|
|
||||||
|
class TestComplexArray(object):
|
||||||
|
def test_str(self):
|
||||||
|
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
|
||||||
|
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
|
||||||
|
dtypes = [np.complex64, np.cdouble, np.clongdouble]
|
||||||
|
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
|
||||||
|
wanted = [
|
||||||
|
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
|
||||||
|
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
|
||||||
|
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
|
||||||
|
'[0.+infj]', '[0.+infj]', '[0.+infj]',
|
||||||
|
'[0.-infj]', '[0.-infj]', '[0.-infj]',
|
||||||
|
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
|
||||||
|
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
|
||||||
|
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
|
||||||
|
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
|
||||||
|
'[1.+infj]', '[1.+infj]', '[1.+infj]',
|
||||||
|
'[1.-infj]', '[1.-infj]', '[1.-infj]',
|
||||||
|
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
|
||||||
|
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
|
||||||
|
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
|
||||||
|
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
|
||||||
|
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
|
||||||
|
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
|
||||||
|
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
|
||||||
|
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
|
||||||
|
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
|
||||||
|
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
|
||||||
|
'[inf+infj]', '[inf+infj]', '[inf+infj]',
|
||||||
|
'[inf-infj]', '[inf-infj]', '[inf-infj]',
|
||||||
|
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
|
||||||
|
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
|
||||||
|
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
|
||||||
|
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
|
||||||
|
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
|
||||||
|
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
|
||||||
|
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
|
||||||
|
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
|
||||||
|
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
|
||||||
|
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
|
||||||
|
'[nan+infj]', '[nan+infj]', '[nan+infj]',
|
||||||
|
'[nan-infj]', '[nan-infj]', '[nan-infj]',
|
||||||
|
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
|
||||||
|
|
||||||
|
for res, val in zip(actual, wanted):
|
||||||
|
assert_equal(res, val)
|
||||||
|
|
||||||
|
class TestArray2String(object):
|
||||||
|
def test_basic(self):
|
||||||
|
"""Basic test of array2string."""
|
||||||
|
a = np.arange(3)
|
||||||
|
assert_(np.array2string(a) == '[0 1 2]')
|
||||||
|
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
|
||||||
|
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
|
||||||
|
|
||||||
|
def test_unexpected_kwarg(self):
|
||||||
|
# ensure than an appropriate TypeError
|
||||||
|
# is raised when array2string receives
|
||||||
|
# an unexpected kwarg
|
||||||
|
|
||||||
|
with assert_raises_regex(TypeError, 'nonsense'):
|
||||||
|
np.array2string(np.array([1, 2, 3]),
|
||||||
|
nonsense=None)
|
||||||
|
|
||||||
|
def test_format_function(self):
|
||||||
|
"""Test custom format function for each element in array."""
|
||||||
|
def _format_function(x):
|
||||||
|
if np.abs(x) < 1:
|
||||||
|
return '.'
|
||||||
|
elif np.abs(x) < 2:
|
||||||
|
return 'o'
|
||||||
|
else:
|
||||||
|
return 'O'
|
||||||
|
|
||||||
|
x = np.arange(3)
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
x_hex = "[0x0 0x1 0x2]"
|
||||||
|
x_oct = "[0o0 0o1 0o2]"
|
||||||
|
else:
|
||||||
|
x_hex = "[0x0L 0x1L 0x2L]"
|
||||||
|
x_oct = "[0L 01L 02L]"
|
||||||
|
assert_(np.array2string(x, formatter={'all':_format_function}) ==
|
||||||
|
"[. o O]")
|
||||||
|
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
|
||||||
|
"[. o O]")
|
||||||
|
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
|
||||||
|
"[0.0000 1.0000 2.0000]")
|
||||||
|
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
|
||||||
|
x_hex)
|
||||||
|
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
|
||||||
|
x_oct)
|
||||||
|
|
||||||
|
x = np.arange(3.)
|
||||||
|
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
|
||||||
|
"[0.00 1.00 2.00]")
|
||||||
|
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
|
||||||
|
"[0.00 1.00 2.00]")
|
||||||
|
|
||||||
|
s = np.array(['abc', 'def'])
|
||||||
|
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
|
||||||
|
'[abcabc defdef]')
|
||||||
|
|
||||||
|
|
||||||
|
def test_structure_format(self):
|
||||||
|
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
|
||||||
|
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
|
||||||
|
assert_equal(np.array2string(x),
|
||||||
|
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
|
||||||
|
|
||||||
|
np.set_printoptions(legacy='1.13')
|
||||||
|
try:
|
||||||
|
# for issue #5692
|
||||||
|
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
|
||||||
|
A[5:].fill(np.datetime64('NaT'))
|
||||||
|
assert_equal(
|
||||||
|
np.array2string(A),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
|
||||||
|
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
|
||||||
|
('NaT',) ('NaT',) ('NaT',)]""")
|
||||||
|
)
|
||||||
|
finally:
|
||||||
|
np.set_printoptions(legacy=False)
|
||||||
|
|
||||||
|
# same again, but with non-legacy behavior
|
||||||
|
assert_equal(
|
||||||
|
np.array2string(A),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
|
||||||
|
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
|
||||||
|
('1970-01-01T00:00:00',) ( 'NaT',)
|
||||||
|
( 'NaT',) ( 'NaT',)
|
||||||
|
( 'NaT',) ( 'NaT',)]""")
|
||||||
|
)
|
||||||
|
|
||||||
|
# and again, with timedeltas
|
||||||
|
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
|
||||||
|
A[5:].fill(np.datetime64('NaT'))
|
||||||
|
assert_equal(
|
||||||
|
np.array2string(A),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
|
||||||
|
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
|
||||||
|
)
|
||||||
|
|
||||||
|
# See #8160
|
||||||
|
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
|
||||||
|
assert_equal(np.array2string(struct_int),
|
||||||
|
"[([ 1, -1],) ([123, 1],)]")
|
||||||
|
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
|
||||||
|
dtype=[('B', 'i4', (2, 2))])
|
||||||
|
assert_equal(np.array2string(struct_2dint),
|
||||||
|
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
|
||||||
|
|
||||||
|
# See #8172
|
||||||
|
array_scalar = np.array(
|
||||||
|
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
|
||||||
|
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
|
||||||
|
|
||||||
|
def test_unstructured_void_repr(self):
|
||||||
|
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
|
||||||
|
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
|
||||||
|
assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
|
||||||
|
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
|
||||||
|
assert_equal(repr(a),
|
||||||
|
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
|
||||||
|
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
|
||||||
|
|
||||||
|
assert_equal(eval(repr(a), vars(np)), a)
|
||||||
|
assert_equal(eval(repr(a[0]), vars(np)), a[0])
|
||||||
|
|
||||||
|
def test_edgeitems_kwarg(self):
|
||||||
|
# previously the global print options would be taken over the kwarg
|
||||||
|
arr = np.zeros(3, int)
|
||||||
|
assert_equal(
|
||||||
|
np.array2string(arr, edgeitems=1, threshold=0),
|
||||||
|
"[0 ... 0]"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_summarize_1d(self):
|
||||||
|
A = np.arange(1001)
|
||||||
|
strA = '[ 0 1 2 ... 998 999 1000]'
|
||||||
|
assert_equal(str(A), strA)
|
||||||
|
|
||||||
|
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
|
||||||
|
assert_equal(repr(A), reprA)
|
||||||
|
|
||||||
|
def test_summarize_2d(self):
|
||||||
|
A = np.arange(1002).reshape(2, 501)
|
||||||
|
strA = '[[ 0 1 2 ... 498 499 500]\n' \
|
||||||
|
' [ 501 502 503 ... 999 1000 1001]]'
|
||||||
|
assert_equal(str(A), strA)
|
||||||
|
|
||||||
|
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
|
||||||
|
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
|
||||||
|
assert_equal(repr(A), reprA)
|
||||||
|
|
||||||
|
def test_linewidth(self):
|
||||||
|
a = np.full(6, 1)
|
||||||
|
|
||||||
|
def make_str(a, width, **kw):
|
||||||
|
return np.array2string(a, separator="", max_line_width=width, **kw)
|
||||||
|
|
||||||
|
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
|
||||||
|
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
|
||||||
|
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
|
||||||
|
' 11]')
|
||||||
|
|
||||||
|
assert_equal(make_str(a, 8), '[111111]')
|
||||||
|
assert_equal(make_str(a, 7), '[11111\n'
|
||||||
|
' 1]')
|
||||||
|
assert_equal(make_str(a, 5), '[111\n'
|
||||||
|
' 111]')
|
||||||
|
|
||||||
|
b = a[None,None,:]
|
||||||
|
|
||||||
|
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
|
||||||
|
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
|
||||||
|
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
|
||||||
|
' 1]]]')
|
||||||
|
|
||||||
|
assert_equal(make_str(b, 12), '[[[111111]]]')
|
||||||
|
assert_equal(make_str(b, 9), '[[[111\n'
|
||||||
|
' 111]]]')
|
||||||
|
assert_equal(make_str(b, 8), '[[[11\n'
|
||||||
|
' 11\n'
|
||||||
|
' 11]]]')
|
||||||
|
|
||||||
|
def test_wide_element(self):
|
||||||
|
a = np.array(['xxxxx'])
|
||||||
|
assert_equal(
|
||||||
|
np.array2string(a, max_line_width=5),
|
||||||
|
"['xxxxx']"
|
||||||
|
)
|
||||||
|
assert_equal(
|
||||||
|
np.array2string(a, max_line_width=5, legacy='1.13'),
|
||||||
|
"[ 'xxxxx']"
|
||||||
|
)
|
||||||
|
|
||||||
|
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
|
||||||
|
def test_refcount(self):
|
||||||
|
# make sure we do not hold references to the array due to a recursive
|
||||||
|
# closure (gh-10620)
|
||||||
|
gc.disable()
|
||||||
|
a = np.arange(2)
|
||||||
|
r1 = sys.getrefcount(a)
|
||||||
|
np.array2string(a)
|
||||||
|
np.array2string(a)
|
||||||
|
r2 = sys.getrefcount(a)
|
||||||
|
gc.collect()
|
||||||
|
gc.enable()
|
||||||
|
assert_(r1 == r2)
|
||||||
|
|
||||||
|
class TestPrintOptions(object):
|
||||||
|
"""Test getting and setting global print options."""
|
||||||
|
|
||||||
|
def setup(self):
|
||||||
|
self.oldopts = np.get_printoptions()
|
||||||
|
|
||||||
|
def teardown(self):
|
||||||
|
np.set_printoptions(**self.oldopts)
|
||||||
|
|
||||||
|
def test_basic(self):
|
||||||
|
x = np.array([1.5, 0, 1.234567890])
|
||||||
|
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
|
||||||
|
np.set_printoptions(precision=4)
|
||||||
|
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
|
||||||
|
|
||||||
|
def test_precision_zero(self):
|
||||||
|
np.set_printoptions(precision=0)
|
||||||
|
for values, string in (
|
||||||
|
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
|
||||||
|
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
|
||||||
|
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
|
||||||
|
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
|
||||||
|
x = np.array(values)
|
||||||
|
assert_equal(repr(x), "array([%s])" % string)
|
||||||
|
|
||||||
|
def test_formatter(self):
|
||||||
|
x = np.arange(3)
|
||||||
|
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
|
||||||
|
assert_equal(repr(x), "array([-1, 0, 1])")
|
||||||
|
|
||||||
|
def test_formatter_reset(self):
|
||||||
|
x = np.arange(3)
|
||||||
|
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
|
||||||
|
assert_equal(repr(x), "array([-1, 0, 1])")
|
||||||
|
np.set_printoptions(formatter={'int':None})
|
||||||
|
assert_equal(repr(x), "array([0, 1, 2])")
|
||||||
|
|
||||||
|
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
|
||||||
|
assert_equal(repr(x), "array([-1, 0, 1])")
|
||||||
|
np.set_printoptions(formatter={'all':None})
|
||||||
|
assert_equal(repr(x), "array([0, 1, 2])")
|
||||||
|
|
||||||
|
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
|
||||||
|
assert_equal(repr(x), "array([-1, 0, 1])")
|
||||||
|
np.set_printoptions(formatter={'int_kind':None})
|
||||||
|
assert_equal(repr(x), "array([0, 1, 2])")
|
||||||
|
|
||||||
|
x = np.arange(3.)
|
||||||
|
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
|
||||||
|
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
|
||||||
|
np.set_printoptions(formatter={'float_kind':None})
|
||||||
|
assert_equal(repr(x), "array([0., 1., 2.])")
|
||||||
|
|
||||||
|
def test_0d_arrays(self):
|
||||||
|
unicode = type(u'')
|
||||||
|
|
||||||
|
assert_equal(unicode(np.array(u'café', '<U4')), u'café')
|
||||||
|
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
assert_equal(repr(np.array('café', '<U4')),
|
||||||
|
"array('café', dtype='<U4')")
|
||||||
|
else:
|
||||||
|
assert_equal(repr(np.array(u'café', '<U4')),
|
||||||
|
"array(u'caf\\xe9', dtype='<U4')")
|
||||||
|
assert_equal(str(np.array('test', np.str_)), 'test')
|
||||||
|
|
||||||
|
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
|
||||||
|
assert_equal(str(a[0]), '([0, 0, 0],)')
|
||||||
|
|
||||||
|
assert_equal(repr(np.datetime64('2005-02-25')[...]),
|
||||||
|
"array('2005-02-25', dtype='datetime64[D]')")
|
||||||
|
|
||||||
|
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
|
||||||
|
"array(10, dtype='timedelta64[Y]')")
|
||||||
|
|
||||||
|
# repr of 0d arrays is affected by printoptions
|
||||||
|
x = np.array(1)
|
||||||
|
np.set_printoptions(formatter={'all':lambda x: "test"})
|
||||||
|
assert_equal(repr(x), "array(test)")
|
||||||
|
# str is unaffected
|
||||||
|
assert_equal(str(x), "1")
|
||||||
|
|
||||||
|
# check `style` arg raises
|
||||||
|
assert_warns(DeprecationWarning, np.array2string,
|
||||||
|
np.array(1.), style=repr)
|
||||||
|
# but not in legacy mode
|
||||||
|
np.array2string(np.array(1.), style=repr, legacy='1.13')
|
||||||
|
# gh-10934 style was broken in legacy mode, check it works
|
||||||
|
np.array2string(np.array(1.), legacy='1.13')
|
||||||
|
|
||||||
|
def test_float_spacing(self):
|
||||||
|
x = np.array([1., 2., 3.])
|
||||||
|
y = np.array([1., 2., -10.])
|
||||||
|
z = np.array([100., 2., -1.])
|
||||||
|
w = np.array([-100., 2., 1.])
|
||||||
|
|
||||||
|
assert_equal(repr(x), 'array([1., 2., 3.])')
|
||||||
|
assert_equal(repr(y), 'array([ 1., 2., -10.])')
|
||||||
|
assert_equal(repr(np.array(y[0])), 'array(1.)')
|
||||||
|
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
|
||||||
|
assert_equal(repr(z), 'array([100., 2., -1.])')
|
||||||
|
assert_equal(repr(w), 'array([-100., 2., 1.])')
|
||||||
|
|
||||||
|
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
|
||||||
|
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
|
||||||
|
|
||||||
|
x = np.array([np.inf, 100000, 1.1234])
|
||||||
|
y = np.array([np.inf, 100000, -1.1234])
|
||||||
|
z = np.array([np.inf, 1.1234, -1e120])
|
||||||
|
np.set_printoptions(precision=2)
|
||||||
|
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
|
||||||
|
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
|
||||||
|
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
|
||||||
|
|
||||||
|
def test_bool_spacing(self):
|
||||||
|
assert_equal(repr(np.array([True, True])),
|
||||||
|
'array([ True, True])')
|
||||||
|
assert_equal(repr(np.array([True, False])),
|
||||||
|
'array([ True, False])')
|
||||||
|
assert_equal(repr(np.array([True])),
|
||||||
|
'array([ True])')
|
||||||
|
assert_equal(repr(np.array(True)),
|
||||||
|
'array(True)')
|
||||||
|
assert_equal(repr(np.array(False)),
|
||||||
|
'array(False)')
|
||||||
|
|
||||||
|
def test_sign_spacing(self):
|
||||||
|
a = np.arange(4.)
|
||||||
|
b = np.array([1.234e9])
|
||||||
|
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
|
||||||
|
|
||||||
|
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
|
||||||
|
assert_equal(repr(np.array(1.)), 'array(1.)')
|
||||||
|
assert_equal(repr(b), 'array([1.234e+09])')
|
||||||
|
assert_equal(repr(np.array([0.])), 'array([0.])')
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([1. +1.j , 1.12345679+1.12345679j])")
|
||||||
|
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
|
||||||
|
|
||||||
|
np.set_printoptions(sign=' ')
|
||||||
|
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
|
||||||
|
assert_equal(repr(np.array(1.)), 'array( 1.)')
|
||||||
|
assert_equal(repr(b), 'array([ 1.234e+09])')
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([ 1. +1.j , 1.12345679+1.12345679j])")
|
||||||
|
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
|
||||||
|
|
||||||
|
np.set_printoptions(sign='+')
|
||||||
|
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
|
||||||
|
assert_equal(repr(np.array(1.)), 'array(+1.)')
|
||||||
|
assert_equal(repr(b), 'array([+1.234e+09])')
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([+1. +1.j , +1.12345679+1.12345679j])")
|
||||||
|
|
||||||
|
np.set_printoptions(legacy='1.13')
|
||||||
|
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
|
||||||
|
assert_equal(repr(b), 'array([ 1.23400000e+09])')
|
||||||
|
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
|
||||||
|
assert_equal(repr(np.array(1.)), 'array(1.0)')
|
||||||
|
assert_equal(repr(np.array([0.])), 'array([ 0.])')
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
|
||||||
|
# gh-10383
|
||||||
|
assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
|
||||||
|
|
||||||
|
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
|
||||||
|
|
||||||
|
def test_float_overflow_nowarn(self):
|
||||||
|
# make sure internal computations in FloatingFormat don't
|
||||||
|
# warn about overflow
|
||||||
|
repr(np.array([1e4, 0.1], dtype='f2'))
|
||||||
|
|
||||||
|
def test_sign_spacing_structured(self):
|
||||||
|
a = np.ones(2, dtype='<f,<f')
|
||||||
|
assert_equal(repr(a),
|
||||||
|
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
|
||||||
|
assert_equal(repr(a[0]), "(1., 1.)")
|
||||||
|
|
||||||
|
def test_floatmode(self):
|
||||||
|
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
|
||||||
|
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
|
||||||
|
y = np.array([0.2918820979355541, 0.5064172631089138,
|
||||||
|
0.2848750619642916, 0.4342965294660567,
|
||||||
|
0.7326538397312751, 0.3459503329096204,
|
||||||
|
0.0862072768214508, 0.39112753029631175],
|
||||||
|
dtype=np.float64)
|
||||||
|
z = np.arange(6, dtype=np.float16)/10
|
||||||
|
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
|
||||||
|
|
||||||
|
# also make sure 1e23 is right (is between two fp numbers)
|
||||||
|
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
|
||||||
|
# note: we construct w from the strings `1eXX` instead of doing
|
||||||
|
# `10.**arange(24)` because it turns out the two are not equivalent in
|
||||||
|
# python. On some architectures `1e23 != 10.**23`.
|
||||||
|
wp = np.array([1.234e1, 1e2, 1e123])
|
||||||
|
|
||||||
|
# unique mode
|
||||||
|
np.set_printoptions(floatmode='unique')
|
||||||
|
assert_equal(repr(x),
|
||||||
|
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
|
||||||
|
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
|
||||||
|
assert_equal(repr(y),
|
||||||
|
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
|
||||||
|
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
|
||||||
|
" 0.0862072768214508 , 0.39112753029631175])")
|
||||||
|
assert_equal(repr(z),
|
||||||
|
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
||||||
|
assert_equal(repr(w),
|
||||||
|
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
|
||||||
|
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
|
||||||
|
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
|
||||||
|
" 1.e+24])")
|
||||||
|
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([1. +1.j , 1.123456789+1.123456789j])")
|
||||||
|
|
||||||
|
# maxprec mode, precision=8
|
||||||
|
np.set_printoptions(floatmode='maxprec', precision=8)
|
||||||
|
assert_equal(repr(x),
|
||||||
|
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
|
||||||
|
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
|
||||||
|
assert_equal(repr(y),
|
||||||
|
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
|
||||||
|
" 0.34595033, 0.08620728, 0.39112753])")
|
||||||
|
assert_equal(repr(z),
|
||||||
|
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
||||||
|
assert_equal(repr(w[::5]),
|
||||||
|
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
|
||||||
|
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([1. +1.j , 1.12345679+1.12345679j])")
|
||||||
|
|
||||||
|
# fixed mode, precision=4
|
||||||
|
np.set_printoptions(floatmode='fixed', precision=4)
|
||||||
|
assert_equal(repr(x),
|
||||||
|
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
|
||||||
|
" 0.2383, 0.4226], dtype=float16)")
|
||||||
|
assert_equal(repr(y),
|
||||||
|
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
|
||||||
|
assert_equal(repr(z),
|
||||||
|
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
|
||||||
|
assert_equal(repr(w[::5]),
|
||||||
|
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
|
||||||
|
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
|
||||||
|
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([1.0000+1.0000j, 1.1235+1.1235j])")
|
||||||
|
# for larger precision, representation error becomes more apparent:
|
||||||
|
np.set_printoptions(floatmode='fixed', precision=8)
|
||||||
|
assert_equal(repr(z),
|
||||||
|
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
|
||||||
|
" 0.50000000], dtype=float16)")
|
||||||
|
|
||||||
|
# maxprec_equal mode, precision=8
|
||||||
|
np.set_printoptions(floatmode='maxprec_equal', precision=8)
|
||||||
|
assert_equal(repr(x),
|
||||||
|
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
|
||||||
|
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
|
||||||
|
assert_equal(repr(y),
|
||||||
|
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
|
||||||
|
" 0.34595033, 0.08620728, 0.39112753])")
|
||||||
|
assert_equal(repr(z),
|
||||||
|
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
||||||
|
assert_equal(repr(w[::5]),
|
||||||
|
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
|
||||||
|
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
||||||
|
assert_equal(repr(c),
|
||||||
|
"array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
|
||||||
|
|
||||||
|
def test_legacy_mode_scalars(self):
|
||||||
|
# in legacy mode, str of floats get truncated, and complex scalars
|
||||||
|
# use * for non-finite imaginary part
|
||||||
|
np.set_printoptions(legacy='1.13')
|
||||||
|
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
|
||||||
|
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
|
||||||
|
|
||||||
|
np.set_printoptions(legacy=False)
|
||||||
|
assert_equal(str(np.float64(1.123456789123456789)),
|
||||||
|
'1.1234567891234568')
|
||||||
|
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
|
||||||
|
|
||||||
|
def test_legacy_stray_comma(self):
|
||||||
|
np.set_printoptions(legacy='1.13')
|
||||||
|
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
|
||||||
|
|
||||||
|
np.set_printoptions(legacy=False)
|
||||||
|
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
|
||||||
|
|
||||||
|
def test_dtype_linewidth_wrapping(self):
|
||||||
|
np.set_printoptions(linewidth=75)
|
||||||
|
assert_equal(repr(np.arange(10,20., dtype='f4')),
|
||||||
|
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
|
||||||
|
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
|
||||||
|
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
|
||||||
|
dtype=float32)"""))
|
||||||
|
|
||||||
|
styp = '<U4' if sys.version_info[0] >= 3 else '|S4'
|
||||||
|
assert_equal(repr(np.ones(3, dtype=styp)),
|
||||||
|
"array(['1', '1', '1'], dtype='{}')".format(styp))
|
||||||
|
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
|
||||||
|
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
|
||||||
|
dtype='{}')""".format(styp)))
|
||||||
|
|
||||||
|
def test_linewidth_repr(self):
|
||||||
|
a = np.full(7, fill_value=2)
|
||||||
|
np.set_printoptions(linewidth=17)
|
||||||
|
assert_equal(
|
||||||
|
repr(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([2, 2, 2,
|
||||||
|
2, 2, 2,
|
||||||
|
2])""")
|
||||||
|
)
|
||||||
|
np.set_printoptions(linewidth=17, legacy='1.13')
|
||||||
|
assert_equal(
|
||||||
|
repr(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([2, 2, 2,
|
||||||
|
2, 2, 2, 2])""")
|
||||||
|
)
|
||||||
|
|
||||||
|
a = np.full(8, fill_value=2)
|
||||||
|
|
||||||
|
np.set_printoptions(linewidth=18, legacy=False)
|
||||||
|
assert_equal(
|
||||||
|
repr(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([2, 2, 2,
|
||||||
|
2, 2, 2,
|
||||||
|
2, 2])""")
|
||||||
|
)
|
||||||
|
|
||||||
|
np.set_printoptions(linewidth=18, legacy='1.13')
|
||||||
|
assert_equal(
|
||||||
|
repr(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([2, 2, 2, 2,
|
||||||
|
2, 2, 2, 2])""")
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_linewidth_str(self):
|
||||||
|
a = np.full(18, fill_value=2)
|
||||||
|
np.set_printoptions(linewidth=18)
|
||||||
|
assert_equal(
|
||||||
|
str(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
[2 2 2 2 2 2 2 2
|
||||||
|
2 2 2 2 2 2 2 2
|
||||||
|
2 2]""")
|
||||||
|
)
|
||||||
|
np.set_printoptions(linewidth=18, legacy='1.13')
|
||||||
|
assert_equal(
|
||||||
|
str(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
[2 2 2 2 2 2 2 2 2
|
||||||
|
2 2 2 2 2 2 2 2 2]""")
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_edgeitems(self):
|
||||||
|
np.set_printoptions(edgeitems=1, threshold=1)
|
||||||
|
a = np.arange(27).reshape((3, 3, 3))
|
||||||
|
assert_equal(
|
||||||
|
repr(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([[[ 0, ..., 2],
|
||||||
|
...,
|
||||||
|
[ 6, ..., 8]],
|
||||||
|
|
||||||
|
...,
|
||||||
|
|
||||||
|
[[18, ..., 20],
|
||||||
|
...,
|
||||||
|
[24, ..., 26]]])""")
|
||||||
|
)
|
||||||
|
|
||||||
|
b = np.zeros((3, 3, 1, 1))
|
||||||
|
assert_equal(
|
||||||
|
repr(b),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([[[[0.]],
|
||||||
|
|
||||||
|
...,
|
||||||
|
|
||||||
|
[[0.]]],
|
||||||
|
|
||||||
|
|
||||||
|
...,
|
||||||
|
|
||||||
|
|
||||||
|
[[[0.]],
|
||||||
|
|
||||||
|
...,
|
||||||
|
|
||||||
|
[[0.]]]])""")
|
||||||
|
)
|
||||||
|
|
||||||
|
# 1.13 had extra trailing spaces, and was missing newlines
|
||||||
|
np.set_printoptions(legacy='1.13')
|
||||||
|
|
||||||
|
assert_equal(
|
||||||
|
repr(a),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([[[ 0, ..., 2],
|
||||||
|
...,
|
||||||
|
[ 6, ..., 8]],
|
||||||
|
|
||||||
|
...,
|
||||||
|
[[18, ..., 20],
|
||||||
|
...,
|
||||||
|
[24, ..., 26]]])""")
|
||||||
|
)
|
||||||
|
|
||||||
|
assert_equal(
|
||||||
|
repr(b),
|
||||||
|
textwrap.dedent("""\
|
||||||
|
array([[[[ 0.]],
|
||||||
|
|
||||||
|
...,
|
||||||
|
[[ 0.]]],
|
||||||
|
|
||||||
|
|
||||||
|
...,
|
||||||
|
[[[ 0.]],
|
||||||
|
|
||||||
|
...,
|
||||||
|
[[ 0.]]]])""")
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_bad_args(self):
|
||||||
|
assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
|
||||||
|
assert_raises(TypeError, np.set_printoptions, threshold='1')
|
||||||
|
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
|
||||||
|
|
||||||
|
def test_unicode_object_array():
|
||||||
|
import sys
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
expected = "array(['é'], dtype=object)"
|
||||||
|
else:
|
||||||
|
expected = "array([u'\\xe9'], dtype=object)"
|
||||||
|
x = np.array([u'\xe9'], dtype=object)
|
||||||
|
assert_equal(repr(x), expected)
|
||||||
|
|
||||||
|
|
||||||
|
class TestContextManager(object):
|
||||||
|
def test_ctx_mgr(self):
|
||||||
|
# test that context manager actuall works
|
||||||
|
with np.printoptions(precision=2):
|
||||||
|
s = str(np.array([2.0]) / 3)
|
||||||
|
assert_equal(s, '[0.67]')
|
||||||
|
|
||||||
|
def test_ctx_mgr_restores(self):
|
||||||
|
# test that print options are actually restrored
|
||||||
|
opts = np.get_printoptions()
|
||||||
|
with np.printoptions(precision=opts['precision'] - 1,
|
||||||
|
linewidth=opts['linewidth'] - 4):
|
||||||
|
pass
|
||||||
|
assert_equal(np.get_printoptions(), opts)
|
||||||
|
|
||||||
|
def test_ctx_mgr_exceptions(self):
|
||||||
|
# test that print options are restored even if an exception is raised
|
||||||
|
opts = np.get_printoptions()
|
||||||
|
try:
|
||||||
|
with np.printoptions(precision=2, linewidth=11):
|
||||||
|
raise ValueError
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
assert_equal(np.get_printoptions(), opts)
|
||||||
|
|
||||||
|
def test_ctx_mgr_as_smth(self):
|
||||||
|
opts = {"precision": 2}
|
||||||
|
with np.printoptions(**opts) as ctx:
|
||||||
|
saved_opts = ctx.copy()
|
||||||
|
assert_equal({k: saved_opts[k] for k in opts}, opts)
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,692 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from numpy.core.multiarray import _vec_string
|
||||||
|
from numpy.testing import (
|
||||||
|
assert_, assert_equal, assert_array_equal, assert_raises,
|
||||||
|
assert_raises_regex, suppress_warnings,
|
||||||
|
)
|
||||||
|
|
||||||
|
kw_unicode_true = {'unicode': True} # make 2to3 work properly
|
||||||
|
kw_unicode_false = {'unicode': False}
|
||||||
|
|
||||||
|
class TestBasic(object):
|
||||||
|
def test_from_object_array(self):
|
||||||
|
A = np.array([['abc', 2],
|
||||||
|
['long ', '0123456789']], dtype='O')
|
||||||
|
B = np.char.array(A)
|
||||||
|
assert_equal(B.dtype.itemsize, 10)
|
||||||
|
assert_array_equal(B, [[b'abc', b'2'],
|
||||||
|
[b'long', b'0123456789']])
|
||||||
|
|
||||||
|
def test_from_object_array_unicode(self):
|
||||||
|
A = np.array([['abc', u'Sigma \u03a3'],
|
||||||
|
['long ', '0123456789']], dtype='O')
|
||||||
|
assert_raises(ValueError, np.char.array, (A,))
|
||||||
|
B = np.char.array(A, **kw_unicode_true)
|
||||||
|
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
|
||||||
|
assert_array_equal(B, [['abc', u'Sigma \u03a3'],
|
||||||
|
['long', '0123456789']])
|
||||||
|
|
||||||
|
def test_from_string_array(self):
|
||||||
|
A = np.array([[b'abc', b'foo'],
|
||||||
|
[b'long ', b'0123456789']])
|
||||||
|
assert_equal(A.dtype.type, np.string_)
|
||||||
|
B = np.char.array(A)
|
||||||
|
assert_array_equal(B, A)
|
||||||
|
assert_equal(B.dtype, A.dtype)
|
||||||
|
assert_equal(B.shape, A.shape)
|
||||||
|
B[0, 0] = 'changed'
|
||||||
|
assert_(B[0, 0] != A[0, 0])
|
||||||
|
C = np.char.asarray(A)
|
||||||
|
assert_array_equal(C, A)
|
||||||
|
assert_equal(C.dtype, A.dtype)
|
||||||
|
C[0, 0] = 'changed again'
|
||||||
|
assert_(C[0, 0] != B[0, 0])
|
||||||
|
assert_(C[0, 0] == A[0, 0])
|
||||||
|
|
||||||
|
def test_from_unicode_array(self):
|
||||||
|
A = np.array([['abc', u'Sigma \u03a3'],
|
||||||
|
['long ', '0123456789']])
|
||||||
|
assert_equal(A.dtype.type, np.unicode_)
|
||||||
|
B = np.char.array(A)
|
||||||
|
assert_array_equal(B, A)
|
||||||
|
assert_equal(B.dtype, A.dtype)
|
||||||
|
assert_equal(B.shape, A.shape)
|
||||||
|
B = np.char.array(A, **kw_unicode_true)
|
||||||
|
assert_array_equal(B, A)
|
||||||
|
assert_equal(B.dtype, A.dtype)
|
||||||
|
assert_equal(B.shape, A.shape)
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
np.char.array(A, **kw_unicode_false)
|
||||||
|
|
||||||
|
assert_raises(UnicodeEncodeError, fail)
|
||||||
|
|
||||||
|
def test_unicode_upconvert(self):
|
||||||
|
A = np.char.array(['abc'])
|
||||||
|
B = np.char.array([u'\u03a3'])
|
||||||
|
assert_(issubclass((A + B).dtype.type, np.unicode_))
|
||||||
|
|
||||||
|
def test_from_string(self):
|
||||||
|
A = np.char.array(b'abc')
|
||||||
|
assert_equal(len(A), 1)
|
||||||
|
assert_equal(len(A[0]), 3)
|
||||||
|
assert_(issubclass(A.dtype.type, np.string_))
|
||||||
|
|
||||||
|
def test_from_unicode(self):
|
||||||
|
A = np.char.array(u'\u03a3')
|
||||||
|
assert_equal(len(A), 1)
|
||||||
|
assert_equal(len(A[0]), 1)
|
||||||
|
assert_equal(A.itemsize, 4)
|
||||||
|
assert_(issubclass(A.dtype.type, np.unicode_))
|
||||||
|
|
||||||
|
class TestVecString(object):
|
||||||
|
def test_non_existent_method(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
_vec_string('a', np.string_, 'bogus')
|
||||||
|
|
||||||
|
assert_raises(AttributeError, fail)
|
||||||
|
|
||||||
|
def test_non_string_array(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
_vec_string(1, np.string_, 'strip')
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
|
||||||
|
def test_invalid_args_tuple(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
_vec_string(['a'], np.string_, 'strip', 1)
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
|
||||||
|
def test_invalid_type_descr(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
_vec_string(['a'], 'BOGUS', 'strip')
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
|
||||||
|
def test_invalid_function_args(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
_vec_string(['a'], np.string_, 'strip', (1,))
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
|
||||||
|
def test_invalid_result_type(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
_vec_string(['a'], np.integer, 'strip')
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
|
||||||
|
def test_broadcast_error(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
_vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],))
|
||||||
|
|
||||||
|
assert_raises(ValueError, fail)
|
||||||
|
|
||||||
|
|
||||||
|
class TestWhitespace(object):
|
||||||
|
def setup(self):
|
||||||
|
self.A = np.array([['abc ', '123 '],
|
||||||
|
['789 ', 'xyz ']]).view(np.chararray)
|
||||||
|
self.B = np.array([['abc', '123'],
|
||||||
|
['789', 'xyz']]).view(np.chararray)
|
||||||
|
|
||||||
|
def test1(self):
|
||||||
|
assert_(np.all(self.A == self.B))
|
||||||
|
assert_(np.all(self.A >= self.B))
|
||||||
|
assert_(np.all(self.A <= self.B))
|
||||||
|
assert_(not np.any(self.A > self.B))
|
||||||
|
assert_(not np.any(self.A < self.B))
|
||||||
|
assert_(not np.any(self.A != self.B))
|
||||||
|
|
||||||
|
class TestChar(object):
|
||||||
|
def setup(self):
|
||||||
|
self.A = np.array('abc1', dtype='c').view(np.chararray)
|
||||||
|
|
||||||
|
def test_it(self):
|
||||||
|
assert_equal(self.A.shape, (4,))
|
||||||
|
assert_equal(self.A.upper()[:2].tobytes(), b'AB')
|
||||||
|
|
||||||
|
class TestComparisons(object):
|
||||||
|
def setup(self):
|
||||||
|
self.A = np.array([['abc', '123'],
|
||||||
|
['789', 'xyz']]).view(np.chararray)
|
||||||
|
self.B = np.array([['efg', '123 '],
|
||||||
|
['051', 'tuv']]).view(np.chararray)
|
||||||
|
|
||||||
|
def test_not_equal(self):
|
||||||
|
assert_array_equal((self.A != self.B), [[True, False], [True, True]])
|
||||||
|
|
||||||
|
def test_equal(self):
|
||||||
|
assert_array_equal((self.A == self.B), [[False, True], [False, False]])
|
||||||
|
|
||||||
|
def test_greater_equal(self):
|
||||||
|
assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
|
||||||
|
|
||||||
|
def test_less_equal(self):
|
||||||
|
assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
|
||||||
|
|
||||||
|
def test_greater(self):
|
||||||
|
assert_array_equal((self.A > self.B), [[False, False], [True, True]])
|
||||||
|
|
||||||
|
def test_less(self):
|
||||||
|
assert_array_equal((self.A < self.B), [[True, False], [False, False]])
|
||||||
|
|
||||||
|
class TestComparisonsMixed1(TestComparisons):
|
||||||
|
"""Ticket #1276"""
|
||||||
|
|
||||||
|
def setup(self):
|
||||||
|
TestComparisons.setup(self)
|
||||||
|
self.B = np.array([['efg', '123 '],
|
||||||
|
['051', 'tuv']], np.unicode_).view(np.chararray)
|
||||||
|
|
||||||
|
class TestComparisonsMixed2(TestComparisons):
|
||||||
|
"""Ticket #1276"""
|
||||||
|
|
||||||
|
def setup(self):
|
||||||
|
TestComparisons.setup(self)
|
||||||
|
self.A = np.array([['abc', '123'],
|
||||||
|
['789', 'xyz']], np.unicode_).view(np.chararray)
|
||||||
|
|
||||||
|
class TestInformation(object):
|
||||||
|
def setup(self):
|
||||||
|
self.A = np.array([[' abc ', ''],
|
||||||
|
['12345', 'MixedCase'],
|
||||||
|
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
|
||||||
|
self.B = np.array([[u' \u03a3 ', u''],
|
||||||
|
[u'12345', u'MixedCase'],
|
||||||
|
[u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
|
||||||
|
|
||||||
|
def test_len(self):
|
||||||
|
assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
|
||||||
|
assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
|
||||||
|
assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
|
||||||
|
|
||||||
|
def test_count(self):
|
||||||
|
assert_(issubclass(self.A.count('').dtype.type, np.integer))
|
||||||
|
assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
|
||||||
|
assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
|
||||||
|
# Python doesn't seem to like counting NULL characters
|
||||||
|
# assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
|
||||||
|
assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
|
||||||
|
assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
|
||||||
|
assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
|
||||||
|
# assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
|
||||||
|
|
||||||
|
def test_endswith(self):
|
||||||
|
assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
|
||||||
|
assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
self.A.endswith('3', 'fdjk')
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
|
||||||
|
def test_find(self):
|
||||||
|
assert_(issubclass(self.A.find('a').dtype.type, np.integer))
|
||||||
|
assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
|
||||||
|
assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
|
||||||
|
assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
|
||||||
|
assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
|
||||||
|
|
||||||
|
def test_index(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
self.A.index('a')
|
||||||
|
|
||||||
|
assert_raises(ValueError, fail)
|
||||||
|
assert_(np.char.index('abcba', 'b') == 1)
|
||||||
|
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
|
||||||
|
|
||||||
|
def test_isalnum(self):
|
||||||
|
assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
|
||||||
|
|
||||||
|
def test_isalpha(self):
|
||||||
|
assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
|
||||||
|
|
||||||
|
def test_isdigit(self):
|
||||||
|
assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
|
||||||
|
|
||||||
|
def test_islower(self):
|
||||||
|
assert_(issubclass(self.A.islower().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
|
||||||
|
|
||||||
|
def test_isspace(self):
|
||||||
|
assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
|
||||||
|
|
||||||
|
def test_istitle(self):
|
||||||
|
assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
|
||||||
|
|
||||||
|
def test_isupper(self):
|
||||||
|
assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
|
||||||
|
|
||||||
|
def test_rfind(self):
|
||||||
|
assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
|
||||||
|
assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
|
||||||
|
assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
|
||||||
|
assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
|
||||||
|
assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
|
||||||
|
|
||||||
|
def test_rindex(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
self.A.rindex('a')
|
||||||
|
|
||||||
|
assert_raises(ValueError, fail)
|
||||||
|
assert_(np.char.rindex('abcba', 'b') == 3)
|
||||||
|
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
|
||||||
|
|
||||||
|
def test_startswith(self):
|
||||||
|
assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
|
||||||
|
assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
self.A.startswith('3', 'fdjk')
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
|
||||||
|
|
||||||
|
class TestMethods(object):
|
||||||
|
def setup(self):
|
||||||
|
self.A = np.array([[' abc ', ''],
|
||||||
|
['12345', 'MixedCase'],
|
||||||
|
['123 \t 345 \0 ', 'UPPER']],
|
||||||
|
dtype='S').view(np.chararray)
|
||||||
|
self.B = np.array([[u' \u03a3 ', u''],
|
||||||
|
[u'12345', u'MixedCase'],
|
||||||
|
[u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
|
||||||
|
|
||||||
|
def test_capitalize(self):
|
||||||
|
tgt = [[b' abc ', b''],
|
||||||
|
[b'12345', b'Mixedcase'],
|
||||||
|
[b'123 \t 345 \0 ', b'Upper']]
|
||||||
|
assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
|
||||||
|
assert_array_equal(self.A.capitalize(), tgt)
|
||||||
|
|
||||||
|
tgt = [[u' \u03c3 ', ''],
|
||||||
|
['12345', 'Mixedcase'],
|
||||||
|
['123 \t 345 \0 ', 'Upper']]
|
||||||
|
assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.capitalize(), tgt)
|
||||||
|
|
||||||
|
def test_center(self):
|
||||||
|
assert_(issubclass(self.A.center(10).dtype.type, np.string_))
|
||||||
|
C = self.A.center([10, 20])
|
||||||
|
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
|
||||||
|
|
||||||
|
C = self.A.center(20, b'#')
|
||||||
|
assert_(np.all(C.startswith(b'#')))
|
||||||
|
assert_(np.all(C.endswith(b'#')))
|
||||||
|
|
||||||
|
C = np.char.center(b'FOO', [[10, 20], [15, 8]])
|
||||||
|
tgt = [[b' FOO ', b' FOO '],
|
||||||
|
[b' FOO ', b' FOO ']]
|
||||||
|
assert_(issubclass(C.dtype.type, np.string_))
|
||||||
|
assert_array_equal(C, tgt)
|
||||||
|
|
||||||
|
def test_decode(self):
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
A = np.char.array([b'\\u03a3'])
|
||||||
|
assert_(A.decode('unicode-escape')[0] == '\u03a3')
|
||||||
|
else:
|
||||||
|
with suppress_warnings() as sup:
|
||||||
|
if sys.py3kwarning:
|
||||||
|
sup.filter(DeprecationWarning, "'hex_codec'")
|
||||||
|
A = np.char.array(['736563726574206d657373616765'])
|
||||||
|
assert_(A.decode('hex_codec')[0] == 'secret message')
|
||||||
|
|
||||||
|
def test_encode(self):
|
||||||
|
B = self.B.encode('unicode_escape')
|
||||||
|
assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
|
||||||
|
|
||||||
|
def test_expandtabs(self):
|
||||||
|
T = self.A.expandtabs()
|
||||||
|
assert_(T[2, 0] == b'123 345 \0')
|
||||||
|
|
||||||
|
def test_join(self):
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
# NOTE: list(b'123') == [49, 50, 51]
|
||||||
|
# so that b','.join(b'123') results to an error on Py3
|
||||||
|
A0 = self.A.decode('ascii')
|
||||||
|
else:
|
||||||
|
A0 = self.A
|
||||||
|
|
||||||
|
A = np.char.join([',', '#'], A0)
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
assert_(issubclass(A.dtype.type, np.unicode_))
|
||||||
|
else:
|
||||||
|
assert_(issubclass(A.dtype.type, np.string_))
|
||||||
|
tgt = np.array([[' ,a,b,c, ', ''],
|
||||||
|
['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
|
||||||
|
['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
|
||||||
|
assert_array_equal(np.char.join([',', '#'], A0), tgt)
|
||||||
|
|
||||||
|
def test_ljust(self):
|
||||||
|
assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
|
||||||
|
|
||||||
|
C = self.A.ljust([10, 20])
|
||||||
|
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
|
||||||
|
|
||||||
|
C = self.A.ljust(20, b'#')
|
||||||
|
assert_array_equal(C.startswith(b'#'), [
|
||||||
|
[False, True], [False, False], [False, False]])
|
||||||
|
assert_(np.all(C.endswith(b'#')))
|
||||||
|
|
||||||
|
C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
|
||||||
|
tgt = [[b'FOO ', b'FOO '],
|
||||||
|
[b'FOO ', b'FOO ']]
|
||||||
|
assert_(issubclass(C.dtype.type, np.string_))
|
||||||
|
assert_array_equal(C, tgt)
|
||||||
|
|
||||||
|
def test_lower(self):
|
||||||
|
tgt = [[b' abc ', b''],
|
||||||
|
[b'12345', b'mixedcase'],
|
||||||
|
[b'123 \t 345 \0 ', b'upper']]
|
||||||
|
assert_(issubclass(self.A.lower().dtype.type, np.string_))
|
||||||
|
assert_array_equal(self.A.lower(), tgt)
|
||||||
|
|
||||||
|
tgt = [[u' \u03c3 ', u''],
|
||||||
|
[u'12345', u'mixedcase'],
|
||||||
|
[u'123 \t 345 \0 ', u'upper']]
|
||||||
|
assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.lower(), tgt)
|
||||||
|
|
||||||
|
def test_lstrip(self):
|
||||||
|
tgt = [[b'abc ', b''],
|
||||||
|
[b'12345', b'MixedCase'],
|
||||||
|
[b'123 \t 345 \0 ', b'UPPER']]
|
||||||
|
assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
|
||||||
|
assert_array_equal(self.A.lstrip(), tgt)
|
||||||
|
|
||||||
|
tgt = [[b' abc', b''],
|
||||||
|
[b'2345', b'ixedCase'],
|
||||||
|
[b'23 \t 345 \x00', b'UPPER']]
|
||||||
|
assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
|
||||||
|
|
||||||
|
tgt = [[u'\u03a3 ', ''],
|
||||||
|
['12345', 'MixedCase'],
|
||||||
|
['123 \t 345 \0 ', 'UPPER']]
|
||||||
|
assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.lstrip(), tgt)
|
||||||
|
|
||||||
|
def test_partition(self):
|
||||||
|
P = self.A.partition([b'3', b'M'])
|
||||||
|
tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
|
||||||
|
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
|
||||||
|
[(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
|
||||||
|
assert_(issubclass(P.dtype.type, np.string_))
|
||||||
|
assert_array_equal(P, tgt)
|
||||||
|
|
||||||
|
def test_replace(self):
|
||||||
|
R = self.A.replace([b'3', b'a'],
|
||||||
|
[b'##########', b'@'])
|
||||||
|
tgt = [[b' abc ', b''],
|
||||||
|
[b'12##########45', b'MixedC@se'],
|
||||||
|
[b'12########## \t ##########45 \x00', b'UPPER']]
|
||||||
|
assert_(issubclass(R.dtype.type, np.string_))
|
||||||
|
assert_array_equal(R, tgt)
|
||||||
|
|
||||||
|
if sys.version_info[0] < 3:
|
||||||
|
# NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3
|
||||||
|
R = self.A.replace(b'a', u'\u03a3')
|
||||||
|
tgt = [[u' \u03a3bc ', ''],
|
||||||
|
['12345', u'MixedC\u03a3se'],
|
||||||
|
['123 \t 345 \x00', 'UPPER']]
|
||||||
|
assert_(issubclass(R.dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(R, tgt)
|
||||||
|
|
||||||
|
def test_rjust(self):
|
||||||
|
assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
|
||||||
|
|
||||||
|
C = self.A.rjust([10, 20])
|
||||||
|
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
|
||||||
|
|
||||||
|
C = self.A.rjust(20, b'#')
|
||||||
|
assert_(np.all(C.startswith(b'#')))
|
||||||
|
assert_array_equal(C.endswith(b'#'),
|
||||||
|
[[False, True], [False, False], [False, False]])
|
||||||
|
|
||||||
|
C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
|
||||||
|
tgt = [[b' FOO', b' FOO'],
|
||||||
|
[b' FOO', b' FOO']]
|
||||||
|
assert_(issubclass(C.dtype.type, np.string_))
|
||||||
|
assert_array_equal(C, tgt)
|
||||||
|
|
||||||
|
def test_rpartition(self):
|
||||||
|
P = self.A.rpartition([b'3', b'M'])
|
||||||
|
tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
|
||||||
|
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
|
||||||
|
[(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
|
||||||
|
assert_(issubclass(P.dtype.type, np.string_))
|
||||||
|
assert_array_equal(P, tgt)
|
||||||
|
|
||||||
|
def test_rsplit(self):
|
||||||
|
A = self.A.rsplit(b'3')
|
||||||
|
tgt = [[[b' abc '], [b'']],
|
||||||
|
[[b'12', b'45'], [b'MixedCase']],
|
||||||
|
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
|
||||||
|
assert_(issubclass(A.dtype.type, np.object_))
|
||||||
|
assert_equal(A.tolist(), tgt)
|
||||||
|
|
||||||
|
def test_rstrip(self):
|
||||||
|
assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
|
||||||
|
|
||||||
|
tgt = [[b' abc', b''],
|
||||||
|
[b'12345', b'MixedCase'],
|
||||||
|
[b'123 \t 345', b'UPPER']]
|
||||||
|
assert_array_equal(self.A.rstrip(), tgt)
|
||||||
|
|
||||||
|
tgt = [[b' abc ', b''],
|
||||||
|
[b'1234', b'MixedCase'],
|
||||||
|
[b'123 \t 345 \x00', b'UPP']
|
||||||
|
]
|
||||||
|
assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
|
||||||
|
|
||||||
|
tgt = [[u' \u03a3', ''],
|
||||||
|
['12345', 'MixedCase'],
|
||||||
|
['123 \t 345', 'UPPER']]
|
||||||
|
assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.rstrip(), tgt)
|
||||||
|
|
||||||
|
def test_strip(self):
|
||||||
|
tgt = [[b'abc', b''],
|
||||||
|
[b'12345', b'MixedCase'],
|
||||||
|
[b'123 \t 345', b'UPPER']]
|
||||||
|
assert_(issubclass(self.A.strip().dtype.type, np.string_))
|
||||||
|
assert_array_equal(self.A.strip(), tgt)
|
||||||
|
|
||||||
|
tgt = [[b' abc ', b''],
|
||||||
|
[b'234', b'ixedCas'],
|
||||||
|
[b'23 \t 345 \x00', b'UPP']]
|
||||||
|
assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
|
||||||
|
|
||||||
|
tgt = [[u'\u03a3', ''],
|
||||||
|
['12345', 'MixedCase'],
|
||||||
|
['123 \t 345', 'UPPER']]
|
||||||
|
assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.strip(), tgt)
|
||||||
|
|
||||||
|
def test_split(self):
|
||||||
|
A = self.A.split(b'3')
|
||||||
|
tgt = [
|
||||||
|
[[b' abc '], [b'']],
|
||||||
|
[[b'12', b'45'], [b'MixedCase']],
|
||||||
|
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
|
||||||
|
assert_(issubclass(A.dtype.type, np.object_))
|
||||||
|
assert_equal(A.tolist(), tgt)
|
||||||
|
|
||||||
|
def test_splitlines(self):
|
||||||
|
A = np.char.array(['abc\nfds\nwer']).splitlines()
|
||||||
|
assert_(issubclass(A.dtype.type, np.object_))
|
||||||
|
assert_(A.shape == (1,))
|
||||||
|
assert_(len(A[0]) == 3)
|
||||||
|
|
||||||
|
def test_swapcase(self):
|
||||||
|
tgt = [[b' ABC ', b''],
|
||||||
|
[b'12345', b'mIXEDcASE'],
|
||||||
|
[b'123 \t 345 \0 ', b'upper']]
|
||||||
|
assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
|
||||||
|
assert_array_equal(self.A.swapcase(), tgt)
|
||||||
|
|
||||||
|
tgt = [[u' \u03c3 ', u''],
|
||||||
|
[u'12345', u'mIXEDcASE'],
|
||||||
|
[u'123 \t 345 \0 ', u'upper']]
|
||||||
|
assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.swapcase(), tgt)
|
||||||
|
|
||||||
|
def test_title(self):
|
||||||
|
tgt = [[b' Abc ', b''],
|
||||||
|
[b'12345', b'Mixedcase'],
|
||||||
|
[b'123 \t 345 \0 ', b'Upper']]
|
||||||
|
assert_(issubclass(self.A.title().dtype.type, np.string_))
|
||||||
|
assert_array_equal(self.A.title(), tgt)
|
||||||
|
|
||||||
|
tgt = [[u' \u03a3 ', u''],
|
||||||
|
[u'12345', u'Mixedcase'],
|
||||||
|
[u'123 \t 345 \0 ', u'Upper']]
|
||||||
|
assert_(issubclass(self.B.title().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.title(), tgt)
|
||||||
|
|
||||||
|
def test_upper(self):
|
||||||
|
tgt = [[b' ABC ', b''],
|
||||||
|
[b'12345', b'MIXEDCASE'],
|
||||||
|
[b'123 \t 345 \0 ', b'UPPER']]
|
||||||
|
assert_(issubclass(self.A.upper().dtype.type, np.string_))
|
||||||
|
assert_array_equal(self.A.upper(), tgt)
|
||||||
|
|
||||||
|
tgt = [[u' \u03a3 ', u''],
|
||||||
|
[u'12345', u'MIXEDCASE'],
|
||||||
|
[u'123 \t 345 \0 ', u'UPPER']]
|
||||||
|
assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
|
||||||
|
assert_array_equal(self.B.upper(), tgt)
|
||||||
|
|
||||||
|
def test_isnumeric(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
self.A.isnumeric()
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.B.isnumeric(), [
|
||||||
|
[False, False], [True, False], [False, False]])
|
||||||
|
|
||||||
|
def test_isdecimal(self):
|
||||||
|
|
||||||
|
def fail():
|
||||||
|
self.A.isdecimal()
|
||||||
|
|
||||||
|
assert_raises(TypeError, fail)
|
||||||
|
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
|
||||||
|
assert_array_equal(self.B.isdecimal(), [
|
||||||
|
[False, False], [True, False], [False, False]])
|
||||||
|
|
||||||
|
|
||||||
|
class TestOperations(object):
|
||||||
|
def setup(self):
|
||||||
|
self.A = np.array([['abc', '123'],
|
||||||
|
['789', 'xyz']]).view(np.chararray)
|
||||||
|
self.B = np.array([['efg', '456'],
|
||||||
|
['051', 'tuv']]).view(np.chararray)
|
||||||
|
|
||||||
|
def test_add(self):
|
||||||
|
AB = np.array([['abcefg', '123456'],
|
||||||
|
['789051', 'xyztuv']]).view(np.chararray)
|
||||||
|
assert_array_equal(AB, (self.A + self.B))
|
||||||
|
assert_(len((self.A + self.B)[0][0]) == 6)
|
||||||
|
|
||||||
|
def test_radd(self):
|
||||||
|
QA = np.array([['qabc', 'q123'],
|
||||||
|
['q789', 'qxyz']]).view(np.chararray)
|
||||||
|
assert_array_equal(QA, ('q' + self.A))
|
||||||
|
|
||||||
|
def test_mul(self):
|
||||||
|
A = self.A
|
||||||
|
for r in (2, 3, 5, 7, 197):
|
||||||
|
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
|
||||||
|
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
|
||||||
|
|
||||||
|
assert_array_equal(Ar, (self.A * r))
|
||||||
|
|
||||||
|
for ob in [object(), 'qrs']:
|
||||||
|
with assert_raises_regex(ValueError,
|
||||||
|
'Can only multiply by integers'):
|
||||||
|
A*ob
|
||||||
|
|
||||||
|
def test_rmul(self):
|
||||||
|
A = self.A
|
||||||
|
for r in (2, 3, 5, 7, 197):
|
||||||
|
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
|
||||||
|
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
|
||||||
|
assert_array_equal(Ar, (r * self.A))
|
||||||
|
|
||||||
|
for ob in [object(), 'qrs']:
|
||||||
|
with assert_raises_regex(ValueError,
|
||||||
|
'Can only multiply by integers'):
|
||||||
|
ob * A
|
||||||
|
|
||||||
|
def test_mod(self):
|
||||||
|
"""Ticket #856"""
|
||||||
|
F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
|
||||||
|
C = np.array([[3, 7], [19, 1]])
|
||||||
|
FC = np.array([['3', '7.000000'],
|
||||||
|
['19', '1']]).view(np.chararray)
|
||||||
|
assert_array_equal(FC, F % C)
|
||||||
|
|
||||||
|
A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
|
||||||
|
A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
|
||||||
|
assert_array_equal(A1, (A % 1))
|
||||||
|
|
||||||
|
A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
|
||||||
|
assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
|
||||||
|
|
||||||
|
def test_rmod(self):
|
||||||
|
assert_(("%s" % self.A) == str(self.A))
|
||||||
|
assert_(("%r" % self.A) == repr(self.A))
|
||||||
|
|
||||||
|
for ob in [42, object()]:
|
||||||
|
with assert_raises_regex(
|
||||||
|
TypeError, "unsupported operand type.* and 'chararray'"):
|
||||||
|
ob % self.A
|
||||||
|
|
||||||
|
def test_slice(self):
|
||||||
|
"""Regression test for https://github.com/numpy/numpy/issues/5982"""
|
||||||
|
|
||||||
|
arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
|
||||||
|
dtype='S4').view(np.chararray)
|
||||||
|
sl1 = arr[:]
|
||||||
|
assert_array_equal(sl1, arr)
|
||||||
|
assert_(sl1.base is arr)
|
||||||
|
assert_(sl1.base.base is arr.base)
|
||||||
|
|
||||||
|
sl2 = arr[:, :]
|
||||||
|
assert_array_equal(sl2, arr)
|
||||||
|
assert_(sl2.base is arr)
|
||||||
|
assert_(sl2.base.base is arr.base)
|
||||||
|
|
||||||
|
assert_(arr[0, 0] == b'abc')
|
||||||
|
|
||||||
|
|
||||||
|
def test_empty_indexing():
|
||||||
|
"""Regression test for ticket 1948."""
|
||||||
|
# Check that indexing a chararray with an empty list/array returns an
|
||||||
|
# empty chararray instead of a chararray with a single empty string in it.
|
||||||
|
s = np.chararray((4,))
|
||||||
|
assert_(s[[]].size == 0)
|
@ -0,0 +1,570 @@
|
|||||||
|
"""
|
||||||
|
Tests related to deprecation warnings. Also a convenient place
|
||||||
|
to document how deprecations should eventually be turned into errors.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import sys
|
||||||
|
import operator
|
||||||
|
import warnings
|
||||||
|
import pytest
|
||||||
|
import shutil
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from numpy.testing import (
|
||||||
|
assert_raises, assert_warns, assert_, assert_array_equal
|
||||||
|
)
|
||||||
|
|
||||||
|
from numpy.core._multiarray_tests import fromstring_null_term_c_api
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pytz
|
||||||
|
_has_pytz = True
|
||||||
|
except ImportError:
|
||||||
|
_has_pytz = False
|
||||||
|
|
||||||
|
|
||||||
|
class _DeprecationTestCase(object):
|
||||||
|
# Just as warning: warnings uses re.match, so the start of this message
|
||||||
|
# must match.
|
||||||
|
message = ''
|
||||||
|
warning_cls = DeprecationWarning
|
||||||
|
|
||||||
|
def setup(self):
|
||||||
|
self.warn_ctx = warnings.catch_warnings(record=True)
|
||||||
|
self.log = self.warn_ctx.__enter__()
|
||||||
|
|
||||||
|
# Do *not* ignore other DeprecationWarnings. Ignoring warnings
|
||||||
|
# can give very confusing results because of
|
||||||
|
# https://bugs.python.org/issue4180 and it is probably simplest to
|
||||||
|
# try to keep the tests cleanly giving only the right warning type.
|
||||||
|
# (While checking them set to "error" those are ignored anyway)
|
||||||
|
# We still have them show up, because otherwise they would be raised
|
||||||
|
warnings.filterwarnings("always", category=self.warning_cls)
|
||||||
|
warnings.filterwarnings("always", message=self.message,
|
||||||
|
category=self.warning_cls)
|
||||||
|
|
||||||
|
def teardown(self):
|
||||||
|
self.warn_ctx.__exit__()
|
||||||
|
|
||||||
|
def assert_deprecated(self, function, num=1, ignore_others=False,
|
||||||
|
function_fails=False,
|
||||||
|
exceptions=np._NoValue,
|
||||||
|
args=(), kwargs={}):
|
||||||
|
"""Test if DeprecationWarnings are given and raised.
|
||||||
|
|
||||||
|
This first checks if the function when called gives `num`
|
||||||
|
DeprecationWarnings, after that it tries to raise these
|
||||||
|
DeprecationWarnings and compares them with `exceptions`.
|
||||||
|
The exceptions can be different for cases where this code path
|
||||||
|
is simply not anticipated and the exception is replaced.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
function : callable
|
||||||
|
The function to test
|
||||||
|
num : int
|
||||||
|
Number of DeprecationWarnings to expect. This should normally be 1.
|
||||||
|
ignore_others : bool
|
||||||
|
Whether warnings of the wrong type should be ignored (note that
|
||||||
|
the message is not checked)
|
||||||
|
function_fails : bool
|
||||||
|
If the function would normally fail, setting this will check for
|
||||||
|
warnings inside a try/except block.
|
||||||
|
exceptions : Exception or tuple of Exceptions
|
||||||
|
Exception to expect when turning the warnings into an error.
|
||||||
|
The default checks for DeprecationWarnings. If exceptions is
|
||||||
|
empty the function is expected to run successfully.
|
||||||
|
args : tuple
|
||||||
|
Arguments for `function`
|
||||||
|
kwargs : dict
|
||||||
|
Keyword arguments for `function`
|
||||||
|
"""
|
||||||
|
# reset the log
|
||||||
|
self.log[:] = []
|
||||||
|
|
||||||
|
if exceptions is np._NoValue:
|
||||||
|
exceptions = (self.warning_cls,)
|
||||||
|
|
||||||
|
try:
|
||||||
|
function(*args, **kwargs)
|
||||||
|
except (Exception if function_fails else tuple()):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# just in case, clear the registry
|
||||||
|
num_found = 0
|
||||||
|
for warning in self.log:
|
||||||
|
if warning.category is self.warning_cls:
|
||||||
|
num_found += 1
|
||||||
|
elif not ignore_others:
|
||||||
|
raise AssertionError(
|
||||||
|
"expected %s but got: %s" %
|
||||||
|
(self.warning_cls.__name__, warning.category))
|
||||||
|
if num is not None and num_found != num:
|
||||||
|
msg = "%i warnings found but %i expected." % (len(self.log), num)
|
||||||
|
lst = [str(w) for w in self.log]
|
||||||
|
raise AssertionError("\n".join([msg] + lst))
|
||||||
|
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
warnings.filterwarnings("error", message=self.message,
|
||||||
|
category=self.warning_cls)
|
||||||
|
try:
|
||||||
|
function(*args, **kwargs)
|
||||||
|
if exceptions != tuple():
|
||||||
|
raise AssertionError(
|
||||||
|
"No error raised during function call")
|
||||||
|
except exceptions:
|
||||||
|
if exceptions == tuple():
|
||||||
|
raise AssertionError(
|
||||||
|
"Error raised during function call")
|
||||||
|
|
||||||
|
def assert_not_deprecated(self, function, args=(), kwargs={}):
|
||||||
|
"""Test that warnings are not raised.
|
||||||
|
|
||||||
|
This is just a shorthand for:
|
||||||
|
|
||||||
|
self.assert_deprecated(function, num=0, ignore_others=True,
|
||||||
|
exceptions=tuple(), args=args, kwargs=kwargs)
|
||||||
|
"""
|
||||||
|
self.assert_deprecated(function, num=0, ignore_others=True,
|
||||||
|
exceptions=tuple(), args=args, kwargs=kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class _VisibleDeprecationTestCase(_DeprecationTestCase):
|
||||||
|
warning_cls = np.VisibleDeprecationWarning
|
||||||
|
|
||||||
|
|
||||||
|
class TestNonTupleNDIndexDeprecation(object):
|
||||||
|
def test_basic(self):
|
||||||
|
a = np.zeros((5, 5))
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
warnings.filterwarnings('always')
|
||||||
|
assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
|
||||||
|
assert_warns(FutureWarning, a.__getitem__, [slice(None)])
|
||||||
|
|
||||||
|
warnings.filterwarnings('error')
|
||||||
|
assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
|
||||||
|
assert_raises(FutureWarning, a.__getitem__, [slice(None)])
|
||||||
|
|
||||||
|
# a a[[0, 1]] always was advanced indexing, so no error/warning
|
||||||
|
a[[0, 1]]
|
||||||
|
|
||||||
|
|
||||||
|
class TestComparisonDeprecations(_DeprecationTestCase):
|
||||||
|
"""This tests the deprecation, for non-element-wise comparison logic.
|
||||||
|
This used to mean that when an error occurred during element-wise comparison
|
||||||
|
(i.e. broadcasting) NotImplemented was returned, but also in the comparison
|
||||||
|
itself, False was given instead of the error.
|
||||||
|
|
||||||
|
Also test FutureWarning for the None comparison.
|
||||||
|
"""
|
||||||
|
|
||||||
|
message = "elementwise.* comparison failed; .*"
|
||||||
|
|
||||||
|
def test_normal_types(self):
|
||||||
|
for op in (operator.eq, operator.ne):
|
||||||
|
# Broadcasting errors:
|
||||||
|
self.assert_deprecated(op, args=(np.zeros(3), []))
|
||||||
|
a = np.zeros(3, dtype='i,i')
|
||||||
|
# (warning is issued a couple of times here)
|
||||||
|
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
|
||||||
|
|
||||||
|
# ragged array comparison returns True/False
|
||||||
|
a = np.array([1, np.array([1,2,3])], dtype=object)
|
||||||
|
b = np.array([1, np.array([1,2,3])], dtype=object)
|
||||||
|
self.assert_deprecated(op, args=(a, b), num=None)
|
||||||
|
|
||||||
|
def test_string(self):
|
||||||
|
# For two string arrays, strings always raised the broadcasting error:
|
||||||
|
a = np.array(['a', 'b'])
|
||||||
|
b = np.array(['a', 'b', 'c'])
|
||||||
|
assert_raises(ValueError, lambda x, y: x == y, a, b)
|
||||||
|
|
||||||
|
# The empty list is not cast to string, and this used to pass due
|
||||||
|
# to dtype mismatch; now (2018-06-21) it correctly leads to a
|
||||||
|
# FutureWarning.
|
||||||
|
assert_warns(FutureWarning, lambda: a == [])
|
||||||
|
|
||||||
|
def test_void_dtype_equality_failures(self):
|
||||||
|
class NotArray(object):
|
||||||
|
def __array__(self):
|
||||||
|
raise TypeError
|
||||||
|
|
||||||
|
# Needed so Python 3 does not raise DeprecationWarning twice.
|
||||||
|
def __ne__(self, other):
|
||||||
|
return NotImplemented
|
||||||
|
|
||||||
|
self.assert_deprecated(lambda: np.arange(2) == NotArray())
|
||||||
|
self.assert_deprecated(lambda: np.arange(2) != NotArray())
|
||||||
|
|
||||||
|
struct1 = np.zeros(2, dtype="i4,i4")
|
||||||
|
struct2 = np.zeros(2, dtype="i4,i4,i4")
|
||||||
|
|
||||||
|
assert_warns(FutureWarning, lambda: struct1 == 1)
|
||||||
|
assert_warns(FutureWarning, lambda: struct1 == struct2)
|
||||||
|
assert_warns(FutureWarning, lambda: struct1 != 1)
|
||||||
|
assert_warns(FutureWarning, lambda: struct1 != struct2)
|
||||||
|
|
||||||
|
def test_array_richcompare_legacy_weirdness(self):
|
||||||
|
# It doesn't really work to use assert_deprecated here, b/c part of
|
||||||
|
# the point of assert_deprecated is to check that when warnings are
|
||||||
|
# set to "error" mode then the error is propagated -- which is good!
|
||||||
|
# But here we are testing a bunch of code that is deprecated *because*
|
||||||
|
# it has the habit of swallowing up errors and converting them into
|
||||||
|
# different warnings. So assert_warns will have to be sufficient.
|
||||||
|
assert_warns(FutureWarning, lambda: np.arange(2) == "a")
|
||||||
|
assert_warns(FutureWarning, lambda: np.arange(2) != "a")
|
||||||
|
# No warning for scalar comparisons
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
warnings.filterwarnings("error")
|
||||||
|
assert_(not (np.array(0) == "a"))
|
||||||
|
assert_(np.array(0) != "a")
|
||||||
|
assert_(not (np.int16(0) == "a"))
|
||||||
|
assert_(np.int16(0) != "a")
|
||||||
|
|
||||||
|
for arg1 in [np.asarray(0), np.int16(0)]:
|
||||||
|
struct = np.zeros(2, dtype="i4,i4")
|
||||||
|
for arg2 in [struct, "a"]:
|
||||||
|
for f in [operator.lt, operator.le, operator.gt, operator.ge]:
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
# py3
|
||||||
|
with warnings.catch_warnings() as l:
|
||||||
|
warnings.filterwarnings("always")
|
||||||
|
assert_raises(TypeError, f, arg1, arg2)
|
||||||
|
assert_(not l)
|
||||||
|
else:
|
||||||
|
# py2
|
||||||
|
assert_warns(DeprecationWarning, f, arg1, arg2)
|
||||||
|
|
||||||
|
|
||||||
|
class TestDatetime64Timezone(_DeprecationTestCase):
|
||||||
|
"""Parsing of datetime64 with timezones deprecated in 1.11.0, because
|
||||||
|
datetime64 is now timezone naive rather than UTC only.
|
||||||
|
|
||||||
|
It will be quite a while before we can remove this, because, at the very
|
||||||
|
least, a lot of existing code uses the 'Z' modifier to avoid conversion
|
||||||
|
from local time to UTC, even if otherwise it handles time in a timezone
|
||||||
|
naive fashion.
|
||||||
|
"""
|
||||||
|
def test_string(self):
|
||||||
|
self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
|
||||||
|
self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
|
||||||
|
|
||||||
|
@pytest.mark.skipif(not _has_pytz,
|
||||||
|
reason="The pytz module is not available.")
|
||||||
|
def test_datetime(self):
|
||||||
|
tz = pytz.timezone('US/Eastern')
|
||||||
|
dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
|
||||||
|
self.assert_deprecated(np.datetime64, args=(dt,))
|
||||||
|
|
||||||
|
|
||||||
|
class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
|
||||||
|
"""View of non-C-contiguous arrays deprecated in 1.11.0.
|
||||||
|
|
||||||
|
The deprecation will not be raised for arrays that are both C and F
|
||||||
|
contiguous, as C contiguous is dominant. There are more such arrays
|
||||||
|
with relaxed stride checking than without so the deprecation is not
|
||||||
|
as visible with relaxed stride checking in force.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_fortran_contiguous(self):
|
||||||
|
self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
|
||||||
|
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
|
||||||
|
|
||||||
|
|
||||||
|
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
|
||||||
|
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
|
||||||
|
out in gh-7093. Eventually, such assignment should NOT be allowed, but
|
||||||
|
in the interests of maintaining backwards compatibility, only a Deprecation-
|
||||||
|
Warning will be raised instead for the time being to give developers time to
|
||||||
|
refactor relevant code.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_data_attr_assignment(self):
|
||||||
|
a = np.arange(10)
|
||||||
|
b = np.linspace(0, 1, 10)
|
||||||
|
|
||||||
|
self.message = ("Assigning the 'data' attribute is an "
|
||||||
|
"inherently unsafe operation and will "
|
||||||
|
"be removed in the future.")
|
||||||
|
self.assert_deprecated(a.__setattr__, args=('data', b.data))
|
||||||
|
|
||||||
|
|
||||||
|
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
|
||||||
|
"""
|
||||||
|
If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
|
||||||
|
represent the number in base 2 (positive) or 2's complement (negative) form,
|
||||||
|
the function used to silently ignore the parameter and return a representation
|
||||||
|
using the minimal number of bits needed for the form in question. Such behavior
|
||||||
|
is now considered unsafe from a user perspective and will raise an error in the future.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_insufficient_width_positive(self):
|
||||||
|
args = (10,)
|
||||||
|
kwargs = {'width': 2}
|
||||||
|
|
||||||
|
self.message = ("Insufficient bit width provided. This behavior "
|
||||||
|
"will raise an error in the future.")
|
||||||
|
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
|
||||||
|
|
||||||
|
def test_insufficient_width_negative(self):
|
||||||
|
args = (-5,)
|
||||||
|
kwargs = {'width': 2}
|
||||||
|
|
||||||
|
self.message = ("Insufficient bit width provided. This behavior "
|
||||||
|
"will raise an error in the future.")
|
||||||
|
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class TestNumericStyleTypecodes(_DeprecationTestCase):
|
||||||
|
"""
|
||||||
|
Deprecate the old numeric-style dtypes, which are especially
|
||||||
|
confusing for complex types, e.g. Complex32 -> complex64. When the
|
||||||
|
deprecation cycle is complete, the check for the strings should be
|
||||||
|
removed from PyArray_DescrConverter in descriptor.c, and the
|
||||||
|
deprecated keys should not be added as capitalized aliases in
|
||||||
|
_add_aliases in numerictypes.py.
|
||||||
|
"""
|
||||||
|
def test_all_dtypes(self):
|
||||||
|
deprecated_types = [
|
||||||
|
'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
|
||||||
|
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
|
||||||
|
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
|
||||||
|
]
|
||||||
|
if sys.version_info[0] < 3:
|
||||||
|
deprecated_types.extend(['Unicode0', 'String0'])
|
||||||
|
|
||||||
|
for dt in deprecated_types:
|
||||||
|
self.assert_deprecated(np.dtype, exceptions=(TypeError,),
|
||||||
|
args=(dt,))
|
||||||
|
|
||||||
|
|
||||||
|
class TestTestDeprecated(object):
|
||||||
|
def test_assert_deprecated(self):
|
||||||
|
test_case_instance = _DeprecationTestCase()
|
||||||
|
test_case_instance.setup()
|
||||||
|
assert_raises(AssertionError,
|
||||||
|
test_case_instance.assert_deprecated,
|
||||||
|
lambda: None)
|
||||||
|
|
||||||
|
def foo():
|
||||||
|
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
|
||||||
|
|
||||||
|
test_case_instance.assert_deprecated(foo)
|
||||||
|
test_case_instance.teardown()
|
||||||
|
|
||||||
|
|
||||||
|
class TestClassicIntDivision(_DeprecationTestCase):
|
||||||
|
"""
|
||||||
|
See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
|
||||||
|
if used for division
|
||||||
|
List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
|
||||||
|
"""
|
||||||
|
def test_int_dtypes(self):
|
||||||
|
#scramble types and do some mix and match testing
|
||||||
|
deprecated_types = [
|
||||||
|
'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
|
||||||
|
'intp', 'int64', 'uint32', 'int16'
|
||||||
|
]
|
||||||
|
if sys.version_info[0] < 3 and sys.py3kwarning:
|
||||||
|
import operator as op
|
||||||
|
dt2 = 'bool_'
|
||||||
|
for dt1 in deprecated_types:
|
||||||
|
a = np.array([1,2,3], dtype=dt1)
|
||||||
|
b = np.array([1,2,3], dtype=dt2)
|
||||||
|
self.assert_deprecated(op.div, args=(a,b))
|
||||||
|
dt2 = dt1
|
||||||
|
|
||||||
|
|
||||||
|
class TestNonNumericConjugate(_DeprecationTestCase):
|
||||||
|
"""
|
||||||
|
Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
|
||||||
|
which conflicts with the error behavior of np.conjugate.
|
||||||
|
"""
|
||||||
|
def test_conjugate(self):
|
||||||
|
for a in np.array(5), np.array(5j):
|
||||||
|
self.assert_not_deprecated(a.conjugate)
|
||||||
|
for a in (np.array('s'), np.array('2016', 'M'),
|
||||||
|
np.array((1, 2), [('a', int), ('b', int)])):
|
||||||
|
self.assert_deprecated(a.conjugate)
|
||||||
|
|
||||||
|
|
||||||
|
class TestNPY_CHAR(_DeprecationTestCase):
|
||||||
|
# 2017-05-03, 1.13.0
|
||||||
|
def test_npy_char_deprecation(self):
|
||||||
|
from numpy.core._multiarray_tests import npy_char_deprecation
|
||||||
|
self.assert_deprecated(npy_char_deprecation)
|
||||||
|
assert_(npy_char_deprecation() == 'S1')
|
||||||
|
|
||||||
|
|
||||||
|
class TestPyArray_AS1D(_DeprecationTestCase):
|
||||||
|
def test_npy_pyarrayas1d_deprecation(self):
|
||||||
|
from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
|
||||||
|
assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
|
||||||
|
|
||||||
|
|
||||||
|
class TestPyArray_AS2D(_DeprecationTestCase):
|
||||||
|
def test_npy_pyarrayas2d_deprecation(self):
|
||||||
|
from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
|
||||||
|
assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
|
||||||
|
|
||||||
|
|
||||||
|
class Test_UPDATEIFCOPY(_DeprecationTestCase):
|
||||||
|
"""
|
||||||
|
v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
|
||||||
|
WRITEBACKIFCOPY instead
|
||||||
|
"""
|
||||||
|
def test_npy_updateifcopy_deprecation(self):
|
||||||
|
from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
|
||||||
|
arr = np.arange(9).reshape(3, 3)
|
||||||
|
v = arr.T
|
||||||
|
self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
|
||||||
|
|
||||||
|
|
||||||
|
class TestDatetimeEvent(_DeprecationTestCase):
|
||||||
|
# 2017-08-11, 1.14.0
|
||||||
|
def test_3_tuple(self):
|
||||||
|
for cls in (np.datetime64, np.timedelta64):
|
||||||
|
# two valid uses - (unit, num) and (unit, num, den, None)
|
||||||
|
self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
|
||||||
|
self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
|
||||||
|
|
||||||
|
# trying to use the event argument, removed in 1.7.0, is deprecated
|
||||||
|
# it used to be a uint8
|
||||||
|
self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
|
||||||
|
self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
|
||||||
|
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
|
||||||
|
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
|
||||||
|
|
||||||
|
|
||||||
|
class TestTruthTestingEmptyArrays(_DeprecationTestCase):
|
||||||
|
# 2017-09-25, 1.14.0
|
||||||
|
message = '.*truth value of an empty array is ambiguous.*'
|
||||||
|
|
||||||
|
def test_1d(self):
|
||||||
|
self.assert_deprecated(bool, args=(np.array([]),))
|
||||||
|
|
||||||
|
def test_2d(self):
|
||||||
|
self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
|
||||||
|
self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
|
||||||
|
self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
|
||||||
|
|
||||||
|
|
||||||
|
class TestBincount(_DeprecationTestCase):
|
||||||
|
# 2017-06-01, 1.14.0
|
||||||
|
def test_bincount_minlength(self):
|
||||||
|
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
|
||||||
|
|
||||||
|
|
||||||
|
class TestAlen(_DeprecationTestCase):
|
||||||
|
# 2019-08-02, 1.18.0
|
||||||
|
def test_alen(self):
|
||||||
|
self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
|
||||||
|
|
||||||
|
|
||||||
|
class TestGeneratorSum(_DeprecationTestCase):
|
||||||
|
# 2018-02-25, 1.15.0
|
||||||
|
def test_generator_sum(self):
|
||||||
|
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
|
||||||
|
|
||||||
|
|
||||||
|
class TestSctypeNA(_VisibleDeprecationTestCase):
|
||||||
|
# 2018-06-24, 1.16
|
||||||
|
def test_sctypeNA(self):
|
||||||
|
self.assert_deprecated(lambda: np.sctypeNA['?'])
|
||||||
|
self.assert_deprecated(lambda: np.typeNA['?'])
|
||||||
|
self.assert_deprecated(lambda: np.typeNA.get('?'))
|
||||||
|
|
||||||
|
|
||||||
|
class TestPositiveOnNonNumerical(_DeprecationTestCase):
|
||||||
|
# 2018-06-28, 1.16.0
|
||||||
|
def test_positive_on_non_number(self):
|
||||||
|
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
|
||||||
|
|
||||||
|
|
||||||
|
class TestFromstring(_DeprecationTestCase):
|
||||||
|
# 2017-10-19, 1.14
|
||||||
|
def test_fromstring(self):
|
||||||
|
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
|
||||||
|
|
||||||
|
|
||||||
|
class TestFromStringAndFileInvalidData(_DeprecationTestCase):
|
||||||
|
# 2019-06-08, 1.17.0
|
||||||
|
# Tests should be moved to real tests when deprecation is done.
|
||||||
|
message = "string or file could not be read to its end"
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
|
||||||
|
def test_deprecate_unparsable_data_file(self, invalid_str):
|
||||||
|
x = np.array([1.51, 2, 3.51, 4], dtype=float)
|
||||||
|
|
||||||
|
with tempfile.TemporaryFile(mode="w") as f:
|
||||||
|
x.tofile(f, sep=',', format='%.2f')
|
||||||
|
f.write(invalid_str)
|
||||||
|
|
||||||
|
f.seek(0)
|
||||||
|
self.assert_deprecated(lambda: np.fromfile(f, sep=","))
|
||||||
|
f.seek(0)
|
||||||
|
self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
|
||||||
|
# Should not raise:
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
warnings.simplefilter("error", DeprecationWarning)
|
||||||
|
f.seek(0)
|
||||||
|
res = np.fromfile(f, sep=",", count=4)
|
||||||
|
assert_array_equal(res, x)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
|
||||||
|
def test_deprecate_unparsable_string(self, invalid_str):
|
||||||
|
x = np.array([1.51, 2, 3.51, 4], dtype=float)
|
||||||
|
x_str = "1.51,2,3.51,4{}".format(invalid_str)
|
||||||
|
|
||||||
|
self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
|
||||||
|
self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
|
||||||
|
|
||||||
|
# The C-level API can use not fixed size, but 0 terminated strings,
|
||||||
|
# so test that as well:
|
||||||
|
bytestr = x_str.encode("ascii")
|
||||||
|
self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
|
||||||
|
|
||||||
|
with assert_warns(DeprecationWarning):
|
||||||
|
# this is slightly strange, in that fromstring leaves data
|
||||||
|
# potentially uninitialized (would be good to error when all is
|
||||||
|
# read, but count is larger then actual data maybe).
|
||||||
|
res = np.fromstring(x_str, sep=",", count=5)
|
||||||
|
assert_array_equal(res[:-1], x)
|
||||||
|
|
||||||
|
with warnings.catch_warnings():
|
||||||
|
warnings.simplefilter("error", DeprecationWarning)
|
||||||
|
|
||||||
|
# Should not raise:
|
||||||
|
res = np.fromstring(x_str, sep=",", count=4)
|
||||||
|
assert_array_equal(res, x)
|
||||||
|
|
||||||
|
|
||||||
|
class Test_GetSet_NumericOps(_DeprecationTestCase):
|
||||||
|
# 2018-09-20, 1.16.0
|
||||||
|
def test_get_numeric_ops(self):
|
||||||
|
from numpy.core._multiarray_tests import getset_numericops
|
||||||
|
self.assert_deprecated(getset_numericops, num=2)
|
||||||
|
|
||||||
|
# empty kwargs prevents any state actually changing which would break
|
||||||
|
# other tests.
|
||||||
|
self.assert_deprecated(np.set_numeric_ops, kwargs={})
|
||||||
|
assert_raises(ValueError, np.set_numeric_ops, add='abc')
|
||||||
|
|
||||||
|
|
||||||
|
class TestShape1Fields(_DeprecationTestCase):
|
||||||
|
warning_cls = FutureWarning
|
||||||
|
|
||||||
|
# 2019-05-20, 1.17.0
|
||||||
|
def test_shape_1_fields(self):
|
||||||
|
self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
|
||||||
|
|
||||||
|
|
||||||
|
class TestNonZero(_DeprecationTestCase):
|
||||||
|
# 2019-05-26, 1.17.0
|
||||||
|
def test_zerod(self):
|
||||||
|
self.assert_deprecated(lambda: np.nonzero(np.array(0)))
|
||||||
|
self.assert_deprecated(lambda: np.nonzero(np.array(1)))
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,49 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import platform
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from numpy.testing import assert_, assert_raises
|
||||||
|
|
||||||
|
|
||||||
|
class TestErrstate(object):
    """Tests for the ``np.errstate`` context manager / decorator."""

    @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
    def test_invalid(self):
        # An inner errstate temporarily overrides the outer 'invalid' setting.
        with np.errstate(all='raise', under='ignore'):
            data = -np.arange(3)
            # Suppressed here, so no error is raised...
            with np.errstate(invalid='ignore'):
                np.sqrt(data)
            # ...but the outer 'raise' setting is restored on exit.
            with assert_raises(FloatingPointError):
                np.sqrt(data)

    def test_divide(self):
        # Same pattern as test_invalid, but for the 'divide' category.
        with np.errstate(all='raise', under='ignore'):
            data = -np.arange(3)
            # Division by zero is ignored inside the inner context...
            with np.errstate(divide='ignore'):
                data // 0
            # ...and raises again once it exits.
            with assert_raises(FloatingPointError):
                data // 0

    def test_errcall(self):
        # errstate(call=...) installs an error callback and restores the
        # previous one when the context exits.
        def foo(*args):
            print(args)

        olderrcall = np.geterrcall()
        with np.errstate(call=foo):
            assert_(np.geterrcall() is foo, 'call is not foo')
            with np.errstate(call=None):
                assert_(np.geterrcall() is None, 'call is not None')
        assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')

    def test_errstate_decorator(self):
        # errstate also works as a function decorator.
        @np.errstate(all='ignore')
        def silently_divide():
            values = -np.arange(3)
            values // 0

        silently_divide()
|
@ -0,0 +1,221 @@
|
|||||||
|
from __future__ import division, absolute_import, print_function
|
||||||
|
|
||||||
|
import itertools
|
||||||
|
import contextlib
|
||||||
|
import operator
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import numpy.core._multiarray_tests as mt
|
||||||
|
|
||||||
|
from numpy.testing import assert_raises, assert_equal
|
||||||
|
|
||||||
|
|
||||||
|
# Exact int64 bounds, taken from numpy so they match the C-level type.
INT64_MAX = np.iinfo(np.int64).max
INT64_MIN = np.iinfo(np.int64).min
# A convenient interior point (2**32) whose neighborhood exercises carries.
INT64_MID = 2**32

# int128 is not two's complement, the sign bit is separate
INT128_MAX = 2**128 - 1
INT128_MIN = -INT128_MAX
INT128_MID = 2**64

# Sample values clustered around the interesting boundaries (min, max,
# mid-range carry points, and small magnitudes around zero).
INT64_VALUES = (
    [INT64_MIN + j for j in range(20)] +
    [INT64_MAX - j for j in range(20)] +
    [INT64_MID + j for j in range(-20, 20)] +
    [2*INT64_MID + j for j in range(-20, 20)] +
    [INT64_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70))
)

# Same sampling scheme for 128-bit values; ``False`` (== 0) is included so
# the sign-magnitude representation's "negative zero" path is exercised.
INT128_VALUES = (
    [INT128_MIN + j for j in range(20)] +
    [INT128_MAX - j for j in range(20)] +
    [INT128_MID + j for j in range(-20, 20)] +
    [2*INT128_MID + j for j in range(-20, 20)] +
    [INT128_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70)) +
    [False]  # negative zero
)

# Strictly positive int64 samples, used as divisors in the division tests.
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
|
||||||
|
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def exc_iter(*args):
|
||||||
|
"""
|
||||||
|
Iterate over Cartesian product of *args, and if an exception is raised,
|
||||||
|
add information of the current iterate.
|
||||||
|
"""
|
||||||
|
|
||||||
|
value = [None]
|
||||||
|
|
||||||
|
def iterate():
|
||||||
|
for v in itertools.product(*args):
|
||||||
|
value[0] = v
|
||||||
|
yield v
|
||||||
|
|
||||||
|
try:
|
||||||
|
yield iterate()
|
||||||
|
except Exception:
|
||||||
|
import traceback
|
||||||
|
msg = "At: %r\n%s" % (repr(value[0]),
|
||||||
|
traceback.format_exc())
|
||||||
|
raise AssertionError(msg)
|
||||||
|
|
||||||
|
|
||||||
|
def test_safe_binop():
    """Exercise the checked (overflow-detecting) int64 arithmetic routines."""
    # (python operator, op code understood by extint_safe_binop)
    ops = [(operator.add, 1), (operator.sub, 2), (operator.mul, 3)]

    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
        for (pyop, opcode), lhs, rhs in it:
            expected = pyop(lhs, rhs)
            if INT64_MIN <= expected <= INT64_MAX:
                got = mt.extint_safe_binop(lhs, rhs, opcode)
                if expected != got:
                    # assert_equal is slow; only invoke it on mismatch
                    assert_equal(got, expected)
            else:
                # Result does not fit in int64: must raise
                assert_raises(OverflowError, mt.extint_safe_binop,
                              lhs, rhs, opcode)
|
||||||
|
|
||||||
|
|
||||||
|
def test_to_128():
    """Widening int64 values to 128 bits round-trips exactly."""
    with exc_iter(INT64_VALUES) as it:
        for (value,) in it:
            widened = mt.extint_to_128(value)
            if value != widened:
                # assert_equal is slow; only invoke it on mismatch
                assert_equal(widened, value)
|
||||||
|
|
||||||
|
|
||||||
|
def test_to_64():
    """Narrowing 128-bit values to int64 overflows exactly when out of range."""
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            if INT64_MIN <= value <= INT64_MAX:
                narrowed = mt.extint_to_64(value)
                if value != narrowed:
                    assert_equal(narrowed, value)
            else:
                assert_raises(OverflowError, mt.extint_to_64, value)
|
||||||
|
|
||||||
|
|
||||||
|
def test_mul_64_64():
    """64x64 -> 128-bit multiply matches Python's arbitrary precision."""
    with exc_iter(INT64_VALUES, INT64_VALUES) as it:
        for lhs, rhs in it:
            expected = lhs * rhs
            got = mt.extint_mul_64_64(lhs, rhs)
            if expected != got:
                assert_equal(got, expected)
|
||||||
|
|
||||||
|
|
||||||
|
def test_add_128():
    """128-bit addition: exact result, OverflowError outside the range."""
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for lhs, rhs in it:
            expected = lhs + rhs
            if INT128_MIN <= expected <= INT128_MAX:
                got = mt.extint_add_128(lhs, rhs)
                if expected != got:
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError, mt.extint_add_128, lhs, rhs)
|
||||||
|
|
||||||
|
|
||||||
|
def test_sub_128():
    """128-bit subtraction: exact result, OverflowError outside the range."""
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for lhs, rhs in it:
            expected = lhs - rhs
            if INT128_MIN <= expected <= INT128_MAX:
                got = mt.extint_sub_128(lhs, rhs)
                if expected != got:
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError, mt.extint_sub_128, lhs, rhs)
|
||||||
|
|
||||||
|
|
||||||
|
def test_neg_128():
    """128-bit negation matches Python's unary minus."""
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            expected = -value
            got = mt.extint_neg_128(value)
            if got != expected:
                assert_equal(got, expected)
|
||||||
|
|
||||||
|
|
||||||
|
def test_shl_128():
    """Left shift by one: shift the magnitude, keep the separate sign bit."""
    mask = 2**128 - 1  # wrap the magnitude to 128 bits
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            if value < 0:
                expected = -(((-value) << 1) & mask)
            else:
                expected = (value << 1) & mask
            got = mt.extint_shl_128(value)
            if got != expected:
                assert_equal(got, expected)
|
||||||
|
|
||||||
|
|
||||||
|
def test_shr_128():
    """Right shift by one: shift the magnitude, keep the separate sign bit."""
    with exc_iter(INT128_VALUES) as it:
        for (value,) in it:
            expected = -((-value) >> 1) if value < 0 else value >> 1
            got = mt.extint_shr_128(value)
            if got != expected:
                assert_equal(got, expected)
|
||||||
|
|
||||||
|
|
||||||
|
def test_gt_128():
    """128-bit greater-than comparison matches Python's ``>``."""
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for lhs, rhs in it:
            expected = lhs > rhs
            got = mt.extint_gt_128(lhs, rhs)
            if got != expected:
                assert_equal(got, expected)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.slow
def test_divmod_128_64():
    """divmod of a 128-bit value by a positive 64-bit divisor.

    The extended ints use sign-magnitude semantics, so negative dividends
    round toward zero (unlike Python's floor division); emulate that by
    dividing the magnitude and negating both quotient and remainder.
    """
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            if a >= 0:
                c, cr = divmod(a, b)
            else:
                c, cr = divmod(-a, b)
                c = -c
                cr = -cr

            d, dr = mt.extint_divmod_128_64(a, b)

            # Fix: the condition previously read ``d != dr``, comparing the
            # quotient with the remainder — unrelated quantities — so the
            # slow assert_equal path ran on nearly every iteration.  Compare
            # remainders (cr vs dr) instead; assert_equal is slow and should
            # only run on an actual mismatch.
            if c != d or cr != dr or b*d + dr != a:
                assert_equal(d, c)
                assert_equal(dr, cr)
                assert_equal(b*d + dr, a)
|
||||||
|
|
||||||
|
|
||||||
|
def test_floordiv_128_64():
    """Floor division of 128-bit values by positive 64-bit divisors."""
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for dividend, divisor in it:
            expected = dividend // divisor
            got = mt.extint_floordiv_128_64(dividend, divisor)
            if got != expected:
                assert_equal(got, expected)
|
||||||
|
|
||||||
|
|
||||||
|
def test_ceildiv_128_64():
    """Ceiling division of 128-bit values by positive 64-bit divisors."""
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for dividend, divisor in it:
            # Classic ceil-div identity for a positive divisor.
            expected = (dividend + divisor - 1) // divisor
            got = mt.extint_ceildiv_128_64(dividend, divisor)
            if got != expected:
                assert_equal(got, expected)
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user