From 3563c62b62bfb0c537b1d32a352dbccf18c1b291 Mon Sep 17 00:00:00 2001 From: Sam Van Kooten Date: Thu, 22 Jun 2023 15:28:51 -0600 Subject: [PATCH 01/30] BUG: fix error message for nanargmin/max of empty array For arrays with a length-zero dimension (e.g. shape (0, 50)), nanargmin and nanargmax would incorrectly raise `ValueError: All-NaN slice encountered`, despite there being no NaNs in the input array. This commit avoids proceeding to the all-NaNs check for size-zero arrays, falling through to argmin/argmax which handle empty sequences. --- numpy/lib/nanfunctions.py | 4 ++-- numpy/lib/tests/test_nanfunctions.py | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index b3b570860ff8..2c2ac0405153 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -546,7 +546,7 @@ def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue): """ a, mask = _replace_nan(a, np.inf) - if mask is not None: + if mask is not None and mask.size: mask = np.all(mask, axis=axis) if np.any(mask): raise ValueError("All-NaN slice encountered") @@ -607,7 +607,7 @@ def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): """ a, mask = _replace_nan(a, -np.inf) - if mask is not None: + if mask is not None and mask.size: mask = np.all(mask, axis=axis) if np.any(mask): raise ValueError("All-NaN slice encountered") diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 257de381b394..11c85404a642 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -7,7 +7,7 @@ from numpy.lib.nanfunctions import _nan_mask, _replace_nan from numpy.testing import ( assert_, assert_equal, assert_almost_equal, assert_raises, - assert_array_equal, suppress_warnings + assert_raises_regex, assert_array_equal, suppress_warnings ) @@ -303,7 +303,10 @@ def test_empty(self): mat = np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: - assert_raises(ValueError, f, mat, axis=axis) + assert_raises_regex( + ValueError, + "attempt to get argm.. 
of an empty sequence", + f, mat, axis=axis) for axis in [1]: res = f(mat, axis=axis) assert_equal(res, np.zeros(0)) From f2ac4f967fb8bbf9da30bc19f16805f3f3260c7b Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jul 2023 13:05:42 +0300 Subject: [PATCH 02/30] DEP: remove np.cast --- numpy/core/numerictypes.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index a809c10becb2..53c004d459b0 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -90,7 +90,7 @@ # we add more at the bottom __all__ = ['sctypeDict', 'sctypes', - 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', + 'ScalarType', 'obj2sctype', 'nbytes', 'sctype2char', 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', 'issubdtype', 'datetime_data', 'datetime_as_string', 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', @@ -503,12 +503,6 @@ def sctype2char(sctype): raise KeyError(sctype) return dtype(sctype).char -# Create dictionary of casting functions that wrap sequences -# indexed by type or type character -cast = _typedict() -for key in _concrete_types: - cast[key] = lambda x, k=key: array(x, copy=False).astype(k) - def _scalar_type_key(typ): """A ``key`` function for `sorted`.""" From 732843ef8751354ef7614d4267ef73f89c66f311 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jul 2023 13:10:37 +0300 Subject: [PATCH 03/30] DEP: remove _alignment dict --- numpy/core/numerictypes.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 53c004d459b0..b383ad5efecf 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -434,14 +434,12 @@ def __getitem__(self, obj): return dict.__getitem__(self, obj2sctype(obj)) nbytes = _typedict() -_alignment = _typedict() _maxvals = _typedict() _minvals = _typedict() def _construct_lookups(): for name, info in _concrete_typeinfo.items(): obj = info.type nbytes[obj] = info.bits // 8 - _alignment[obj] = info.alignment if len(info) > 5: _maxvals[obj] = info.max _minvals[obj] = info.min From f71fd87d6a965bc071060fca4f057414bb218ed5 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jul 2023 13:25:43 +0300 Subject: [PATCH 04/30] DEP: remove np.lookfor --- numpy/lib/tests/test_utils.py | 13 -- numpy/lib/utils.py | 287 +--------------------------------- 2 files changed, 2 insertions(+), 298 deletions(-) diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index db2196d71aa6..8594bf2e899f 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -11,19 +11,6 @@ from io import StringIO -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.skipif( - sys.version_info == (3, 10, 0, "candidate", 1), - reason="Broken as of bpo-44524", -) -def test_lookfor(): - out = StringIO() - utils.lookfor('eigenvalue', module='numpy', output=out, - import_modules=False) - out = out.getvalue() - assert_('numpy.linalg.eig' in out) - - @deprecate def old_func(self, x): return x diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index f1dbe126f726..ca9c4add3d49 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -14,8 +14,8 @@ __all__ = [ 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', - 'deprecate_with_doc', 'get_include', 'info', 'source', 'who', - 'lookfor', 'byte_bounds', 'safe_eval', 'show_runtime' + 'deprecate_with_doc', 'get_include', 'source', 'who', + 'byte_bounds', 'safe_eval', 
'show_runtime' ] @@ -743,289 +743,6 @@ def interp(x, xp, fp, left=None, right=None): print("Not available for this object.", file=output) -# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} -# where kind: "func", "class", "module", "object" -# and index: index in breadth-first namespace traversal -_lookfor_caches = {} - -# regexp whose match indicates that the string may contain a function -# signature -_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) - - -@set_module('numpy') -def lookfor(what, module=None, import_modules=True, regenerate=False, - output=None): - """ - Do a keyword search on docstrings. - - A list of objects that matched the search is displayed, - sorted by relevance. All given keywords need to be found in the - docstring for it to be returned as a result, but the order does - not matter. - - Parameters - ---------- - what : str - String containing words to look for. - module : str or list, optional - Name of module(s) whose docstrings to go through. - import_modules : bool, optional - Whether to import sub-modules in packages. Default is True. - regenerate : bool, optional - Whether to re-generate the docstring cache. Default is False. - output : file-like, optional - File-like object to write the output to. If omitted, use a pager. - - See Also - -------- - source, info - - Notes - ----- - Relevance is determined only roughly, by checking if the keywords occur - in the function name, at the start of a docstring, etc. - - Examples - -------- - >>> np.lookfor('binary representation') # doctest: +SKIP - Search results for 'binary representation' - ------------------------------------------ - numpy.binary_repr - Return the binary representation of the input number as a string. - numpy.core.setup_common.long_double_representation - Given a binary dump as given by GNU od -b, look for long double - numpy.base_repr - Return a string representation of a number in the given base system. - ... - - """ - import pydoc - - # Cache - cache = _lookfor_generate_cache(module, import_modules, regenerate) - - # Search - # XXX: maybe using a real stemming search engine would be better? - found = [] - whats = str(what).lower().split() - if not whats: - return - - for name, (docstring, kind, index) in cache.items(): - if kind in ('module', 'object'): - # don't show modules or objects - continue - doc = docstring.lower() - if all(w in doc for w in whats): - found.append(name) - - # Relevance sort - # XXX: this is full Harrison-Stetson heuristics now, - # XXX: it probably could be improved - - kind_relevance = {'func': 1000, 'class': 1000, - 'module': -1000, 'object': -1000} - - def relevance(name, docstr, kind, index): - r = 0 - # do the keywords occur within the start of the docstring? - first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) - r += sum([200 for w in whats if w in first_doc]) - # do the keywords occur in the function name? - r += sum([30 for w in whats if w in name]) - # is the full name long? - r += -len(name) * 5 - # is the object of bad type? - r += kind_relevance.get(kind, -1000) - # is the object deep in namespace hierarchy? 
- r += -name.count('.') * 10 - r += max(-index / 100, -100) - return r - - def relevance_value(a): - return relevance(a, *cache[a]) - found.sort(key=relevance_value) - - # Pretty-print - s = "Search results for '%s'" % (' '.join(whats)) - help_text = [s, "-"*len(s)] - for name in found[::-1]: - doc, kind, ix = cache[name] - - doclines = [line.strip() for line in doc.strip().split("\n") - if line.strip()] - - # find a suitable short description - try: - first_doc = doclines[0].strip() - if _function_signature_re.search(first_doc): - first_doc = doclines[1].strip() - except IndexError: - first_doc = "" - help_text.append("%s\n %s" % (name, first_doc)) - - if not found: - help_text.append("Nothing found.") - - # Output - if output is not None: - output.write("\n".join(help_text)) - elif len(help_text) > 10: - pager = pydoc.getpager() - pager("\n".join(help_text)) - else: - print("\n".join(help_text)) - -def _lookfor_generate_cache(module, import_modules, regenerate): - """ - Generate docstring cache for given module. - - Parameters - ---------- - module : str, None, module - Module for which to generate docstring cache - import_modules : bool - Whether to import sub-modules in packages. - regenerate : bool - Re-generate the docstring cache - - Returns - ------- - cache : dict {obj_full_name: (docstring, kind, index), ...} - Docstring cache for the module, either cached one (regenerate=False) - or newly generated. - - """ - # Local import to speed up numpy's import time. - import inspect - - from io import StringIO - - if module is None: - module = "numpy" - - if isinstance(module, str): - try: - __import__(module) - except ImportError: - return {} - module = sys.modules[module] - elif isinstance(module, list) or isinstance(module, tuple): - cache = {} - for mod in module: - cache.update(_lookfor_generate_cache(mod, import_modules, - regenerate)) - return cache - - if id(module) in _lookfor_caches and not regenerate: - return _lookfor_caches[id(module)] - - # walk items and collect docstrings - cache = {} - _lookfor_caches[id(module)] = cache - seen = {} - index = 0 - stack = [(module.__name__, module)] - while stack: - name, item = stack.pop(0) - if id(item) in seen: - continue - seen[id(item)] = True - - index += 1 - kind = "object" - - if inspect.ismodule(item): - kind = "module" - try: - _all = item.__all__ - except AttributeError: - _all = None - - # import sub-packages - if import_modules and hasattr(item, '__path__'): - for pth in item.__path__: - for mod_path in os.listdir(pth): - this_py = os.path.join(pth, mod_path) - init_py = os.path.join(pth, mod_path, '__init__.py') - if (os.path.isfile(this_py) and - mod_path.endswith('.py')): - to_import = mod_path[:-3] - elif os.path.isfile(init_py): - to_import = mod_path - else: - continue - if to_import == '__init__': - continue - - try: - old_stdout = sys.stdout - old_stderr = sys.stderr - try: - sys.stdout = StringIO() - sys.stderr = StringIO() - __import__("%s.%s" % (name, to_import)) - finally: - sys.stdout = old_stdout - sys.stderr = old_stderr - except KeyboardInterrupt: - # Assume keyboard interrupt came from a user - raise - except BaseException: - # Ignore also SystemExit and pytests.importorskip - # `Skipped` (these are BaseExceptions; gh-22345) - continue - - for n, v in _getmembers(item): - try: - item_name = getattr(v, '__name__', "%s.%s" % (name, n)) - mod_name = getattr(v, '__module__', None) - except NameError: - # ref. 
SWIG's global cvars - # NameError: Unknown C global variable - item_name = "%s.%s" % (name, n) - mod_name = None - if '.' not in item_name and mod_name: - item_name = "%s.%s" % (mod_name, item_name) - - if not item_name.startswith(name + '.'): - # don't crawl "foreign" objects - if isinstance(v, ufunc): - # ... unless they are ufuncs - pass - else: - continue - elif not (inspect.ismodule(v) or _all is None or n in _all): - continue - stack.append(("%s.%s" % (name, n), v)) - elif inspect.isclass(item): - kind = "class" - for n, v in _getmembers(item): - stack.append(("%s.%s" % (name, n), v)) - elif hasattr(item, "__call__"): - kind = "func" - - try: - doc = inspect.getdoc(item) - except NameError: - # ref SWIG's NameError: Unknown C global variable - doc = None - if doc is not None: - cache[name] = (doc, kind, index) - - return cache - -def _getmembers(item): - import inspect - try: - members = inspect.getmembers(item) - except Exception: - members = [(x, getattr(item, x)) for x in dir(item) - if hasattr(item, x)] - return members - - @deprecate def safe_eval(source): """ From 5579f0b64296d611621171087a0c161f2b4c80dc Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 8 Jul 2023 13:28:37 +0300 Subject: [PATCH 05/30] DEP: remove np.source --- numpy/lib/utils.py | 51 +--------------------------------------------- 1 file changed, 1 insertion(+), 50 deletions(-) diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index ca9c4add3d49..c309afc660ed 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -14,7 +14,7 @@ __all__ = [ 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', - 'deprecate_with_doc', 'get_include', 'source', 'who', + 'deprecate_with_doc', 'get_include', 'info', 'who', 'byte_bounds', 'safe_eval', 'show_runtime' ] @@ -694,55 +694,6 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): print(inspect.getdoc(object), file=output) -@set_module('numpy') -def source(object, output=sys.stdout): - """ - Print or write to a file the source code for a NumPy object. - - The source code is only returned for objects written in Python. Many - functions and classes are defined in C and will therefore not return - useful information. - - Parameters - ---------- - object : numpy object - Input object. This can be any object (function, class, module, - ...). - output : file object, optional - If `output` not supplied then source code is printed to screen - (sys.stdout). File object must be created with either write 'w' or - append 'a' modes. - - See Also - -------- - lookfor, info - - Examples - -------- - >>> np.source(np.interp) #doctest: +SKIP - In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py - def interp(x, xp, fp, left=None, right=None): - \"\"\".... (full docstring printed)\"\"\" - if isinstance(x, (float, int, number)): - return compiled_interp([x], xp, fp, left, right).item() - else: - return compiled_interp(x, xp, fp, left, right) - - The source code is only returned for objects written in Python. - - >>> np.source(np.array) #doctest: +SKIP - Not available for this object. - - """ - # Local import to speed up numpy's import time. 
- import inspect - try: - print("In file: %s\n" % inspect.getsourcefile(object), file=output) - print(inspect.getsource(object), file=output) - except Exception: - print("Not available for this object.", file=output) - - @deprecate def safe_eval(source): """ From e334ec3ee4b7727813e98541cb819482e818073c Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 10 Jul 2023 16:23:26 +0300 Subject: [PATCH 06/30] DOC: add the release notes snippet --- doc/release/upcoming_changes/24144.python_removal.rst | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 doc/release/upcoming_changes/24144.python_removal.rst diff --git a/doc/release/upcoming_changes/24144.python_removal.rst b/doc/release/upcoming_changes/24144.python_removal.rst new file mode 100644 index 000000000000..0e17b6203991 --- /dev/null +++ b/doc/release/upcoming_changes/24144.python_removal.rst @@ -0,0 +1,7 @@ +* ``np.cast`` has been removed. The literal replacement for + ``np.cast[dtype](arg)`` is ``np.asarray(arg, dtype=dtype)``. + +* ``np.source`` has been removed. The preferred replacement is + ``inspect.getsource``. + +* ``np.lookfor`` has been removed. From cdfda1d0ee989ffae09af944c1a407f18f6ae4e7 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Wed, 12 Jul 2023 23:37:47 +0300 Subject: [PATCH 07/30] TST: remove removed stuff from typing tests --- numpy/typing/tests/data/pass/lib_utils.py | 3 --- numpy/typing/tests/data/pass/numerictypes.py | 4 ---- 2 files changed, 7 deletions(-) diff --git a/numpy/typing/tests/data/pass/lib_utils.py b/numpy/typing/tests/data/pass/lib_utils.py index b8ffa54d74c2..78b85708cfe1 100644 --- a/numpy/typing/tests/data/pass/lib_utils.py +++ b/numpy/typing/tests/data/pass/lib_utils.py @@ -17,6 +17,3 @@ def func(a: int) -> bool: ... np.info(1, output=FILE) -np.source(np.interp, output=FILE) - -np.lookfor("binary representation", output=FILE) diff --git a/numpy/typing/tests/data/pass/numerictypes.py b/numpy/typing/tests/data/pass/numerictypes.py index 63b6ad0e22e2..8d2c5245f323 100644 --- a/numpy/typing/tests/data/pass/numerictypes.py +++ b/numpy/typing/tests/data/pass/numerictypes.py @@ -23,10 +23,6 @@ np.sctype2char("S1") np.sctype2char(list) -np.cast[int] -np.cast["i8"] -np.cast[np.int64] - np.nbytes[int] np.nbytes["i8"] np.nbytes[np.int64] From 8a2f99db60cfedaef64636f1eedc3f539bb43f54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Tue, 25 Jul 2023 22:49:36 -0300 Subject: [PATCH 08/30] DOC: Fix links to random.Generator methods in quickstart [skip azp] [skip cirrus] [skip travis] --- doc/source/user/quickstart.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 783d5a447df9..5dc5a98a2897 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -230,8 +230,8 @@ of elements that we want, instead of the step:: `empty_like`, `arange`, `linspace`, - `numpy.random.Generator.rand`, - `numpy.random.Generator.randn`, + `random.Generator.random`, + `random.Generator.normal`, `fromfunction`, `fromfile` From a0b45c5d62aed45ed26b8ae4360ee71443878e84 Mon Sep 17 00:00:00 2001 From: warren Date: Wed, 26 Jul 2023 11:54:09 -0400 Subject: [PATCH 09/30] BUG: random: Fix generation of nan by beta. The implementation of Johnk's algorithm for beta(a, b) could generate nan values if both a and b were extremely small (i.e. subnormal or a small multiple of the smallest normal double precision float). 
The fix is to handle variate generation in this case by noting that when both a and b are extremely small, the probability of generating a double precision value that is not either 0 or 1 is also extremely small. In particular, if a and b are less than 3e-103, the probability of generating a (double precision) value that is not 0 or 1 is less than approximately 1e-100. So instead of using Johnk's algorithm in this extreme case, we can generate the values 0 or 1 as Bernoulli trials, with the probability of 1 being a/(a + b). Closes gh-24266. --- numpy/random/src/distributions/distributions.c | 18 ++++++++++++++++++ .../test_generator_mt19937_regressions.py | 7 +++++++ 2 files changed, 25 insertions(+) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index c1830b86ac50..1241329151a9 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -403,11 +403,29 @@ float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) { return scale * random_standard_gamma_f(bitgen_state, shape); } +#define BETA_TINY_THRESHOLD 3e-103 + +/* + * Note: random_beta assumes that a != 0 and b != 0. + */ double random_beta(bitgen_t *bitgen_state, double a, double b) { double Ga, Gb; if ((a <= 1.0) && (b <= 1.0)) { double U, V, X, Y, XpY; + + if (a < BETA_TINY_THRESHOLD && b < BETA_TINY_THRESHOLD) { + /* + * When a and b are this small, the probability that the + * sample would be a double precision float that is not + * 0 or 1 is less than approx. 1e-100. So we use the + * proportion a/(a + b) and a single uniform sample to + * generate the result. + */ + U = next_double(bitgen_state); + return (a + b)*U < a; + } + /* Use Johnk's algorithm */ while (1) { diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 6177ef2c1b04..f16af2b293ce 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -79,6 +79,13 @@ def test_beta_very_small_parameters(self): # gh-24203: beta would hang with very small parameters. self.mt19937.beta(1e-49, 1e-40) + def test_beta_ridiculously_small_parameters(self): + # gh-24266: beta would generate nan when the parameters + # were subnormal or a small multiple of the smallest normal. + tiny = np.finfo(1.0).tiny + x = self.mt19937.beta(tiny/32, tiny/40, size=50) + assert not np.any(np.isnan(x)) + def test_choice_sum_of_probs_tolerance(self): # The sum of probs should be 1.0 with some tolerance. # For low precision dtypes the tolerance was too tight. 
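As a rough illustration of the Bernoulli-trial argument in the commit message above (a standalone Python sketch, not the C change to random_beta; the helper name and the use of Python's `random` module are placeholders):

    import random

    def beta_tiny_ab(a, b, rng=random):
        # When both a and b are below BETA_TINY_THRESHOLD (3e-103 in the C
        # change above), a double precision Beta(a, b) draw is 0.0 or 1.0 with
        # overwhelming probability, and P(draw == 1.0) is a/(a + b).  A single
        # uniform sample decides the Bernoulli trial, mirroring `(a + b)*U < a`.
        u = rng.random()
        return 1.0 if (a + b) * u < a else 0.0

    # The observed frequency of 1.0 should settle near a/(a + b) = 0.25.
    draws = [beta_tiny_ab(1e-110, 3e-110) for _ in range(100_000)]
    print(sum(draws) / len(draws))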
From f989273522032772f0f4c073af29edf2333d5da6 Mon Sep 17 00:00:00 2001
From: Raghuveer Devulapalli
Date: Wed, 26 Jul 2023 15:41:13 -0700
Subject: [PATCH 10/30] CI: Enable running with Intel SDE

---
 .github/workflows/build_test.yml | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index ab34c029cfcb..18a5cdaa884f 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -423,7 +423,7 @@ jobs:
         python-version: ${{ env.PYTHON_VERSION }}
     - name: Install Intel SDE
       run: |
-        curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/751535/sde-external-9.14.0-2022-10-25-lin.tar.xz
+        curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/784319/sde-external-9.24.0-2023-07-13-lin.tar.xz
         mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/
         sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde
     - name: Install dependencies
@@ -440,8 +440,6 @@ jobs:
         python -c "import numpy as np; np.show_config()"
       # Run only a few tests, running everything in an SDE takes a long time
      # Using pytest directly, unable to use python runtests.py -n -t ...
-      # Disabled running in the SDE because of an SDE bug
    - name: Run linalg/ufunc/umath tests
      run: |
-        python -m pytest numpy/core/tests/test_umath* numpy/core/tests/test_ufunc.py numpy/linalg/tests/test_*
-        #sde -spr -- python -m pytest numpy/core/tests/test_umath* numpy/core/tests/test_ufunc.py numpy/linalg/tests/test_*
+        sde -spr -- python -m pytest numpy/core/tests/test_umath* numpy/core/tests/test_ufunc.py numpy/linalg/tests/test_*

From d624b62750032d6735984aded26a9b7d8dbda706 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 27 Jul 2023 09:16:55 +0200
Subject: [PATCH 11/30] BUG: Move legacy check for void printing

The check needs to be in the Python path, because the printing for
`str()` also changed subtly.  To be the same as tuples, we should
actually print `(np.float32(3.), np.int8(1))` for `str()` (tuples
include the repr), while for `repr()` we include the dtype, so we
print similarly to arrays (but ideally with full precision).

This isn't quite ideal; I would be happy to print the full repr in
`str()`, but I guess that might be a bit annoying in practice, so
maybe our numeric types are special enough for now.
---
 numpy/core/arrayprint.py                    |  4 ++++
 numpy/core/src/multiarray/scalartypes.c.src |  4 ++--
 numpy/core/tests/test_arrayprint.py         | 10 ++++++++++
 3 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 6da5a5a31f36..a2a664f766c1 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -1415,6 +1415,10 @@ def _void_scalar_to_string(x, is_repr=True):
     formatters defined above.
     """
     options = _format_options.copy()
+
+    if options["legacy"] <= 125:
+        return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
+
     if options.get('formatter') is None:
         options['formatter'] = {}
         options['formatter'].setdefault('float_kind', str)
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 2976e065bb78..f78854279680 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -622,8 +622,8 @@ voidtype_repr(PyObject *self)
 {
     PyVoidScalarObject *s = (PyVoidScalarObject*) self;
     if (PyDataType_HASFIELDS(s->descr)) {
-        /* use string on old versions */
-        return _void_scalar_to_string(self, npy_legacy_print_mode > 125);
+        /* Python helper checks for the legacy mode printing */
+        return _void_scalar_to_string(self, 1);
     }
     if (npy_legacy_print_mode > 125) {
         return _void_to_hex(s->obval, s->descr->elsize, "np.void(b'", "\\x", "')");
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 72d4e106aee1..bf70babc5e2c 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -1084,6 +1084,9 @@ def test_scalar_repr_numbers(dtype, value):
     (np.void((True, 2), dtype="?,f4')])"),
     (np.void(b'a'), r"void(b'\x61')", r"np.void(b'\x61')"),
 ])
 def test_scalar_repr_special(scalar, legacy_repr, representation):
@@ -1092,3 +1095,10 @@ def test_scalar_repr_special(scalar, legacy_repr, representation):
 
     with np.printoptions(legacy="1.25"):
         assert repr(scalar) == legacy_repr
+
+def test_scalar_void_float_str():
+    # Note that based on this currently we do not print the same as a tuple
+    # would, since the tuple would include the repr() inside for floats, but
+    # we do not do that.
+    scalar = np.void((1.0, 2.0), dtype=[('f0', 'f4')])
+    assert str(scalar) == "(1.0, 2.0)"

From 145065bb7ca5071fe61c4591c1677449fa6e1ffb Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 27 Jul 2023 11:45:34 +0200
Subject: [PATCH 12/30] API: Remove legacy-inner-loop-selector

Remove legacy-inner-loop-selector and clean up the masked one (This was
effectively removed a while ago, but a deprecation was added for ABI
compatibility if compiling against a newer NumPy that still had the
slot).

Removed the `reservedX` struct member docs; it seems clear enough not
to have them explicitly...
---
 .../upcoming_changes/24271.c_api_removal.rst  |  3 ++
 .../reference/c-api/types-and-structures.rst  | 19 +---------
 numpy/core/include/numpy/ufuncobject.h        | 35 ++-----------------
 numpy/core/src/umath/legacy_array_method.c    |  3 +-
 numpy/core/src/umath/ufunc_object.c           | 14 --------
 numpy/core/src/umath/umathmodule.c            | 14 --------
 6 files changed, 9 insertions(+), 79 deletions(-)
 create mode 100644 doc/release/upcoming_changes/24271.c_api_removal.rst

diff --git a/doc/release/upcoming_changes/24271.c_api_removal.rst b/doc/release/upcoming_changes/24271.c_api_removal.rst
new file mode 100644
index 000000000000..fd839a6f6130
--- /dev/null
+++ b/doc/release/upcoming_changes/24271.c_api_removal.rst
@@ -0,0 +1,3 @@
+* The ``legacy_inner_loop_selector`` member of the ufunc struct is removed
+  to simplify improvements to the dispatching system.
+  There are no known users overriding or directly accessing this member.
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index cff1b3d38d2e..7be5ee263b6b 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -820,8 +820,8 @@ PyUFunc_Type and PyUFuncObject int *core_offsets; char *core_signature; PyUFunc_TypeResolutionFunc *type_resolver; - PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; void *reserved2; + void *reserved3; npy_uint32 *op_flags; npy_uint32 *iter_flags; /* new in API version 0x0000000D */ @@ -890,10 +890,6 @@ PyUFunc_Type and PyUFuncObject specifies how many different 1-d loops (of the builtin data types) are available. - .. c:member:: int reserved1 - - Unused. - .. c:member:: char *name A string name for the ufunc. This is used dynamically to build @@ -966,19 +962,6 @@ PyUFunc_Type and PyUFuncObject A function which resolves the types and fills an array with the dtypes for the inputs and outputs - .. c:member:: PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector - - .. deprecated:: 1.22 - - Some fallback support for this slot exists, but will be removed - eventually. A universal function that relied on this will - have to be ported eventually. - See :ref:`NEP 41 ` and :ref:`NEP 43 ` - - .. c:member:: void *reserved2 - - For a possible future loop selector with a different signature. - .. c:member:: npy_uint32 op_flags Override the default operand flags for each ufunc operand. diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h index 3f123cacb0a4..c326536baaa4 100644 --- a/numpy/core/include/numpy/ufuncobject.h +++ b/numpy/core/include/numpy/ufuncobject.h @@ -65,30 +65,6 @@ typedef int (PyUFunc_TypeResolutionFunc)( PyObject *type_tup, PyArray_Descr **out_dtypes); -/* - * Legacy loop selector. (This should NOT normally be used and we can expect - * that only the `PyUFunc_DefaultLegacyInnerLoopSelector` is ever set). - * However, unlike the masked version, it probably still works. - * - * ufunc: The ufunc object. - * dtypes: An array which has been populated with dtypes, - * in most cases by the type resolution function - * for the same ufunc. - * out_innerloop: Should be populated with the correct ufunc inner - * loop for the given type. - * out_innerloopdata: Should be populated with the void* data to - * be passed into the out_innerloop function. - * out_needs_api: If the inner loop needs to use the Python API, - * should set the to 1, otherwise should leave - * this untouched. - */ -typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_needs_api); - typedef struct _tagPyUFuncObject { PyObject_HEAD @@ -161,13 +137,8 @@ typedef struct _tagPyUFuncObject { * with the dtypes for the inputs and outputs. */ PyUFunc_TypeResolutionFunc *type_resolver; - /* - * A function which returns an inner loop written for - * NumPy 1.6 and earlier ufuncs. This is for backwards - * compatibility, and may be NULL if inner_loop_selector - * is specified. - */ - PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; + /* Was the legacy loop resolver */ + void *reserved2; /* * This was blocked off to be the "new" inner loop selector in 1.7, * but this was never implemented. 
(This is also why the above @@ -180,7 +151,7 @@ typedef struct _tagPyUFuncObject { #endif /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */ - void *_always_null_previously_masked_innerloop_selector; + void *reserved3; /* * List of flags for each operand when ufunc is called by nditer object. diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c index 08f4c1534b73..8054e533c9ff 100644 --- a/numpy/core/src/umath/legacy_array_method.c +++ b/numpy/core/src/umath/legacy_array_method.c @@ -19,6 +19,7 @@ #include "dtypemeta.h" #include "ufunc_object.h" +#include "ufunc_type_resolution.h" typedef struct { @@ -216,7 +217,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, PyUFuncGenericFunction loop = NULL; /* Note that `needs_api` is not reliable (it was in fact unused normally) */ - if (ufunc->legacy_inner_loop_selector(ufunc, + if (PyUFunc_DefaultLegacyInnerLoopSelector(ufunc, context->descriptors, &loop, &user_data, &needs_api) < 0) { return -1; } diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 5b75b3bfcdfa..fa4f1908fbfd 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -1422,18 +1422,6 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, if (masked) { assert(PyArray_TYPE(op[nop]) == NPY_BOOL); - if (ufunc->_always_null_previously_masked_innerloop_selector != NULL) { - if (PyErr_WarnFormat(PyExc_UserWarning, 1, - "The ufunc %s has a custom masked-inner-loop-selector." - "NumPy assumes that this is NEVER used. If you do make " - "use of this please notify the NumPy developers to discuss " - "future solutions. (See NEP 41 and 43)\n" - "NumPy will continue, but ignore the custom loop selector. " - "This should only affect performance.", - ufunc_get_name_cstr(ufunc)) < 0) { - return -1; - } - } /* * NOTE: In the masked version, we consider the output read-write, @@ -5109,8 +5097,6 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi /* Type resolution and inner loop selection functions */ ufunc->type_resolver = &PyUFunc_DefaultTypeResolver; - ufunc->legacy_inner_loop_selector = &PyUFunc_DefaultLegacyInnerLoopSelector; - ufunc->_always_null_previously_masked_innerloop_selector = NULL; ufunc->op_flags = NULL; ufunc->_loops = NULL; diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index e387a2160311..07a9159b0dcc 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -58,19 +58,6 @@ object_ufunc_type_resolver(PyUFuncObject *ufunc, return 0; } -static int -object_ufunc_loop_selector(PyUFuncObject *ufunc, - PyArray_Descr **NPY_UNUSED(dtypes), - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_needs_api) -{ - *out_innerloop = ufunc->functions[0]; - *out_innerloopdata = (ufunc->data == NULL) ? 
NULL : ufunc->data[0]; - *out_needs_api = 1; - - return 0; -} PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { @@ -166,7 +153,6 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { self->ptr = ptr; self->type_resolver = &object_ufunc_type_resolver; - self->legacy_inner_loop_selector = &object_ufunc_loop_selector; PyObject_GC_Track(self); return (PyObject *)self; From 9a8246e497b74ac064648c731951cf4d8b51b681 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 27 Jul 2023 15:04:19 +0300 Subject: [PATCH 13/30] TST: add failing test --- numpy/core/tests/test_ufunc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 99390e48c3db..9b8992b0d367 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -2231,6 +2231,7 @@ def test_at_negative_indexes(self): assert a[-1] == 11 # issue 24147 assert a[1] == 2 assert a[2] == 3 + assert np.all(indxs == [-1, 1, -1, 2]) def test_at_not_none_signature(self): # Test ufuncs with non-trivial signature raise a TypeError From d13e5f43b6ebf0db7323130c11c78710fcce6317 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 27 Jul 2023 15:05:58 +0300 Subject: [PATCH 14/30] BUG: fix for modifying the index arg in ufunc_at --- numpy/core/src/umath/loops.c.src | 99 +++++++++++++++++++++-------- numpy/core/src/umath/ufunc_object.c | 13 ++-- 2 files changed, 79 insertions(+), 33 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 97a74b4257aa..e69a27a9ea70 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -450,14 +450,19 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 int void *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @type@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@type@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); *indexed = *indexed @OP@ *(@type@ *)value; } return 0; @@ -1241,14 +1246,19 @@ NPY_NO_EXPORT int void *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @type@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@type@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); *indexed = npy_floor_divide@c@(*indexed, *(@type@ *)value); } return 0; @@ -1395,14 +1405,19 @@ LONGDOUBLE_@kind@_indexed(PyArrayMethod_Context *NPY_UNUSED(context), void *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; npy_longdouble *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (npy_longdouble *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, 
indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (npy_longdouble *)(ip1 + is1 * indx); *indexed = *indexed @OP@ *(npy_longdouble *)value; } return 0; @@ -1520,14 +1535,19 @@ HALF_@kind@_indexed(void *NPY_UNUSED(context), void *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; npy_half *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (npy_half *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (npy_half *)(ip1 + is1 * indx); const float v = npy_half_to_float(*(npy_half *)value); *indexed = npy_float_to_half(npy_half_to_float(*indexed) @OP@ v); } @@ -1641,14 +1661,19 @@ HALF_@kind@_indexed(PyArrayMethod_Context *NPY_UNUSED(context), void *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; npy_half *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (npy_half *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (npy_half *)(ip1 + is1 * indx); npy_half v = *(npy_half *)value; *indexed = (@OP@(*indexed, v) || npy_half_isnan(*indexed)) ? *indexed : v; } @@ -1679,14 +1704,19 @@ HALF_@kind@_indexed(PyArrayMethod_Context *NPY_UNUSED(context), void *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; npy_half *indexed; - for (i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (npy_half *)(ip1 + is1 * *(npy_intp *)indx); + for (i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (npy_half *)(ip1 + is1 * indx); npy_half v = *(npy_half *)value; *indexed = (@OP@(*indexed, v) || npy_half_isnan(v)) ? 
*indexed: v; } @@ -1717,14 +1747,19 @@ HALF_floor_divide_indexed(PyArrayMethod_Context *NPY_UNUSED(context), void *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; npy_half *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (npy_half *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (npy_half *)(ip1 + is1 * indx); float v = npy_half_to_float(*(npy_half *)value); float div = npy_floor_dividef(npy_half_to_float(*indexed), v); *indexed = npy_float_to_half(div); @@ -1947,14 +1982,19 @@ NPY_NO_EXPORT int @TYPE@_@kind@_indexed (PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @ftype@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@ftype@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@ftype@ *)(ip1 + is1 * indx); const @ftype@ b_r = ((@ftype@ *)value)[0]; const @ftype@ b_i = ((@ftype@ *)value)[1]; indexed[0] @OP@= b_r; @@ -1981,14 +2021,19 @@ NPY_NO_EXPORT int @TYPE@_multiply_indexed (PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @ftype@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@ftype@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@ftype@ *)(ip1 + is1 * indx); const @ftype@ a_r = indexed[0]; const @ftype@ a_i = indexed[1]; const @ftype@ b_r = ((@ftype@ *)value)[0]; diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 5b75b3bfcdfa..33b05f3757e0 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5796,7 +5796,7 @@ trivial_at_loop(PyArrayMethodObject *ufuncimpl, NPY_ARRAYMETHOD_FLAGS flags, int buffersize=0, errormask = 0; int res; char *args[3]; - npy_intp steps[3]; + npy_intp steps[4]; args[0] = (char *) iter->baseoffset; steps[0] = iter->fancy_strides[0]; if (ufuncimpl->nin == 1) { @@ -5819,16 +5819,17 @@ trivial_at_loop(PyArrayMethodObject *ufuncimpl, NPY_ARRAYMETHOD_FLAGS flags, do { npy_intp *inner_size = NpyIter_GetInnerLoopSizePtr(iter->outer); npy_intp * indxP = (npy_intp *)iter->outer_ptrs[0]; - for (npy_intp i=0; i < *inner_size; i++) { - if (indxP[i] < 0) { - indxP[i] += iter->fancy_dims[0]; - } - } args[1] = (char *)indxP; steps[1] = iter->outer_strides[0]; + /* + * The value of iter->fancy_dims[0] is added to negative indexes + * inside the inner loop + */ + steps[3] = 
iter->fancy_dims[0]; res = ufuncimpl->contiguous_indexed_loop( context, args, inner_size, steps, NULL); + if (args[2] != NULL) { args[2] += (*inner_size) * steps[2]; } From 1ba826a7dd1ac82d0f1f5d61ec163d973ea594b2 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 27 Jul 2023 15:04:08 +0200 Subject: [PATCH 15/30] TYP: Trim down the `_NestedSequence.__getitem__` signature Remove the `slice`-based overload such that it successfully supertypes `deque.__getitem__` --- numpy/_typing/_nested_sequence.py | 8 +------- numpy/typing/tests/data/reveal/array_constructors.pyi | 2 ++ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index 4b6cafc51009..3d0d25ae5b48 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -5,7 +5,6 @@ from collections.abc import Iterator from typing import ( Any, - overload, TypeVar, Protocol, runtime_checkable, @@ -62,12 +61,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - @overload - def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: ... - @overload - def __getitem__(self, index: slice, /) -> _NestedSequence[_T_co]: ... - - def __getitem__(self, index, /): + def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: """Implement ``self[x]``.""" raise NotImplementedError diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 2ff20e9aeeca..759d521c8d2a 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,5 +1,6 @@ from typing import Any, TypeVar from pathlib import Path +from collections import deque import numpy as np import numpy.typing as npt @@ -26,6 +27,7 @@ reveal_type(np.array(A)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.array(B)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.array(B, subok=True)) # E: SubClass[{float64}] reveal_type(np.array([1, 1.0])) # E: ndarray[Any, dtype[Any]] +reveal_type(np.array(deque([1, 2, 3]))) # E: ndarray[Any, dtype[Any]] reveal_type(np.array(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] reveal_type(np.array(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] reveal_type(np.array(A, like=A)) # E: ndarray[Any, dtype[{float64}]] From db083e779b7a08e994778134428510fee04fa31e Mon Sep 17 00:00:00 2001 From: paulreece Date: Sat, 15 Jul 2023 18:45:38 -0400 Subject: [PATCH 16/30] BUG: array2string does not add signs for positive integers. Fixes #24181. --- numpy/core/arrayprint.py | 31 +++++++--- numpy/core/tests/test_arrayprint.py | 87 +++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 8 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 770452194b83..302e8b869ac9 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -154,6 +154,11 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, print the sign of positive values. If ' ', always prints a space (whitespace character) in the sign position of positive values. If '-', omit the sign character of positive values. (default '-') + + .. versionchanged:: 2.0 + The sign parameter can now be an integer type, previously + types were floating-point types. + formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. 
@@ -407,7 +412,7 @@ def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy, # wrapped in lambdas to avoid taking a code path with the wrong type of data formatdict = { 'bool': lambda: BoolFormat(data), - 'int': lambda: IntegerFormat(data), + 'int': lambda: IntegerFormat(data, sign), 'float': lambda: FloatingFormat( data, precision, floatmode, suppress, sign, legacy=legacy), 'longfloat': lambda: FloatingFormat( @@ -639,6 +644,11 @@ def array2string(a, max_line_width=None, precision=None, (whitespace character) in the sign position of positive values. If '-', omit the sign character of positive values. Defaults to ``numpy.get_printoptions()['sign']``. + + .. versionchanged:: 2.0 + The sign parameter can now be an integer type, previously + types were floating-point types. + floatmode : str, optional Controls the interpretation of the `precision` option for floating-point types. @@ -1218,19 +1228,24 @@ def format_float_positional(x, precision=None, unique=True, sign=sign, pad_left=pad_left, pad_right=pad_right, min_digits=min_digits) - class IntegerFormat: - def __init__(self, data): + def __init__(self, data, sign='-'): if data.size > 0: - max_str_len = max(len(str(np.max(data))), - len(str(np.min(data)))) + data_max = np.max(data) + data_min = np.min(data) + data_max_str_len = len(str(data_max)) + if sign == ' ' and data_min < 0: + sign = '-' + if data_max >= 0 and sign in "+ ": + data_max_str_len += 1 + max_str_len = max(data_max_str_len, + len(str(data_min))) else: max_str_len = 0 - self.format = '%{}d'.format(max_str_len) + self.format = f'{{:{sign}{max_str_len}d}}' def __call__(self, x): - return self.format % x - + return self.format.format(x) class BoolFormat: def __init__(self, data, **kwargs): diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 6796b40777fe..93c8c68bd3f0 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -524,6 +524,93 @@ def test_refcount(self): gc.enable() assert_(r1 == r2) + def test_with_sign(self): + # mixed negative and positive value array + a = np.array([-2, 0, 3]) + assert_equal( + np.array2string(a, sign='+'), + '[-2 +0 +3]' + ) + assert_equal( + np.array2string(a, sign='-'), + '[-2 0 3]' + ) + assert_equal( + np.array2string(a, sign=' '), + '[-2 0 3]' + ) + # all non-negative array + a = np.array([2, 0, 3]) + assert_equal( + np.array2string(a, sign='+'), + '[+2 +0 +3]' + ) + assert_equal( + np.array2string(a, sign='-'), + '[2 0 3]' + ) + assert_equal( + np.array2string(a, sign=' '), + '[ 2 0 3]' + ) + # all negative array + a = np.array([-2, -1, -3]) + assert_equal( + np.array2string(a, sign='+'), + '[-2 -1 -3]' + ) + assert_equal( + np.array2string(a, sign='-'), + '[-2 -1 -3]' + ) + assert_equal( + np.array2string(a, sign=' '), + '[-2 -1 -3]' + ) + # 2d array mixed negative and positive + a = np.array([[10, -1, 1, 1], [10, 10, 10, 10]]) + assert_equal( + np.array2string(a, sign='+'), + '[[+10 -1 +1 +1]\n [+10 +10 +10 +10]]' + ) + assert_equal( + np.array2string(a, sign='-'), + '[[10 -1 1 1]\n [10 10 10 10]]' + ) + assert_equal( + np.array2string(a, sign=' '), + '[[10 -1 1 1]\n [10 10 10 10]]' + ) + # 2d array all positive + a = np.array([[10, 0, 1, 1], [10, 10, 10, 10]]) + assert_equal( + np.array2string(a, sign='+'), + '[[+10 +0 +1 +1]\n [+10 +10 +10 +10]]' + ) + assert_equal( + np.array2string(a, sign='-'), + '[[10 0 1 1]\n [10 10 10 10]]' + ) + assert_equal( + np.array2string(a, sign=' '), + '[[ 10 0 1 1]\n [ 10 10 10 10]]' + ) + # 2d array all 
negative
+        a = np.array([[-10, -1, -1, -1], [-10, -10, -10, -10]])
+        assert_equal(
+            np.array2string(a, sign='+'),
+            '[[-10  -1  -1  -1]\n [-10 -10 -10 -10]]'
+        )
+        assert_equal(
+            np.array2string(a, sign='-'),
+            '[[-10  -1  -1  -1]\n [-10 -10 -10 -10]]'
+        )
+        assert_equal(
+            np.array2string(a, sign=' '),
+            '[[-10  -1  -1  -1]\n [-10 -10 -10 -10]]'
+        )
+
+
 class TestPrintOptions:
     """Test getting and setting global print options."""

From 42928ea6439cbe468554787bab0ee26069aab7eb Mon Sep 17 00:00:00 2001
From: Yuki
Date: Fri, 28 Jul 2023 06:11:00 +0000
Subject: [PATCH 17/30] DOC: Remove ``np.source`` and ``np.lookfor``

``np.source`` and ``np.lookfor`` were removed (gh-24144).
---
 doc/source/reference/routines.help.rst     | 13 -------------
 doc/source/user/numpy-for-matlab-users.rst |  2 +-
 numpy/lib/utils.py                         |  4 ----
 3 files changed, 1 insertion(+), 18 deletions(-)

diff --git a/doc/source/reference/routines.help.rst b/doc/source/reference/routines.help.rst
index 9b6eb4ad307c..2dcad7393472 100644
--- a/doc/source/reference/routines.help.rst
+++ b/doc/source/reference/routines.help.rst
@@ -5,20 +5,7 @@ NumPy-specific help functions
 
 .. currentmodule:: numpy
 
-Finding help
-------------
-
-.. autosummary::
-   :toctree: generated/
-
-   lookfor
-
-
-Reading help
-------------
-
 .. autosummary::
    :toctree: generated/
 
    info
-   source
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index 3344b87bbb69..7c7fd0898490 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -577,7 +577,7 @@ indices using the ``ix_`` command. E.g., for 2D array ``a``, one might
 do: ``ind=[1, 3]; a[np.ix_(ind, ind)] += 100``.
 
 \ **HELP**: There is no direct equivalent of MATLAB's ``which`` command,
-but the commands :func:`help` and :func:`numpy.source` will usually list the filename
+but the command :func:`help` will usually list the filename
 where the function is located. Python also has an ``inspect`` module (do
 ``import inspect``) which provides a ``getfile`` that often works.
 
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index c309afc660ed..adf4aac686e1 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -549,10 +549,6 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
     toplevel : str, optional
         Start search at this level.
 
-    See Also
-    --------
-    source, lookfor
-
     Notes
     -----
     When used interactively with an object, ``np.info(obj)`` is equivalent

From f063fcbdb41694fd2c142b47d9259954eaa52b83 Mon Sep 17 00:00:00 2001
From: Shen Zhou
Date: Fri, 28 Jul 2023 12:11:11 +0200
Subject: [PATCH 18/30] fix: inconsistency between doc and code

---
 doc/source/user/basics.dispatch.rst | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst
index a493ef769dbe..fecdc77a9644 100644
--- a/doc/source/user/basics.dispatch.rst
+++ b/doc/source/user/basics.dispatch.rst
@@ -96,10 +96,10 @@ For this example we will only handle the method ``__call__``
 ...         elif isinstance(input, self.__class__):
 ...             scalars.append(input._i)
 ...             if N is not None:
-...
if N != self._N: +... if N != input._N: ... raise TypeError("inconsistent sizes") ... else: -... N = self._N +... N = input._N ... else: ... return NotImplemented ... return self.__class__(N, ufunc(*scalars, **kwargs)) @@ -186,10 +186,10 @@ functions to our custom variants. ... elif isinstance(input, self.__class__): ... scalars.append(input._i) ... if N is not None: -... if N != self._N: +... if N != input._N: ... raise TypeError("inconsistent sizes") ... else: -... N = self._N +... N = input._N ... else: ... return NotImplemented ... return self.__class__(N, ufunc(*scalars, **kwargs)) From 5c3ebd1e785f43b1764dc9070046942ac35272a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 28 Jul 2023 14:28:48 +0200 Subject: [PATCH 19/30] DOC: fix a couple typos and rst formatting errors in NEP 0053 --- doc/neps/nep-0053-c-abi-evolution.rst | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/doc/neps/nep-0053-c-abi-evolution.rst b/doc/neps/nep-0053-c-abi-evolution.rst index 1b6315f6b360..940746730580 100644 --- a/doc/neps/nep-0053-c-abi-evolution.rst +++ b/doc/neps/nep-0053-c-abi-evolution.rst @@ -170,13 +170,14 @@ Cython users may use the NumPy C-API via ``cimport numpy as cnp``. Due to the uncertainty of Cython development, there are two scenarios for impact on Cython users. -If Cython 3 can be relied on, Cython users would be impacted *less* then C-API +If Cython 3 can be relied on, Cython users would be impacted *less* than C-API users, because Cython 3 allows us to hide struct layout changes (i.e. changes to ``PyArray_Descr``). -If this is not the case and we must support Cython 2.x, then Cython users -will also have to use a function/macro like ``PyDataType_ITEMSIZE()`` (or -use the Python object). This is unfortunately less typical in Cython code, -but also unlikely to be a common pattern for dtype struct fields/attributes. +If this is not the case and we must support Cython 0.29.x (which is the historic branch +before Cython 3), then Cython users will also have to use a function/macro like +``PyDataType_ITEMSIZE()`` (or use the Python object). This is unfortunately less +typical in Cython code, but also unlikely to be a common pattern for dtype struct +fields/attributes. A further impact is that some future API additions such as new classes may need to placed in a distinct ``.pyd`` file to avoid Cython generating code @@ -199,7 +200,7 @@ is missing. Some new API can be backported ------------------------------- -One large advantage of allowing users to compile with the newst version of +One large advantage of allowing users to compile with the newest version of NumPy is that in some cases we will be able to backport new API. Some new API functions can be written in terms of old ones or included directly. @@ -229,7 +230,7 @@ An implementation can be found in the `PR 23528`_. The second part is mainly about identifying and implementing the desired changes in a way that backwards compatibility will not be broken and API breaks remain manageable for downstream libraries. -Everyone change we do must have a brief note on how to adapt to the +Every change we do must have a brief note on how to adapt to the API change (i.e. alternative functions). NumPy 2 compatibility and API table changes @@ -239,12 +240,13 @@ NumPy 1.x (a table is a list of functions and symbols). For compatibility we would need to translate the 1.x table to the 2.0 table. This could be done in headers only in theory, but this seems unwieldy. 
-We thus propose to add a ``numpy2_compat`` package. This packages main +We thus propose to add a ``numpy2_compat`` package. This package's main purpose would be to provide a translation of the 1.x table to the 2.x one in a single place (filling in any necessary blanks). Introducing this package solves the "transition" issue because it allows a user to: + * Install a SciPy version that is compatible with 2.0 and 1.x * and keep using NumPy 1.x because of other packages they are using are not yet compatible. @@ -278,6 +280,7 @@ Backward compatibility ====================== As mentioned above backwards compatibility is achieved by: + 1. Forcing downstream to recompile with NumPy 2.0 2. Providing a ``numpy2_compat`` library. From 37bb5c797acc908c06dcef7a96b79b7decc8bd1d Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 28 Jul 2023 14:52:17 +0200 Subject: [PATCH 20/30] BLD: default to failing the build when BLAS or LAPACK are missing --- meson_options.txt | 2 ++ numpy/linalg/meson.build | 1 - numpy/meson.build | 30 ++++++++++++++++++++++++------ 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/meson_options.txt b/meson_options.txt index f18d1c0942ac..7ce4eefacd89 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -2,6 +2,8 @@ option('blas', type: 'string', value: 'openblas', description: 'option for BLAS library switching') option('lapack', type: 'string', value: 'openblas', description: 'option for LAPACK library switching') +option('allow-noblas', type: 'boolean', value: false, + description: 'If set to true, allow building with (slow!) internal fallback routines') option('use-ilp64', type: 'boolean', value: false, description: 'Use ILP64 (64-bit integer) BLAS and LAPACK interfaces') option('blas-symbol-suffix', type: 'string', value: '', diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index d290e5b3932d..a753c6409918 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -14,7 +14,6 @@ lapack_lite_sources = [ lapack_lite_module_src = ['lapack_litemodule.c'] if not have_lapack - warning('LAPACK was not found, NumPy is using an unoptimized, naive build from sources!') lapack_lite_module_src += lapack_lite_sources endif diff --git a/numpy/meson.build b/numpy/meson.build index 3666e6848456..cffaf3b99c10 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -134,14 +134,22 @@ if not use_ilp64 ).stdout().strip() == '1' endif -# BLAS and LAPACK are optional dependencies for NumPy. We can only use a BLAS -# which provides a CBLAS interface. So disable BLAS completely if CBLAS is not -# found (lapack-lite will be used instead; xref gh-24200 for a discussion on -# whether this silent disabling should stay as-is) +# BLAS and LAPACK are dependencies for NumPy. Since NumPy 2.0, by default the +# build will fail if they are missing; the performance impact is large, so +# using fallback routines must be explicitly opted into by the user. xref +# gh-24200 for a discussion on this. +# +# Note that we can only use a BLAS which provides a CBLAS interface. So disable +# BLAS completely if CBLAS is not found. +allow_noblas = get_option('allow-noblas') if have_blas _args_blas = [] # note: used for C and C++ via `blas_dep` below if have_cblas _args_blas += ['-DHAVE_CBLAS'] + elif not allow_noblas + error('No CBLAS interface detected! 
Install a BLAS library with CBLAS ' + \ + 'support, or use the `allow-noblas` build option (note, this ' + \ + 'may be up to 100x slower for some linear algebra operations).') endif if use_ilp64 _args_blas += ['-DHAVE_BLAS_ILP64'] @@ -154,7 +162,13 @@ if have_blas compile_args: _args_blas, ) else - blas_dep = [] + if allow_noblas + blas_dep = [] + else + error('No BLAS library detected! Install one, or use the ' + \ + '`allow-noblas` build option (note, this may be up to 100x slower ' + \ + 'for some linear algebra operations).') + endif endif if lapack_name == 'openblas' @@ -162,7 +176,11 @@ if lapack_name == 'openblas' endif lapack_dep = dependency(lapack_name, required: false) have_lapack = lapack_dep.found() - +if not have_lapack and not allow_noblas + error('No LAPACK library detected! Install one, or use the ' + \ + '`allow-noblas` build option (note, this may be up to 100x slower ' + \ + 'for some linear algebra operations).') +endif # Copy the main __init__.py|pxd files to the build dir (needed for Cython) __init__py = fs.copyfile('__init__.py') From 0010717b7634e4913db4fe95294ac99ca77a9d8b Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 28 Jul 2023 13:41:36 +0200 Subject: [PATCH 21/30] CI: add a Windows + 32-bit Python job on GitHub Actions --- .github/workflows/windows_meson.yml | 41 ++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_meson.yml b/.github/workflows/windows_meson.yml index eac0f7e640be..97dfa41eaa2c 100644 --- a/.github/workflows/windows_meson.yml +++ b/.github/workflows/windows_meson.yml @@ -17,10 +17,10 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - meson: - name: Meson windows build/test + msvc_64bit_python_openblas: + name: MSVC, x86-64, LP64 OpenBLAS runs-on: windows-2019 - # if: "github.repository == 'numpy/numpy'" + if: "github.repository == 'numpy/numpy'" steps: - name: Checkout uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 @@ -86,3 +86,38 @@ jobs: echo "LASTEXITCODE is '$LASTEXITCODE'" python -c "import numpy, sys; sys.exit(numpy.test(verbose=3) is False)" echo "LASTEXITCODE is '$LASTEXITCODE'" + + msvc_32bit_python_openblas: + name: MSVC, 32-bit Python, no BLAS + runs-on: windows-2019 + if: "github.repository == 'numpy/numpy'" + steps: + - name: Checkout + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + submodules: recursive + fetch-depth: 0 + + - name: Setup Python (32-bit) + uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 + with: + python-version: '3.10' + architecture: 'x86' + + - name: Setup MSVC (32-bit) + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: 'x86' + + - name: Build and install + run: | + python -m pip install . 
-v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" + + - name: Install test dependencies + run: | + python -m pip install -r test_requirements.txt + + - name: Run test suite (fast) + run: | + cd tools + python -m pytest --pyargs numpy -m "not slow" -n2 From 7b1083fa0d786d8b7b54741bdb8136c2a45c910d Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 28 Jul 2023 14:46:04 +0200 Subject: [PATCH 22/30] TST: skip FP exceptions test on 32-bit Windows --- numpy/core/tests/test_umath.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 6951d41e43d4..0e07a6cb937b 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1764,6 +1764,8 @@ def test_expm1(self): np.log, np.log2, np.log10, np.reciprocal, np.arccosh ] + @pytest.mark.skipif(sys.platform == "win32" and sys.maxsize < 2**31 + 1, + reason='failures on 32-bit Python, see FIXME below') @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP) @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) @pytest.mark.parametrize("data, escape", ( @@ -1810,6 +1812,8 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # FIXME: skipped on MSVC:32-bit during switch to Meson, 10 cases fail + # when SIMD support not present / disabled if ufunc in (np.spacing, np.ceil) and dtype == 'e': return array = np.array(data, dtype=dtype) From ee16c2222216c39caf0d79dca007a3d706fd25ff Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 28 Jul 2023 21:25:42 +0200 Subject: [PATCH 23/30] BUG: Further fixes to indexing loop and added tests This is a follow-up to gh-24272 which missed a few files. 
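
For reference, a minimal sketch (not part of this patch) of the behaviour the
indexed inner loops must preserve: ``ufunc.at`` wraps negative indices the same
way regular indexing does, which is exactly what the tests added below exercise
for several ufuncs and dtypes (see gh-24147):

    import numpy as np

    a = np.arange(10)
    # -1 appears twice, so a[-1] is incremented twice
    np.add.at(a, np.array([-1, 1, -1, 2], dtype=np.intp), 1)
    assert a[-1] == 11 and a[1] == 2 and a[2] == 3
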
--- numpy/core/src/umath/_umath_tests.c.src | 11 +++++++--- .../src/umath/loops_arithm_fp.dispatch.c.src | 22 ++++++++++++++----- .../src/umath/loops_arithmetic.dispatch.c.src | 22 ++++++++++++++----- .../src/umath/loops_minmax.dispatch.c.src | 11 +++++++--- numpy/core/tests/test_ufunc.py | 22 +++++++++++++------ 5 files changed, 63 insertions(+), 25 deletions(-) diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src index b427991e5463..b9e192706d00 100644 --- a/numpy/core/src/umath/_umath_tests.c.src +++ b/numpy/core/src/umath/_umath_tests.c.src @@ -375,13 +375,18 @@ INT32_negative_indexed(PyArrayMethod_Context *NPY_UNUSED(context), npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; npy_intp is1 = steps[0], isindex = steps[1]; npy_intp n = dimensions[0]; + npy_intp shape = steps[3]; npy_intp i; int32_t *indexed; - for(i = 0; i < n; i++, indx += isindex) { - indexed = (int32_t *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (int32_t *)(ip1 + is1 * indx); if (i == 3) { *indexed = -200; } else { diff --git a/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src index b72db5846f70..7ba3981e8119 100644 --- a/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src @@ -258,14 +258,19 @@ NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@_indexed) (PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @type@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@type@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); *indexed = *indexed @OP@ *(@type@ *)value; } return 0; @@ -650,14 +655,19 @@ NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@_indexed) (PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @ftype@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@ftype@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@ftype@ *)(ip1 + is1 * indx); const @ftype@ b_r = ((@ftype@ *)value)[0]; const @ftype@ b_i = ((@ftype@ *)value)[1]; #if @is_mul@ diff --git a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src index b6f12629807b..e07bb79808af 100644 --- a/numpy/core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithmetic.dispatch.c.src @@ -400,14 +400,19 @@ NPY_NO_EXPORT int 
NPY_CPU_DISPATCH_CURFX(@TYPE@_divide_indexed) (PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @type@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@type@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); *indexed = floor_div_@TYPE@(*indexed, *(@type@ *)value); } return 0; @@ -486,14 +491,19 @@ NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_divide_indexed) (PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; npy_intp n = dimensions[0]; npy_intp i; @type@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@type@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); @type@ in2 = *(@type@ *)value; if (NPY_UNLIKELY(in2 == 0)) { npy_set_floatstatus_divbyzero(); diff --git a/numpy/core/src/umath/loops_minmax.dispatch.c.src b/numpy/core/src/umath/loops_minmax.dispatch.c.src index 9d8667d3830a..236e2e2eb760 100644 --- a/numpy/core/src/umath/loops_minmax.dispatch.c.src +++ b/numpy/core/src/umath/loops_minmax.dispatch.c.src @@ -456,14 +456,19 @@ NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@_indexed) (PyArrayMethod_Context *NPY_UNUSED(context), char *const *args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) { char *ip1 = args[0]; - char *indx = args[1]; + char *indxp = args[1]; char *value = args[2]; npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; npy_intp n = dimensions[0]; + npy_intp shape = steps[3]; npy_intp i; @type@ *indexed; - for(i = 0; i < n; i++, indx += isindex, value += isb) { - indexed = (@type@ *)(ip1 + is1 * *(npy_intp *)indx); + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); *indexed = SCALAR_OP(*indexed, *(@type@ *)value); } return 0; diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 9b8992b0d367..8abbfaed5486 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -2224,13 +2224,21 @@ def test_ufunc_at_advanced(self): np.maximum.at(a, [0], 0) assert_equal(a, np.array([1, 2, 3])) - def test_at_negative_indexes(self): - a = np.arange(10) - indxs = np.array([-1, 1, -1, 2]) - np.add.at(a, indxs, 1) - assert a[-1] == 11 # issue 24147 - assert a[1] == 2 - assert a[2] == 3 + @pytest.mark.parametrize("dtype", + np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", + [np.add, np.subtract, np.divide, np.minimum, np.maximum]) + def test_at_negative_indexes(self, dtype, ufunc): + a = np.arange(0, 10).astype(dtype) + indxs = np.array([-1, 1, -1, 
2]).astype(np.intp) + vals = np.array([1, 5, 2, 10], dtype=a.dtype) + + expected = a.copy() + for i, v in zip(indxs, vals): + expected[i] = ufunc(expected[i], v) + + ufunc.at(a, indxs, vals) + assert_array_equal(a, expected) assert np.all(indxs == [-1, 1, -1, 2]) def test_at_not_none_signature(self): From 826a37891297d9a15af3df10cdc6b4c2a002cab2 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 28 Jul 2023 17:09:48 +0200 Subject: [PATCH 24/30] CI: set flags where we don't need BLAS, improve Azure failure reporting --- .circleci/config.yml | 2 +- .github/workflows/wheels.yml | 5 +++-- azure-steps-windows.yml | 8 ++++++-- tools/ci/cirrus_macosx_arm64.yml | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b8b5f0ae90d7..c6b9a6c3ea28 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -63,7 +63,7 @@ jobs: . venv/bin/activate pip install --progress-bar=off -r test_requirements.txt pip install --progress-bar=off -r doc_requirements.txt - pip install . + pip install . --config-settings=setup-args="-Dallow-noblas=true" - run: name: create release notes diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bb72f227152a..5a33200089ab 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -186,12 +186,13 @@ jobs: python-version: "3.9" - name: Build sdist run: | - python setup.py sdist + python -m pip install -U pip build + python -m build --sdist -Csetup-args=-Dallow-noblas=true - name: Test the sdist run: | # TODO: Don't run test suite, and instead build wheels from sdist # Depends on pypa/cibuildwheel#1020 - python -m pip install dist/*.gz + python -m pip install dist/*.gz -Csetup-args=-Dallow-noblas=true pip install ninja pip install -r test_requirements.txt cd .. # Can't import numpy within numpy src directory diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index 03eba9092827..766da3353d28 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -28,7 +28,6 @@ steps: mkdir C:/opt/openblas/openblas_dll mkdir C:/opt/32/lib/pkgconfig mkdir C:/opt/64/lib/pkgconfig - # TBD: support 32 bit testing $target=$(python -c "import tools.openblas_support as obs; plat=obs.get_plat(); ilp64=obs.get_ilp64(); target=f'openblas_{plat}.zip'; obs.download_openblas(target, plat, ilp64);print(target)") unzip -o -d c:/opt/ $target echo "##vso[task.setvariable variable=PKG_CONFIG_PATH]c:/opt/64/lib/pkgconfig" @@ -36,18 +35,23 @@ steps: displayName: 'Download / Install OpenBLAS' - powershell: | + # Note: ensure the `pip install .` command remains the last one here, to + # avoid "green on failure" issues python -c "from tools import openblas_support; openblas_support.make_init('numpy')" If ( Test-Path env:NPY_USE_BLAS_ILP64 ) { python -m pip install . -Csetup-args="--vsenv" -Csetup-args="-Duse-ilp64=true" -Csetup-args="-Dblas-symbol-suffix=64_" } else { python -m pip install . 
-Csetup-args="--vsenv" } + displayName: 'Build NumPy' + +- powershell: | # copy from c:/opt/openblas/openblas_dll to numpy/.libs to ensure it can # get loaded when numpy is imported (no RPATH on Windows) $target = $(python -c "import sysconfig; print(sysconfig.get_path('platlib'))") mkdir $target/numpy/.libs copy C:/opt/openblas/openblas_dll/*.dll $target/numpy/.libs - displayName: 'Build NumPy' + displayName: 'Copy OpenBLAS DLL to site-packages' - script: | python -m pip install threadpoolctl diff --git a/tools/ci/cirrus_macosx_arm64.yml b/tools/ci/cirrus_macosx_arm64.yml index 7b16dbe91ddf..0dc97763e954 100644 --- a/tools/ci/cirrus_macosx_arm64.yml +++ b/tools/ci/cirrus_macosx_arm64.yml @@ -53,5 +53,5 @@ macos_arm64_test_task: pip install -r build_requirements.txt pip install pytest pytest-xdist hypothesis typing_extensions - spin build + spin build -- -Dallow-noblas=true spin test -j auto From 488d535c777f0caf4601e502ba323cc241a1312a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 28 Jul 2023 18:35:54 +0200 Subject: [PATCH 25/30] BLD: fix issue ILP64 OpenBLAS detection, disable BLAS for PyPy job Disable using BLAS in the PyPy job on Azure because it was broken. Before this PR, it uses to silently not find OpenBLAS and continue, now we have to be explicit about it. --- azure-pipelines.yml | 2 ++ azure-steps-windows.yml | 9 ++++++--- numpy/meson.build | 33 +++++++++++++++++++-------------- tools/openblas_support.py | 2 +- 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index bb667e066ea2..d14e73b27edc 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -227,6 +227,8 @@ stages: TEST_MODE: fast BITS: 64 NPY_USE_BLAS_ILP64: '1' + # Broken - it builds but _multiarray_umath doesn't import - needs investigating + DISABLE_BLAS: '1' steps: - template: azure-steps-windows.yml diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index 766da3353d28..e09663cd7fbc 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -38,10 +38,13 @@ steps: # Note: ensure the `pip install .` command remains the last one here, to # avoid "green on failure" issues python -c "from tools import openblas_support; openblas_support.make_init('numpy')" - If ( Test-Path env:NPY_USE_BLAS_ILP64 ) { - python -m pip install . -Csetup-args="--vsenv" -Csetup-args="-Duse-ilp64=true" -Csetup-args="-Dblas-symbol-suffix=64_" + If ( Test-Path env:DISABLE_BLAS ) { + python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" -Csetup-args="-Dallow-noblas=true" + } + elseif ( Test-Path env:NPY_USE_BLAS_ILP64 ) { + python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Duse-ilp64=true" -Csetup-args="-Dblas-symbol-suffix=64_" } else { - python -m pip install . -Csetup-args="--vsenv" + python -m pip install . 
-v -Csetup-args="--vsenv" } displayName: 'Build NumPy' diff --git a/numpy/meson.build b/numpy/meson.build index cffaf3b99c10..a8ca03cf76de 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -53,6 +53,19 @@ endif # (see cibuildwheel settings in pyproject.toml), but used by CI jobs already blas_symbol_suffix = get_option('blas-symbol-suffix') +use_ilp64 = get_option('use-ilp64') +if not use_ilp64 + # For now, keep supporting this environment variable too (same as in setup.py) + # `false is the default for the CLI flag, so check if env var was set + use_ilp64 = run_command(py, + [ + '-c', + 'import os; print(1) if os.environ.get("NPY_USE_BLAS_ILP64", "0") != "0" else print(0)' + ], + check: true + ).stdout().strip() == '1' +endif + # TODO: 64-bit (ILP64) BLAS and LAPACK support (e.g., check for more .pc files # so we detect `openblas64_.so` directly). Partially supported now, needs more @@ -70,7 +83,12 @@ lapack_name = get_option('lapack') # pkg-config uses a lower-case name while CMake uses a capitalized name, so try # that too to make the fallback detection with CMake work if blas_name == 'openblas' - blas = dependency(['openblas', 'OpenBLAS'], required: false) + if use_ilp64 + _openblas_names = ['openblas64', 'openblas', 'OpenBLAS'] + else + _openblas_names = ['openblas', 'OpenBLAS'] + endif + blas = dependency(_openblas_names, required: false) else blas = dependency(blas_name, required: false) endif @@ -121,19 +139,6 @@ if have_blas endif endif -use_ilp64 = get_option('use-ilp64') -if not use_ilp64 - # For now, keep supporting this environment variable too (same as in setup.py) - # `false is the default for the CLI flag, so check if env var was set - use_ilp64 = run_command(py, - [ - '-c', - 'import os; print(1) if os.environ.get("NPY_USE_BLAS_ILP64", "0") != "0" else print(0)' - ], - check: true - ).stdout().strip() == '1' -endif - # BLAS and LAPACK are dependencies for NumPy. Since NumPy 2.0, by default the # build will fail if they are missing; the performance impact is large, so # using fallback routines must be explicitly opted into by the user. xref diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 454d6b746c6c..201824b0871a 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -324,7 +324,7 @@ def test_version(expected_version=None): data = threadpoolctl.threadpool_info() if len(data) != 1: if platform.python_implementation() == 'PyPy': - print(f"Check broken in CI on PyPy, data is: {data}") + print(f"Not using OpenBLAS for PyPy in Azure CI, so skip this") return raise ValueError(f"expected single threadpool_info result, got {data}") if not expected_version: From cad8595a8c86c173285d82b61f6797ff24324364 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sat, 29 Jul 2023 10:15:13 +1000 Subject: [PATCH 26/30] CI: correct URL in cirrus.star [skip cirrus] --- .cirrus.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cirrus.star b/.cirrus.star index 25c7b7dfd863..6b2203872394 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -24,7 +24,7 @@ def main(ctx): # only contains the actual commit message on a non-PR trigger event. # For a PR event it contains the PR title and description. 
SHA = env.get("CIRRUS_CHANGE_IN_REPO") - url = "https://api.github.com/repos/scipy/scipy/git/commits/" + SHA + url = "https://api.github.com/repos/numpy/numpy/git/commits/" + SHA dct = http.get(url).json() # if "[wheel build]" in dct["message"]: # return fs.read("ci/cirrus_wheels.yml") From 0793a86ef37764fd5d488348646c592175bb3839 Mon Sep 17 00:00:00 2001 From: Yuki K Date: Sun, 30 Jul 2023 05:15:52 +0900 Subject: [PATCH 27/30] DOC: Fix some incorrectly formatted documents (#24287) Fix rst formatting to correct rendered html --- doc/source/reference/c-api/array.rst | 4 ++-- doc/source/user/how-to-index.rst | 14 +++++++------- doc/source/user/how-to-io.rst | 4 ++-- numpy/lib/index_tricks.py | 3 ++- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 170430c6af80..0139279c1fc8 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1630,8 +1630,8 @@ Conversion `_ to `PyArray_Descr` and returns a new array of the given `dtype` using the data in the current array at a specified `offset` in bytes. The - `offset` plus the itemsize of the new array type must be less than ``self - ->descr->elsize`` or an error is raised. The same shape and strides + `offset` plus the itemsize of the new array type must be less than + ``self->descr->elsize`` or an error is raised. The same shape and strides as the original array are used. Therefore, this function has the effect of returning a field from a structured array. But, it can also be used to select specific bytes or groups of bytes from any array diff --git a/doc/source/user/how-to-index.rst b/doc/source/user/how-to-index.rst index 97c45126012f..5db69c71a7f6 100644 --- a/doc/source/user/how-to-index.rst +++ b/doc/source/user/how-to-index.rst @@ -193,13 +193,13 @@ Non-zero elements Use :meth:`nonzero` to get a tuple of array indices of non-zero elements corresponding to every dimension:: - >>> z = np.array([[1, 2, 3, 0], [0, 0, 5, 3], [4, 6, 0, 0]]) - >>> z - array([[1, 2, 3, 0], - [0, 0, 5, 3], - [4, 6, 0, 0]]) - >>> np.nonzero(z) - (array([0, 0, 0, 1, 1, 2, 2]), array([0, 1, 2, 2, 3, 0, 1])) + >>> z = np.array([[1, 2, 3, 0], [0, 0, 5, 3], [4, 6, 0, 0]]) + >>> z + array([[1, 2, 3, 0], + [0, 0, 5, 3], + [4, 6, 0, 0]]) + >>> np.nonzero(z) + (array([0, 0, 0, 1, 1, 2, 2]), array([0, 1, 2, 2, 3, 0, 1])) Use :meth:`flatnonzero` to fetch indices of elements that are non-zero in the flattened version of the ndarray:: diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index 6a4127e8f00a..7a4f9ed9799c 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -96,7 +96,7 @@ Whitespace-delimited :func:`numpy.genfromtxt` can also parse whitespace-delimited data files that have missing values if -* **Each field has a fixed width**: Use the width as the `delimiter` argument. +* **Each field has a fixed width**: Use the width as the `delimiter` argument.:: # File with width=4. The data does not have to be justified (for example, # the 2 in row 1), the last column can be less than width (for example, the 6 @@ -154,7 +154,7 @@ that have missing values if * **The delimiter whitespace character is different from the whitespace that indicates missing data**. For instance, if columns are delimited by ``\t``, then missing data will be recognized if it consists of one - or more spaces. + or more spaces.:: >>> with open("tabs.txt", "r") as f: ... 
data = (f.read()) diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 4a55b1afd297..7c1a80cd718d 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -225,7 +225,8 @@ class MGridClass(nd_grid): Returns ------- - mesh-grid `ndarrays` all of the same dimensions + mesh-grid + `ndarrays` all of the same dimensions See Also -------- From ee56599ba62eee055255982c78906aca8541da93 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 30 Jul 2023 11:59:52 +0200 Subject: [PATCH 28/30] DOC: update code comment about `NPY_USE_BLAS_ILP64` environment variable (#24289) As suggested by Chuck in a review comment on a previous PR. Sending as a separate follow-up also because it helps to check Cirrus CI triggering which we are looking at in issue 24280. [skip azp] [skip circle] [skip actions] [skip travis] --- numpy/meson.build | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/meson.build b/numpy/meson.build index a8ca03cf76de..76ef7b52ece5 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -55,8 +55,9 @@ blas_symbol_suffix = get_option('blas-symbol-suffix') use_ilp64 = get_option('use-ilp64') if not use_ilp64 - # For now, keep supporting this environment variable too (same as in setup.py) - # `false is the default for the CLI flag, so check if env var was set + # For now, keep supporting the `NPY_USE_BLAS_ILP64` environment variable too + # (same as in setup.py) `false is the default for the CLI flag, so check if + # env var was set use_ilp64 = run_command(py, [ '-c', From 1a0066b93a29843140793e9b9d31fbbdaf25f1c7 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 31 Jul 2023 07:14:44 +1000 Subject: [PATCH 29/30] CI: only build cirrus wheels when requested (#24286) [skip ci] --- .cirrus.star | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/.cirrus.star b/.cirrus.star index 6b2203872394..6f331a7c5b66 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -26,14 +26,26 @@ def main(ctx): SHA = env.get("CIRRUS_CHANGE_IN_REPO") url = "https://api.github.com/repos/numpy/numpy/git/commits/" + SHA dct = http.get(url).json() - # if "[wheel build]" in dct["message"]: - # return fs.read("ci/cirrus_wheels.yml") - if "[skip cirrus]" in dct["message"] or "[skip ci]" in dct["message"]: + commit_msg = dct["message"] + if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: return [] - # add extra jobs to the cirrus run by += adding to config - config = fs.read("tools/ci/cirrus_wheels.yml") - config += fs.read("tools/ci/cirrus_macosx_arm64.yml") + wheel = False + labels = env.get("CIRRUS_PR_LABELS", "") + pr_number = env.get("CIRRUS_PR", "-1") + tag = env.get("CIRRUS_TAG", "") - return config + if "[wheel build]" in commit_msg: + wheel = True + + # if int(pr_number) > 0 and ("14 - Release" in labels or "36 - Build" in labels): + # wheel = True + + if tag.startswith("v") and "dev0" not in tag: + wheel = True + + if wheel: + return fs.read("tools/ci/cirrus_wheels.yml") + + return fs.read("tools/ci/cirrus_macosx_arm64.yml") From e4ce121105ebb34cf70b540531a2006b42e1e539 Mon Sep 17 00:00:00 2001 From: ellaella12 <120079323+ellaella12@users.noreply.github.com> Date: Mon, 31 Jul 2023 19:02:57 +0800 Subject: [PATCH 30/30] DOC: Clarify correlate function definition (#24100) * DOC: add reference to correlate functions * DOC: Minor var fix closes gh-23782 --------- Co-authored-by: Kei Co-authored-by: Mukulika --- numpy/core/numeric.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git 
a/numpy/core/numeric.py b/numpy/core/numeric.py index b857a5f253ab..2b30ee594cd8 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -661,12 +661,12 @@ def correlate(a, v, mode='valid'): Cross-correlation of two 1-dimensional sequences. This function computes the correlation as generally defined in signal - processing texts: + processing texts [1]_: .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n with a and v sequences being zero-padded where necessary and - :math:`\overline x` denoting complex conjugation. + :math:`\overline v` denoting complex conjugation. Parameters ---------- @@ -693,7 +693,7 @@ def correlate(a, v, mode='valid'): Notes ----- The definition of correlation above is not unique and sometimes correlation - may be defined differently. Another common definition is: + may be defined differently. Another common definition is [1]_: .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}} @@ -702,7 +702,11 @@ def correlate(a, v, mode='valid'): `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) because it does not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might be preferable. - + + References + ---------- + .. [1] Wikipedia, "Cross-correlation", + https://en.wikipedia.org/wiki/Cross-correlation Examples --------