From 6dd9166346d3367aa31c79da3dc2e7700c5b831d Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 7 Feb 2025 10:28:03 -0600 Subject: [PATCH 001/111] Initial attempt to adopt array API --- ccdproc/core.py | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 26b27a20..d8118922 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -7,6 +7,7 @@ import numbers import warnings +import array_api_compat import numpy as np from astropy import nddata, stats from astropy import units as u @@ -347,6 +348,8 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): units as the data in the parameter ``ccd_data``. """ + # Get array namespace + xp = array_api_compat.array_namespace(ccd_data.data) if gain is not None and not isinstance(gain, Quantity): raise TypeError("gain must be a astropy.units.Quantity.") @@ -373,7 +376,7 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): if disregard_nan: data[mask] = 0 else: - data[mask] = np.nan + data[mask] = xp.nan logging.warning("Negative values in array will be replaced with nan") # calculate the deviation @@ -480,6 +483,9 @@ def subtract_overscan( if not isinstance(ccd, CCDData): raise TypeError("ccddata is not a CCDData object.") + # Set array namespace + xp = array_api_compat.array_namespace(ccd.data) + if (overscan is not None and fits_section is not None) or ( overscan is None and fits_section is None ): @@ -498,24 +504,24 @@ def subtract_overscan( overscan_axis = 0 if overscan.shape[1] > overscan.shape[0] else 1 if median: - oscan = np.median(overscan.data, axis=overscan_axis) + oscan = xp.median(overscan.data, axis=overscan_axis) else: - oscan = np.mean(overscan.data, axis=overscan_axis) + oscan = xp.mean(overscan.data, axis=overscan_axis) if model is not None: of = fitting.LinearLSQFitter() - yarr = np.arange(len(oscan)) + yarr = xp.arange(len(oscan)) oscan = of(model, yarr, oscan) oscan = oscan(yarr) if overscan_axis == 1: - oscan = np.reshape(oscan, (oscan.size, 1)) + oscan = xp.reshape(oscan, (oscan.size, 1)) else: - oscan = np.reshape(oscan, (1, oscan.size)) + oscan = xp.reshape(oscan, (1, oscan.size)) else: if overscan_axis == 1: - oscan = np.reshape(oscan, oscan.shape + (1,)) + oscan = xp.reshape(oscan, oscan.shape + (1,)) else: - oscan = np.reshape(oscan, (1,) + oscan.shape) + oscan = xp.reshape(oscan, (1,) + oscan.shape) subtracted = ccd.copy() @@ -968,6 +974,9 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): from astropy.nddata.ccddata import _generate_wcs_and_update_header from reproject import reproject_interp + # Set array namespace + xp = array_api_compat.array_namespace(ccd.data) + if not (ccd.wcs.is_celestial and target_wcs.is_celestial): raise ValueError("one or both WCS is not celestial.") @@ -990,7 +999,7 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): # The reprojection will contain nan for any pixels for which the source # was outside the original image. Those should be masked also. 
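# (Illustrative aside, assuming numpy and array_api_compat are installed:
# a standalone sketch of how the `xp` namespace used throughout this patch,
# including in the np -> xp change just below, gets resolved.)

import array_api_compat
import numpy as np

arr = np.asarray([0.0, np.nan])
xp = array_api_compat.array_namespace(arr)  # a numpy-compatible namespace
assert bool(xp.isnan(arr)[1])  # xp.isnan dispatches to the backing library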
- output_mask = np.isnan(projected_image_raw) + output_mask = xp.isnan(projected_image_raw) if reprojected_mask is not None: output_mask = output_mask | reprojected_mask @@ -1216,14 +1225,21 @@ def rebin(ccd, newshape): rebin(arr1, (20,20)) """ # check to see that is in a nddata type - if isinstance(ccd, np.ndarray): + try: + xp = array_api_compat.array_namespace(ccd) + except TypeError: + # This will also raise aTypeError if ccd.data isn't an array + # but that is fine. + xp = array_api_compat.array_namespace(ccd.data) + + if isinstance(ccd, xp.ndarray): # check to see that the two arrays are going to be the same length if len(ccd.shape) != len(newshape): raise ValueError("newshape does not have the same dimensions as " "ccd.") slices = [slice(0, old, old / new) for old, new in zip(ccd.shape, newshape)] - coordinates = np.mgrid[slices] + coordinates = xp.mgrid[slices] indices = coordinates.astype("i") return ccd[tuple(indices)] From d3b620375acc1a6333f6b7baad6a4347499fc49f Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sun, 9 Feb 2025 12:30:24 -0600 Subject: [PATCH 002/111] A couple more changes --- ccdproc/core.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index d8118922..04bf65b5 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1228,9 +1228,12 @@ def rebin(ccd, newshape): try: xp = array_api_compat.array_namespace(ccd) except TypeError: - # This will also raise aTypeError if ccd.data isn't an array - # but that is fine. - xp = array_api_compat.array_namespace(ccd.data) + try: + # This will also raise a TypeError if ccd.data isn't an array + # but that is fine. + xp = array_api_compat.array_namespace(ccd.data) + except AttributeError as e: + raise TypeError("ccd is not an ndarray or a CCDData object.") from e if isinstance(ccd, xp.ndarray): @@ -1265,8 +1268,11 @@ def rebin(ccd, newshape): raise TypeError("ccd is not an ndarray or a CCDData object.") -def block_reduce(ccd, block_size, func=np.sum): +def block_reduce(ccd, block_size, func=None): """Thin wrapper around `astropy.nddata.block_reduce`.""" + if func is None: + xp = array_api_compat.array_namespace(ccd.data) + func = xp.sum data = nddata.block_reduce(ccd, block_size, func) if isinstance(ccd, CCDData): # unit and meta "should" be unaffected by the change of shape and can @@ -1277,7 +1283,10 @@ def block_reduce(ccd, block_size, func=np.sum): def block_average(ccd, block_size): """Like `block_reduce` but with predefined ``func=np.mean``.""" - data = nddata.block_reduce(ccd, block_size, np.mean) + + xp = array_api_compat.array_namespace(ccd.data) + + data = nddata.block_reduce(ccd, block_size, xp.mean) # Like in block_reduce: if isinstance(ccd, CCDData): data = CCDData(data, unit=ccd.unit, meta=ccd.meta.copy()) @@ -1988,13 +1997,16 @@ def ccdmask( # No data attribute or data has no shape attribute. 
raise ValueError('"ratio" should be a "CCDData".') from err + # Get array namespace + xp = array_api_compat.array_namespace(ratio.data) + def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): """Helper function to mask values outside of the specified sigma range.""" return (baseline < -lower_sigma * one_sigma_value) | ( baseline > upper_sigma * one_sigma_value ) - mask = ~np.isfinite(ratio.data) + mask = ~xp.isfinite(ratio.data) medsub = ratio.data - ndimage.median_filter(ratio.data, size=(nlmed, ncmed)) if byblocks: From f0bc6b8ca7c14d8b334b6b1f719c2084aa4cb8d1 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Mon, 10 Feb 2025 11:06:59 -0600 Subject: [PATCH 003/111] Add function for check if something is an array --- ccdproc/core.py | 113 ++++++++++++++++++++++++++++-------------------- 1 file changed, 65 insertions(+), 48 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 04bf65b5..01eb2c3c 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -69,6 +69,14 @@ } +def _is_array(arr): + try: + array_api_compat.array_namespace(arr) + except TypeError: + return False + return True + + @log_to_metadata def ccd_process( ccd, @@ -223,6 +231,9 @@ def ccd_process( # make a copy of the object nccd = ccd.copy() + # Set array namespace + xp = array_api_compat.array_namespace(nccd.data) + # apply the overscan correction if isinstance(oscan, CCDData): nccd = subtract_overscan( @@ -252,12 +263,13 @@ def ccd_process( raise ValueError("gain and readnoise must be specified to create error frame.") # apply the bad pixel mask - if isinstance(bad_pixel_mask, np.ndarray): - nccd.mask = bad_pixel_mask - elif bad_pixel_mask is None: + if bad_pixel_mask is None: + # Handle this simple case first.... pass + elif _is_array(bad_pixel_mask): + nccd.mask = xp.asarray(bad_pixel_mask, dtype=bool) else: - raise TypeError("bad_pixel_mask is not None or numpy.ndarray.") + raise TypeError("bad_pixel_mask is not None or an array.") # apply the gain correction if not (gain is None or isinstance(gain, Quantity)): @@ -489,7 +501,7 @@ def subtract_overscan( if (overscan is not None and fits_section is not None) or ( overscan is None and fits_section is None ): - raise TypeError("specify either overscan or fits_section, but not " "both.") + raise TypeError("specify either overscan or fits_section, but not both.") if (overscan is not None) and (not isinstance(overscan, CCDData)): raise TypeError("overscan is not a CCDData object.") @@ -1598,39 +1610,7 @@ def cosmicray_lacosmic( asy_background_kwargs = dict(inbkg=inbkg, invar=invar) - if isinstance(ccd, np.ndarray): - data = ccd - - crmask, cleanarr = detect_cosmics( - data + data_offset, - inmask=None, - sigclip=sigclip, - sigfrac=sigfrac, - objlim=objlim, - gain=gain.value, - readnoise=readnoise.value, - satlevel=satlevel, - niter=niter, - sepmed=sepmed, - cleantype=cleantype, - fsmode=fsmode, - psfmodel=psfmodel, - psffwhm=psffwhm, - psfsize=psfsize, - psfk=psfk, - psfbeta=psfbeta, - verbose=verbose, - **asy_background_kwargs, - ) - - cleanarr = cleanarr - data_offset - cleanarr = _astroscrappy_gain_apply_helper( - cleanarr, gain.value, gain_apply, old_astroscrappy_interface - ) - - return cleanarr, crmask - - elif isinstance(ccd, CCDData): + if isinstance(ccd, CCDData): # Start with a check for a special case: ccd is in electron, and # gain and readnoise have no units. In that case we issue a warning # instead of raising an error to avoid crashing user's pipelines. 
@@ -1697,6 +1677,37 @@ def cosmicray_lacosmic( nccd.mask = nccd.mask + crmask return nccd + elif _is_array(ccd): + data = ccd + + crmask, cleanarr = detect_cosmics( + data + data_offset, + inmask=None, + sigclip=sigclip, + sigfrac=sigfrac, + objlim=objlim, + gain=gain.value, + readnoise=readnoise.value, + satlevel=satlevel, + niter=niter, + sepmed=sepmed, + cleantype=cleantype, + fsmode=fsmode, + psfmodel=psfmodel, + psffwhm=psffwhm, + psfsize=psfsize, + psfk=psfk, + psfbeta=psfbeta, + verbose=verbose, + **asy_background_kwargs, + ) + + cleanarr = cleanarr - data_offset + cleanarr = _astroscrappy_gain_apply_helper( + cleanarr, gain.value, gain_apply, old_astroscrappy_interface + ) + + return cleanarr, crmask else: raise TypeError("ccd is not a CCDData or ndarray object.") @@ -1810,22 +1821,26 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): mask of the object will be created if it did not previously exist or be updated with the detected cosmic rays. """ - if isinstance(ccd, np.ndarray): - data = ccd + if _is_array(ccd): + xp = array_api_compat.array_namespace(ccd) + + # Masked data is not part of the array API so remove mask if present. + # Only look at the data array, guessing that if there is a .mask then + # there is also a .data. + if hasattr(ccd, "mask"): + data = ccd.data + + data = xp.asarray(ccd) if error_image is None: - error_image = data.std() - else: - if not isinstance(error_image, (float, np.ndarray)): + error_image = xp.std(data) + elif not isinstance(error_image, float): + if not _is_array(error_image): raise TypeError("error_image is not a float or ndarray.") # create the median image marr = ndimage.median_filter(data, size=(mbox, mbox)) - # Only look at the data array - if isinstance(data, np.ma.MaskedArray): - data = data.data - # Find the residual image rarr = (data - marr) / error_image @@ -1839,7 +1854,9 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # replace bad pixels in the image ndata = data.copy() if rbox > 0: - data = np.ma.masked_array(data, (crarr == 1)) + # Fun fact: scipy.ndimage ignores the mask, so may as well not + # bother with it. + # data = np.ma.masked_array(data, (crarr == 1)) mdata = ndimage.median_filter(data, rbox) ndata[crarr == 1] = mdata[crarr == 1] From 424ebdc0432c876e4f36fa278c2bcde4c116392c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 12 Feb 2025 10:31:09 -0600 Subject: [PATCH 004/111] Remove numpy from core --- ccdproc/core.py | 85 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 76 insertions(+), 9 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 01eb2c3c..fa24793f 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -8,7 +8,6 @@ import warnings import array_api_compat -import numpy as np from astropy import nddata, stats from astropy import units as u from astropy.modeling import fitting @@ -70,6 +69,20 @@ def _is_array(arr): + """ + Check whether an object is an array by tring to find a namespace + for it. + + Parameters + ---------- + arr : object + Object to be tested. + + Returns + ------- + is_array : bool + ``True`` if arr is an array, ``False`` otherwise. + """ try: array_api_compat.array_namespace(arr) except TypeError: @@ -77,6 +90,38 @@ def _is_array(arr): return True +def _percentile_fallback(array, percentiles): + """ + Try calculating percentile using namespace, otherwise fall back to + an implmentation that uses sort. 
As of the 2023 version of the array API + there is no percentile function in the API but there is a sort function. + + Parameters + ---------- + array : array-like + Array from which to calculate the percentile. + + percentiles : float or list-like + Percentile to calculate. + + Returns + ------- + percentile : float or list-like + Calculated percentile. + """ + xp = array_api_compat.array_namespace(array) + try: + return xp.percentile(array, percentiles) + except AttributeError: + pass + + # Fall back to using sort + sorted_array = xp.sort(array) + + indexes = xp.astype(len(sorted_array) * xp.asarray(percentiles), int) + return sorted_array[indexes] + + @log_to_metadata def ccd_process( ccd, @@ -2036,18 +2081,40 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): c1 = j * ncsig c2 = min((j + 1) * ncsig, ncols) block = medsub[l1:l2, c1:c2] - high = np.percentile(block.ravel(), 69.1) - low = np.percentile(block.ravel(), 30.9) + # The array API has no percentile function, so we use a small + # function that first tries percentile in case a particular + # array package has it but otherwise falls back to a sort. + # This is the case at least as of the 2023.12 API. + high = _percentile_fallback( + xp.reshape(block, (xp.prod(block.shape),)), 69.1 + ) + low = _percentile_fallback( + xp.reshape(block, (xp.prod(block.shape),)), 30.9 + ) block_sigma = (high - low) / 2.0 block_mask = _sigma_mask(block, block_sigma, lsigma, hsigma) - mblock = np.ma.MaskedArray(block, mask=block_mask, copy=False) + # mblock = np.ma.MaskedArray(block, mask=block_mask, copy=False) if findbadcolumns: - csum = np.ma.sum(mblock, axis=0) + # Not clear yet what the right solution to masking is in the array + # API, so we'll use a boolean index to get the elements we want + # and sum them....unfortunately, we'll need to do this in a loop + # as far as I can tell. + csum = [] + all_masked = [] + for k in range(block.shape[1]): + subset = block[:, k] + csum.append(xp.sum(subset[~block_mask[:, k]])) + all_masked.append(xp.all(block_mask[:, k])) + csum = xp.array(csum) csum[csum <= 0] = 0 - csum_sigma = np.ma.MaskedArray(np.sqrt(c2 - c1 - csum)) - colmask = _sigma_mask(csum.filled(1), csum_sigma, lsigma, hsigma) - block_mask[:, :] |= colmask[np.newaxis, :] + csum_sigma = xp.array(xp.sqrt(c2 - c1 - csum)) + # The prior code filled the csum array with the value 1, which + # only affects those cases where all of the input values to + # the csum were masked, so we fill those with 1. 
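# (Standalone sketch of the boolean-index replacement for np.ma.sum used
# in the loop above; the values here are illustrative.)

import numpy as np

col = np.asarray([1.0, 2.0, 4.0])
bad = np.asarray([False, True, False])
assert np.sum(col[~bad]) == np.ma.MaskedArray(col, mask=bad).sum()  # both 5.0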
+ csum[all_masked] = 1 + colmask = _sigma_mask(csum, csum_sigma, lsigma, hsigma) + block_mask[:, :] |= colmask[xp.newaxis, :] mask[l1:l2, c1:c2] = block_mask else: @@ -2065,7 +2132,7 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): if mask[line, col]: for i in range(2, ngood + 2): lend = line + i - if mask[lend, col] and not np.all(mask[line : lend + 1, col]): + if mask[lend, col] and not xp.all(mask[line : lend + 1, col]): mask[line:lend, col] = True return mask From 7f5050dfed0d5d42f77b561790e3dc67cb8b1c49 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 4 Mar 2025 10:10:54 -0600 Subject: [PATCH 005/111] WIP rewrite of a boolean index --- ccdproc/core.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index fa24793f..a6ee524e 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -430,14 +430,15 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): # remove values that might be negative or treat as nan data = gain_value * ccd_data.data mask = data < 0 + if disregard_nan: - data[mask] = 0 + data = data * ~mask else: - data[mask] = xp.nan + # data[mask] = xp.nan logging.warning("Negative values in array will be replaced with nan") # calculate the deviation - var = (data + readnoise_value**2) ** 0.5 + var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5 # ensure uncertainty and image data have same unit ccd = ccd_data.copy() From 6a6bfa29c508d0744d65941167a77ea63a3714b1 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 4 Mar 2025 10:12:34 -0600 Subject: [PATCH 006/111] WIP testing updates --- ccdproc/tests/pytest_fixtures.py | 4 +- ccdproc/tests/test_ccdproc.py | 110 +++++++++++++++++-------------- 2 files changed, 64 insertions(+), 50 deletions(-) diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index e900e0f8..0c27a94a 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -2,6 +2,8 @@ from shutil import rmtree +# import dask.array as da +import jax.numpy as jnp import numpy as np import pytest from astropy import units as u @@ -59,7 +61,7 @@ def ccd_data( data = rng.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {"my_key": 42, "your_key": "not 42"} - ccd = CCDData(data, unit=u.adu) + ccd = CCDData(jnp.array(data), unit=u.adu) ccd.header = fake_meta return ccd diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 55a1ccbe..54ee069a 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1,8 +1,9 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst +# import array_api_compat import astropy import astropy.units as u -import numpy as np +import jax.numpy as np import pytest import skimage from astropy.io import fits @@ -11,6 +12,7 @@ from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS +from numpy import testing as np_testing from ccdproc.core import ( Keyword, @@ -36,7 +38,15 @@ except ImportError: HAS_BLOCK_X_FUNCS = False -_NUMPY_COPY_IF_NEEDED = False if np.__version__.startswith("1.") else None +_NUMPY_COPY_IF_NEEDED = None # False if np.__version__.startswith("1.") else None + + +# import dask.array as da +# import numpy +# data = numpy.arange(100_000).reshape(200, 500) +# a = da.from_array(data, chunks=(100, 100)) + +# np = array_api_compat.array_namespace(a) # Test creating deviation @@ -65,12 +75,12 @@ def test_create_deviation(u_image, u_gain, 
u_readnoise, expect_success): ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise) assert ccd_var.uncertainty.array.shape == (10, 10) assert ccd_var.uncertainty.array.size == 100 - assert ccd_var.uncertainty.array.dtype == np.dtype(float) + assert np.isdtype(ccd_var.uncertainty.array.dtype, "real floating") if gain is not None: expected_var = np.sqrt(2 * ccd_data.data + 5**2) / 2 else: expected_var = np.sqrt(ccd_data.data + 5**2) - np.testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) assert ccd_var.unit == ccd_data.unit # Uncertainty should *not* have any units -- does it? with pytest.raises(AttributeError): @@ -87,7 +97,7 @@ def test_create_deviation_from_negative(): ccd_var = create_deviation( ccd_data, gain=None, readnoise=readnoise, disregard_nan=False ) - np.testing.assert_array_equal( + np_testing.assert_array_equal( ccd_data.data < 0, np.isnan(ccd_var.uncertainty.array) ) @@ -102,7 +112,7 @@ def test_create_deviation_from_negative_2(): mask = ccd_data.data < 0 ccd_data.data[mask] = 0 expected_var = np.sqrt(ccd_data.data + readnoise.value**2) - np.testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) def test_create_deviation_keywords_must_have_unit(): @@ -164,7 +174,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_overscan.data[science_region].mean(), sky + original_mean ) # Is the overscan region zero? @@ -181,14 +191,14 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_fits_section.data[science_region].mean(), sky + original_mean ) # Is the overscan region zero? assert (ccd_data_fits_section.data[oscan_region] == 0).all() # Do both ways of subtracting overscan give exactly the same result? 
- np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data_overscan[science_region], ccd_data_fits_section[science_region] ) @@ -201,7 +211,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_overscan_auto.data[science_region].mean(), sky + original_mean ) # Use overscan_axis=None with a FITS section @@ -212,7 +222,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( ccd_data_fits_section_overscan_auto.data[science_region].mean(), sky + original_mean, ) @@ -237,7 +247,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np.testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) + np_testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) # A more substantial test of overscan modeling @@ -273,7 +283,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) # Set the overscan_axis explicitly to None, and let the routine # figure it out. ccd_data = subtract_overscan( @@ -283,7 +293,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np.testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) def test_subtract_overscan_fails(): @@ -326,7 +336,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): trimmed = trim_image(ccd_data, fits_section="[20:40,:]") # FITS reverse order, bounds are inclusive and starting index is 1-based assert trimmed.shape == (50, 21) - np.testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) if mask_data: assert trimmed.shape == trimmed.mask.shape if uncertainty: @@ -337,7 +347,7 @@ def test_trim_image_no_section(): ccd_data = ccd_data_func(data_size=50) trimmed = trim_image(ccd_data[:, 19:40]) assert trimmed.shape == (50, 21) - np.testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) def test_trim_with_wcs_alters_wcs(): @@ -367,7 +377,7 @@ def test_subtract_bias(): master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? - np.testing.assert_almost_equal(no_bias.data.mean(), data_avg) + np_testing.assert_almost_equal(no_bias.data.mean(), data_avg) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header del no_bias.header["key"] @@ -434,7 +444,9 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np.testing.assert_allclose(ccd_data.data - dark_scale * dark_level, dark_sub.data) + np_testing.assert_allclose( + ccd_data.data - dark_scale * dark_level, dark_sub.data + ) # Headers should have the same content...do they? 
assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -535,10 +547,10 @@ def test_flat_correct(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() ) - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data.data / flat_data.data, flat.data / flat.data.mean() ) @@ -563,11 +575,11 @@ def test_flat_correct_min_value(): # Check that the flat was normalized. The asserts below, which look a # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( (flat_corrected_data.data * flat_with_min.data).mean(), (ccd_data.data * flat_with_min.data.mean()).mean(), ) - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_data.data / flat_corrected_data.data, flat_with_min.data / flat_with_min.data.mean(), ) @@ -593,10 +605,10 @@ def test_flat_correct_norm_value(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. - np.testing.assert_almost_equal( + np_testing.assert_almost_equal( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean ) - np.testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) + np_testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) def test_flat_correct_norm_value_bad_value(): @@ -641,7 +653,7 @@ def test_gain_correct(): ccd_data = ccd_data_func() init_data = ccd_data.data gain_data = gain_correct(ccd_data, gain=3, add_keyword=None) - np.testing.assert_allclose(gain_data.data, 3 * init_data) + np_testing.assert_allclose(gain_data.data, 3 * init_data) assert ccd_data.meta == gain_data.meta @@ -651,7 +663,7 @@ def test_gain_correct_quantity(): g = Quantity(3, u.electron / u.adu) ccd_data = gain_correct(ccd_data, gain=g) - np.testing.assert_allclose(ccd_data.data, 3 * init_data) + np_testing.assert_allclose(ccd_data.data, 3 * init_data) assert ccd_data.unit == u.electron @@ -699,13 +711,13 @@ def tran(arr): tran = transform_image(ccd_data, tran) - np.testing.assert_allclose(10 * ccd_data.data, tran.data) + np_testing.assert_allclose(10 * ccd_data.data, tran.data) if mask_data: assert tran.shape == tran.mask.shape - np.testing.assert_array_equal(ccd_data.mask, tran.mask) + np_testing.assert_allclose(ccd_data.mask, tran.mask) if uncertainty: assert tran.shape == tran.uncertainty.array.shape - np.testing.assert_allclose( + np_testing.assert_allclose( 10 * ccd_data.uncertainty.array, tran.uncertainty.array ) @@ -814,7 +826,7 @@ def test__overscan_schange(): old_data = ccd_data.copy() new_data = subtract_overscan(ccd_data, overscan=ccd_data[:, 1], overscan_axis=0) assert not np.allclose(old_data.data, new_data.data) - np.testing.assert_allclose(old_data.data, ccd_data.data) + np_testing.assert_allclose(old_data.data, ccd_data.data) def test_create_deviation_does_not_change_input(): @@ -823,7 +835,7 @@ def test_create_deviation_does_not_change_input(): _ = create_deviation( ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron ) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ 
-835,7 +847,7 @@ def test_cosmicray_median_does_not_change_input(): _ = cosmicray_median( ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0 ) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -843,7 +855,7 @@ def test_cosmicray_lacosmic_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = cosmicray_lacosmic(ccd_data) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -853,7 +865,7 @@ def test_flat_correct_does_not_change_input(): flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) with np.errstate(invalid="ignore"): _ = flat_correct(ccd_data, flat=flat) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -861,7 +873,7 @@ def test_gain_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = gain_correct(ccd_data, gain=1, gain_unit=ccd_data.unit) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -870,7 +882,7 @@ def test_subtract_bias_does_not_change_input(): original = ccd_data.copy() master_frame = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) _ = subtract_bias(ccd_data, master=master_frame) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -878,7 +890,7 @@ def test_trim_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = trim_image(ccd_data, fits_section=None) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -887,7 +899,7 @@ def test_transform_image_does_not_change_input(): original = ccd_data.copy() with np.errstate(invalid="ignore"): _ = transform_image(ccd_data, np.sqrt) - np.testing.assert_allclose(original.data, ccd_data) + np_testing.assert_allclose(original.data, ccd_data) assert original.unit == ccd_data.unit @@ -922,7 +934,7 @@ def test_wcs_project_onto_same_wcs(): assert new_ccd.wcs.wcs.compare(target_wcs.wcs) # Make sure data matches within some reasonable tolerance. - np.testing.assert_allclose(ccd_data.data, new_ccd.data, rtol=1e-5) + np_testing.assert_allclose(ccd_data.data, new_ccd.data, rtol=1e-5) def test_wcs_project_onto_same_wcs_remove_headers(): @@ -958,10 +970,10 @@ def test_wcs_project_onto_shifted_wcs(): # that the pixels should all be shifted. masked_input = np.ma.array(ccd_data.data, mask=ccd_data.mask) masked_output = np.ma.array(new_ccd.data, mask=new_ccd.mask) - np.testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) + np_testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) # The masks should all be shifted too. 
- np.testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) + np_testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) # We should have more values that are masked in the output array # than on input because some on output were not in the footprint @@ -1017,7 +1029,7 @@ def test_wcs_project_onto_scale_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels have been scaled. - np.testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) + np_testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. @@ -1038,7 +1050,7 @@ def test_ccd_process_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = ccd_process(ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron) - np.testing.assert_allclose(original.data, ccd_data.data) + np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -1111,9 +1123,9 @@ def test_ccd_process(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np.testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array) - np.testing.assert_array_equal(mask, occd.mask) + np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 assert occd.meta["testkw"] == 100 @@ -1157,9 +1169,9 @@ def test_ccd_process_gain_corrected(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np.testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np.testing.assert_almost_equal(3.0 * np.ones((100, 90)), occd.uncertainty.array) - np.testing.assert_array_equal(mask, occd.mask) + np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 assert occd.meta["testkw"] == 100 From 1d2d171b9a68f419a2e28de8f3539ced2001b0c5 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 09:46:36 -0600 Subject: [PATCH 007/111] Change more almost_equal to allclose --- ccdproc/tests/test_ccdproc.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 54ee069a..76b345ed 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -174,8 +174,8 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np_testing.assert_almost_equal( - ccd_data_overscan.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_overscan.data[science_region].mean(), sky + original_mean, rtol=1e-6 ) # Is the overscan region zero? 
assert (ccd_data_overscan.data[oscan_region] == 0).all() @@ -191,8 +191,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np_testing.assert_almost_equal( - ccd_data_fits_section.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_fits_section.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, ) # Is the overscan region zero? assert (ccd_data_fits_section.data[oscan_region] == 0).all() @@ -211,8 +213,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np_testing.assert_almost_equal( - ccd_data_overscan_auto.data[science_region].mean(), sky + original_mean + np_testing.assert_allclose( + ccd_data_overscan_auto.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, ) # Use overscan_axis=None with a FITS section ccd_data_fits_section_overscan_auto = subtract_overscan( @@ -222,9 +226,10 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np_testing.assert_almost_equal( + np_testing.assert_allclose( ccd_data_fits_section_overscan_auto.data[science_region].mean(), sky + original_mean, + rtol=1e-6, ) # Overscan_axis should be 1 for a square overscan region # This test only works for a non-square data region, but the @@ -283,7 +288,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) # Set the overscan_axis explicitly to None, and let the routine # figure it out. ccd_data = subtract_overscan( @@ -293,7 +298,7 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_almost_equal(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) def test_subtract_overscan_fails(): @@ -377,7 +382,7 @@ def test_subtract_bias(): master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? - np_testing.assert_almost_equal(no_bias.data.mean(), data_avg) + np_testing.assert_allclose(no_bias.data.mean(), data_avg) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header del no_bias.header["key"] @@ -444,9 +449,7 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np_testing.assert_allclose( - ccd_data.data - dark_scale * dark_level, dark_sub.data - ) + np_testing.assert_allclose(ccd_data.data - dark_scale * dark_level, dark_sub.data) # Headers should have the same content...do they? assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -547,7 +550,7 @@ def test_flat_correct(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. 
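# (Aside on the tolerance changes in this patch: assert_almost_equal tests
# an absolute difference, abs(actual - desired) < 1.5 * 10**-decimal with
# decimal=7 by default, while assert_allclose is relative by default
# (rtol=1e-7, atol=0); hence the looser explicit rtol values added at
# several call sites here. For example:)

from numpy import testing as np_testing

np_testing.assert_allclose(1000.00005, 1000.0)  # passes: relative error ~5e-8
# np_testing.assert_almost_equal(1000.00005, 1000.0)  # raises: abs diff 5e-5 > 1.5e-7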
- np_testing.assert_almost_equal( + np_testing.assert_allclose( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() ) np_testing.assert_allclose( @@ -575,7 +578,7 @@ def test_flat_correct_min_value(): # Check that the flat was normalized. The asserts below, which look a # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) - np_testing.assert_almost_equal( + np_testing.assert_allclose( (flat_corrected_data.data * flat_with_min.data).mean(), (ccd_data.data * flat_with_min.data.mean()).mean(), ) @@ -605,7 +608,7 @@ def test_flat_correct_norm_value(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. - np_testing.assert_almost_equal( + np_testing.assert_allclose( (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean ) np_testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) From 22bc04d95156a2afb10779753aa4d13c362a7420 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 10:19:14 -0600 Subject: [PATCH 008/111] Use the random number generator from numpy --- ccdproc/tests/test_ccdproc.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 76b345ed..2dd82365 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -12,6 +12,7 @@ from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS +from numpy import random as np_random from numpy import testing as np_testing from ccdproc.core import ( @@ -40,6 +41,7 @@ _NUMPY_COPY_IF_NEEDED = None # False if np.__version__.startswith("1.") else None +RNG = np_random.default_rng # import dask.array as da # import numpy @@ -335,7 +337,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): if mask_data: ccd_data.mask = np.zeros_like(ccd_data) if uncertainty: - err = np.random.default_rng().normal(size=ccd_data.shape) + err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) trimmed = trim_image(ccd_data, fits_section="[20:40,:]") @@ -543,7 +545,7 @@ def test_flat_correct(): ccd_data.header["my_key"] = 42 size = ccd_data.shape[0] # create the flat, with some scatter - data = 2 * np.random.default_rng().normal(loc=1.0, scale=0.05, size=(size, size)) + data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None) @@ -567,7 +569,7 @@ def test_flat_correct_min_value(): size = ccd_data.shape[0] # Create the flat - data = 2 * np.random.default_rng().normal(loc=1.0, scale=0.05, size=(size, size)) + data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat_orig_data = flat.data.copy() min_value = 2.1 # Should replace some, but not all, values @@ -601,7 +603,7 @@ def test_flat_correct_norm_value(): # Note that mean value of flat is set below and is different than # the mean of the flat data. 
flat_mean = 5.0 - data = np.random.default_rng().normal(loc=1.0, scale=0.05, size=ccd_data.shape) + data = RNG().normal(loc=1.0, scale=0.05, size=ccd_data.shape) flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None, norm_value=flat_mean) @@ -620,7 +622,7 @@ def test_flat_correct_norm_value_bad_value(): # it is given a bad norm_value. Bad means <=0. # Create the flat, with some scatter - data = np.random.default_rng().normal(loc=1.0, scale=0.05, size=ccd_data.shape) + data = RNG().normal(loc=1.0, scale=0.05, size=ccd_data.shape) flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) with pytest.raises(ValueError) as e: flat_correct(ccd_data, flat, add_keyword=None, norm_value=-7) @@ -706,7 +708,7 @@ def test_transform_image(mask_data, uncertainty): ccd_data.mask = np.zeros_like(ccd_data) ccd_data.mask[10, 10] = 1 if uncertainty: - err = np.random.default_rng().normal(size=ccd_data.shape) + err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) def tran(arr): @@ -962,7 +964,7 @@ def test_wcs_project_onto_shifted_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) target_wcs.wcs.crpix += [1, 1] - ccd_data.mask = np.random.default_rng().choice([0, 1], size=ccd_data.shape) + ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape) new_ccd = wcs_project(ccd_data, target_wcs) From 245d641109c9d99417e09fbe4fff14ca1941f427 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 10:20:33 -0600 Subject: [PATCH 009/111] Remove more in-place array operations --- ccdproc/tests/test_ccdproc.py | 38 +++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 2dd82365..ae5c11cb 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -112,7 +112,10 @@ def test_create_deviation_from_negative_2(): ccd_data, gain=None, readnoise=readnoise, disregard_nan=True ) mask = ccd_data.data < 0 - ccd_data.data[mask] = 0 + # Set the variance to zero where the data is negative + # In-place replacement of values does not work in some array + # libraries. + ccd_data.data = ccd_data.data * ~mask expected_var = np.sqrt(ccd_data.data + readnoise.value**2) np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) @@ -160,12 +163,19 @@ def test_subtract_overscan(median, transpose, data_rectangle): science_region = science_region[::-1] overscan_axis = 0 - ccd_data.data[oscan_region] = oscan + # Since some array libraries do not support in-place operations, we + # work on the science and overscan regions separately. + science_data = ccd_data.data[science_region].copy() + overscan_data = 0 * ccd_data.data[oscan_region].copy() + oscan + # Add a fake sky background so the "science" part of the image has a # different average than the "overscan" part. 
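# (Standalone sketch of why the in-place update removed below is avoided:
# some array libraries are immutable; with jax.numpy, for example, item
# assignment raises and a functional update is used instead. Assumes jax
# is installed.)

import jax.numpy as jnp

arr = jnp.zeros((4, 4))
# arr[0, 0] = 1.0 would raise TypeError: JAX arrays are immutable
arr = arr.at[0, 0].set(1.0)  # functional update returning a new array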
sky = 10.0 - original_mean = ccd_data.data[science_region].mean() - ccd_data.data[science_region] += oscan + sky + original_mean = science_data.mean() + science_data = science_data + oscan + sky + + # Reconstruct the full image + ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) # Test once using the overscan argument to specify the overscan region ccd_data_overscan = subtract_overscan( ccd_data, @@ -280,8 +290,12 @@ def test_subtract_overscan_model(transpose): original_mean = ccd_data.data[science_region].mean() - ccd_data.data[oscan_region] = 0.0 # Only want overscan in that region - ccd_data.data = ccd_data.data + scan + science_data = ccd_data.data[science_region].copy() + # Set any existing overscan to zero. Overscan is stored for the entire + # image, so we need to do this before we add the new overscan. + overscan_data = 0 * ccd_data.data[oscan_region].copy() + # Reconstruct the full image + ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) + scan ccd_data = subtract_overscan( ccd_data, @@ -766,14 +780,22 @@ def test_block_reduce(): reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_average(): + data = np.array( + [ + [2.0, 1.0, 2.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + [2.0, 1.0, 2.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + ] + ) ccd = CCDData( - np.ones((4, 4)), + data, unit="adu", meta={"testkw": 1}, mask=np.zeros((4, 4), dtype=bool), uncertainty=StdDevUncertainty(np.ones((4, 4))), ) - ccd.data[::2, ::2] = 2 + with pytest.warns(AstropyUserWarning) as w: ccd_avgd = block_average(ccd, (2, 2)) assert len(w) == 1 From 55b90bec2499e5b68a43508587e0b8fc15fcbd96 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 5 Mar 2025 10:21:06 -0600 Subject: [PATCH 010/111] Set a reasonable tolerance value in float comparisons --- ccdproc/tests/test_ccdproc.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index ae5c11cb..e8210a3e 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -304,7 +304,9 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose( + ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + ) # Set the overscan_axis explicitly to None, and let the routine # figure it out. ccd_data = subtract_overscan( @@ -314,7 +316,9 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_allclose(ccd_data.data[science_region].mean(), original_mean) + np_testing.assert_allclose( + ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + ) def test_subtract_overscan_fails(): @@ -465,7 +469,9 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np_testing.assert_allclose(ccd_data.data - dark_scale * dark_level, dark_sub.data) + np_testing.assert_allclose( + ccd_data.data - dark_scale * dark_level, dark_sub.data, rtol=1e-6 + ) # Headers should have the same content...do they? assert dark_sub.header == ccd_data.header # But the headers should not be the same object -- a copy was made @@ -567,10 +573,14 @@ def test_flat_correct(): # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. 
np_testing.assert_allclose( - (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat.data.mean() + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat.data.mean(), + rtol=1e-6, ) np_testing.assert_allclose( - ccd_data.data / flat_data.data, flat.data / flat.data.mean() + ccd_data.data / flat_data.data, + flat.data / flat.data.mean(), + rtol=1e-6, ) # Check that metadata is unchanged (since logging is turned off) @@ -597,10 +607,12 @@ def test_flat_correct_min_value(): np_testing.assert_allclose( (flat_corrected_data.data * flat_with_min.data).mean(), (ccd_data.data * flat_with_min.data.mean()).mean(), + rtol=1e-6, ) np_testing.assert_allclose( ccd_data.data / flat_corrected_data.data, flat_with_min.data / flat_with_min.data.mean(), + rtol=1e-6, ) # Test that flat is not modified. @@ -625,9 +637,13 @@ def test_flat_correct_norm_value(): # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. np_testing.assert_allclose( - (flat_data.data * flat.data).mean(), ccd_data.data.mean() * flat_mean + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat_mean, + rtol=1e-6, + ) + np_testing.assert_allclose( + ccd_data.data / flat_data.data, flat.data / flat_mean, rtol=1e-6 ) - np_testing.assert_allclose(ccd_data.data / flat_data.data, flat.data / flat_mean) def test_flat_correct_norm_value_bad_value(): From 23de261afb08209ded492ec8c26aae21b9b44597 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 10:58:12 -0600 Subject: [PATCH 011/111] Continue to use numpy arrays in a few places These are necessary because upstream dependencies (astropy) explicitly check for numpy arrays rather than using the array API. --- ccdproc/tests/test_ccdproc.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index e8210a3e..4f4ca5bd 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -12,6 +12,7 @@ from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS +from numpy import array as np_array from numpy import random as np_random from numpy import testing as np_testing @@ -675,7 +676,11 @@ def test_flat_correct_deviation(): # Test the uncertainty on the data after flat correction def test_flat_correct_data_uncertainty(): # Regression test for #345 - dat = CCDData(np.ones([100, 100]), unit="adu", uncertainty=np.ones([100, 100])) + # Temporarily work around the fact that NDUncertainty explicitly checks + # whether the value is a numpy array. + dat = CCDData( + np.ones([100, 100]), unit="adu", uncertainty=np_array(np.ones([100, 100])) + ) # Note flat is set to 10, error, if present, is set to one. flat = CCDData(10 * np.ones([100, 100]), unit="adu") res = flat_correct(dat, flat) @@ -971,6 +976,8 @@ def test_wcs_project_onto_same_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) ccd_data.wcs = wcs_for_testing(ccd_data.shape) + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) # Make sure new image has correct WCS. 
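# (The "ugly hack" conversions in this patch all work around the same
# thing: astropy's WCS machinery checks for numpy arrays specifically.
# One way to sketch the round trip, assuming jax and numpy are installed;
# the tests themselves use numpy's array constructor.)

import jax.numpy as jnp
import numpy as np

data = jnp.ones((5, 5))
np_data = np.asarray(data)  # a plain ndarray that astropy will accept
assert isinstance(np_data, np.ndarray)
data = jnp.asarray(np_data)  # convert results back when needed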
@@ -987,6 +994,8 @@ def test_wcs_project_onto_same_wcs_remove_headers(): ccd_data.wcs = wcs_for_testing(ccd_data.shape) ccd_data.header = ccd_data.wcs.to_header() + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) for k in ccd_data.wcs.to_header(): @@ -1004,6 +1013,8 @@ def test_wcs_project_onto_shifted_wcs(): ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape) + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) # Make sure new image has correct WCS. @@ -1054,6 +1065,8 @@ def test_wcs_project_onto_scale_wcs(): target_shape = 2 * np.array(ccd_data.shape) + 1 target_wcs.wcs.crpix = 2 * target_wcs.wcs.crpix + 1 + 0.5 + # Ugly hack for numpy-specific check in astropy.wcs + ccd_data.data = np_array(ccd_data.data) # Explicitly set the interpolation method so we know what to # expect for the mass. new_ccd = wcs_project( From dffadce3011c34fd39972da882c18c724ff2f79e Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 10:58:59 -0600 Subject: [PATCH 012/111] Refactor warning handling for numerical warnings --- ccdproc/tests/test_ccdproc.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 4f4ca5bd..da13e574 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1,5 +1,7 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst +import warnings + # import array_api_compat import astropy import astropy.units as u @@ -891,10 +893,7 @@ def test_cosmicray_median_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() error = np.zeros_like(ccd_data) - with np.errstate(invalid="ignore", divide="ignore"): - _ = cosmicray_median( - ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0 - ) + _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -911,7 +910,8 @@ def test_flat_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) - with np.errstate(invalid="ignore"): + # Ignore the divide by zero warning that is raised when the flat is zero. 
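# (Note on the context manager added below: the action=/category= keyword
# arguments to warnings.catch_warnings are new in Python 3.11; on older
# interpreters the equivalent spelling is:)

import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore", RuntimeWarning)
    ...  # the operation that may warn goes here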
+ with warnings.catch_warnings(action="ignore", category=RuntimeWarning): _ = flat_correct(ccd_data, flat=flat) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -945,8 +945,7 @@ def test_trim_image_does_not_change_input(): def test_transform_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - with np.errstate(invalid="ignore"): - _ = transform_image(ccd_data, np.sqrt) + _ = transform_image(ccd_data, np.sqrt) np_testing.assert_allclose(original.data, ccd_data) assert original.unit == ccd_data.unit From 12ff8b06804716e4a684d73737e50f323b342df0 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 11:00:03 -0600 Subject: [PATCH 013/111] Avoid explicit use of numpy masked arrays --- ccdproc/tests/test_ccdproc.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index da13e574..0de65df9 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1021,9 +1021,7 @@ def test_wcs_project_onto_shifted_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels should all be shifted. - masked_input = np.ma.array(ccd_data.data, mask=ccd_data.mask) - masked_output = np.ma.array(new_ccd.data, mask=new_ccd.mask) - np_testing.assert_allclose(masked_input[:-1, :-1], masked_output[1:, 1:], rtol=1e-5) + np_testing.assert_allclose(ccd_data.data[:-1, :-1], new_ccd[1:, 1:], rtol=1e-5) # The masks should all be shifted too. np_testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) @@ -1035,7 +1033,7 @@ def test_wcs_project_onto_shifted_wcs(): # In the case of a shift, one row and one column should be nan, and they # will share one common nan where they intersect, so we know how many nan # there should be. - assert np.isnan(new_ccd.data).sum() == np.sum(new_ccd.shape) - 1 + assert np.sum(np.isnan(new_ccd.data)) == np.sum(np.array(new_ccd.shape)) - 1 # Use an odd number of pixels to make a well-defined center pixel From 63f81ba5550730266f7798832e7edce7088b55f2 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 6 Mar 2025 11:00:22 -0600 Subject: [PATCH 014/111] Rewrite test to not modify array in-place --- ccdproc/tests/test_ccdproc.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 0de65df9..ab4ebc60 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -1142,7 +1142,9 @@ def test_ccd_process_parameters_are_appropriate(): def test_ccd_process(): # Test the through ccd_process ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) - ccd_data.data[:, -10:] = 2 + # Rewrite to not change data in-place. + ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) + ccd_data.meta["testkw"] = 100 mask = np.zeros((100, 90)) @@ -1187,7 +1189,9 @@ def test_ccd_process(): def test_ccd_process_gain_corrected(): # Test the through ccd_process with gain_corrected as False ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) - ccd_data.data[:, -10:] = 2 + + # Rewrite to not change data in-place. 
+ ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) ccd_data.meta["testkw"] = 100 mask = np.zeros((100, 90)) From 4ae3fc63f61ddead151ba6cfe2e509222b9fb2f9 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 7 Mar 2025 11:49:51 -0600 Subject: [PATCH 015/111] Use assert_allclose instead of older alternatives --- ccdproc/tests/test_combiner.py | 36 +++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index b5936f4f..1cde6535 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -428,7 +428,7 @@ def test_combine_average_fitsimages(): fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combine_numpyndarray(): @@ -446,7 +446,7 @@ def test_combine_numpyndarray(): fitsfilename_list = np.array([fitsfile] * 3) avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combiner_result_dtype(): @@ -459,13 +459,13 @@ def test_combiner_result_dtype(): # The default dtype of Combiner is float64 assert res.data.dtype == np.float64 ref = np.ones((3, 3)) * 1.5 - np.testing.assert_array_almost_equal(res.data, ref) + np.testing.assert_allclose(res.data, ref) res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int) # The result dtype should be integer: assert res.data.dtype == np.int_ ref = np.ones((3, 3)) * 2 - np.testing.assert_array_almost_equal(res.data, ref) + np.testing.assert_allclose(res.data, ref) def test_combiner_image_file_collection_input(tmp_path): @@ -476,7 +476,7 @@ def test_combiner_image_file_collection_input(tmp_path): ifc = ImageFileCollection(tmp_path) comb = Combiner(ifc.ccds()) - np.testing.assert_array_almost_equal(ccd.data, comb.average_combine().data) + np.testing.assert_allclose(ccd.data, comb.average_combine().data) def test_combine_image_file_collection_input(tmp_path): @@ -492,8 +492,8 @@ def test_combine_image_file_collection_input(tmp_path): comb_ccds = combine(ifc.ccds(), method="average") - np.testing.assert_array_almost_equal(ccd.data, comb_files.data) - np.testing.assert_array_almost_equal(ccd.data, comb_ccds.data) + np.testing.assert_allclose(ccd.data, comb_files.data) + np.testing.assert_allclose(ccd.data, comb_ccds.data) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the @@ -511,7 +511,7 @@ def test_combine_average_ccddata(): avgccd = combine(ccd_list, output_file=None, method="average", unit=u.adu) # averaging same ccdData should give back same images - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -528,7 +528,7 @@ def test_combine_limitedmem_fitsimages(): fitsfilename_list, output_file=None, method="average", mem_limit=1e6, unit=u.adu ) # averaging same ccdData should give back same images - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data) + 
np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -553,7 +553,7 @@ def test_combine_limitedmem_scale_fitsimages(): unit=u.adu, ) - np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data, decimal=4) + np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test the optional uncertainty function in average_combine @@ -593,7 +593,7 @@ def test_sum_combine_uncertainty(): c = Combiner(ccd_list) ccd = c.sum_combine(uncertainty_func=np.sum) uncert_ref = np.sum(c.data_arr, 0) * np.sqrt(3) - np.testing.assert_almost_equal(ccd.uncertainty.array, uncert_ref) + np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=np.sum) @@ -639,7 +639,7 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): ccd_list, method=combine_method_name, minmax_clip=True, minmax_clip_min=-100 ) - np.testing.assert_array_almost_equal( + np.testing.assert_allclose( ccd_comb.uncertainty.array, expected_result.uncertainty.array ) @@ -690,7 +690,7 @@ def test_combiner_uncertainty_average(): ref_uncertainty = np.ones((10, 10)) / 2 # Correction because we combined two images. ref_uncertainty /= np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -710,7 +710,7 @@ def test_combiner_uncertainty_average_mask(): # Correction because we combined two images. ref_uncertainty /= np.sqrt(3) ref_uncertainty[5, 5] = np.std([2, 3]) / np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -731,7 +731,7 @@ def test_combiner_uncertainty_median_mask(): # Correction because we combined two images. 
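# [Editorial aside -- illustration only, not part of this patch. The sqrt
# corrections in these tests implement the standard error of a combined
# pixel: the per-pixel spread divided by sqrt(N_unmasked), where the
# spread is a std for average_combine and a MAD-derived sigma here.
# A minimal numpy sketch of the two cases exercised below:
#
#     import numpy as np
#     np.std([1, 2, 3]) / np.sqrt(3)   # -> ~0.4714, pixel seen in all 3 images
#     np.std([2, 3]) / np.sqrt(2)      # -> ~0.3536, pixel masked in 1 of 3
# ]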
ref_uncertainty /= np.sqrt(3) # 0.855980789955 ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2) # 0.524179041254 - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) @@ -750,7 +750,7 @@ def test_combiner_uncertainty_sum_mask(): ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3]) ref_uncertainty *= np.sqrt(3) ref_uncertainty[5, 5] = np.std([2, 3]) * np.sqrt(2) - np.testing.assert_array_almost_equal(ccd.uncertainty.array, ref_uncertainty) + np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) def test_combiner_3d(): @@ -762,11 +762,11 @@ def test_combiner_3d(): c = Combiner(ccd_list) assert c.data_arr.shape == (3, 5, 5, 5) - assert c.data_arr.mask.shape == (3, 5, 5, 5) + assert c.data_arr_mask.shape == (3, 5, 5, 5) ccd = c.average_combine() assert ccd.shape == (5, 5, 5) - np.testing.assert_array_almost_equal(ccd.data, data1, decimal=4) + np.testing.assert_allclose(ccd.data, data1) def test_3d_combiner_with_scaling(): From 7b2ed18a9218ba6224d0e07bca37384d0ec46ff7 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 7 Mar 2025 11:50:56 -0600 Subject: [PATCH 016/111] Initial attempt to adopt array API in combiner --- ccdproc/combiner.py | 240 +++++++++++++++++++++------------ ccdproc/tests/test_combiner.py | 36 ++--- 2 files changed, 173 insertions(+), 103 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 615f4cc5..deea6a32 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -3,7 +3,8 @@ """This module implements the combiner class.""" import numpy as np -from numpy import ma + +# from numpy import ma try: import bottleneck as bn @@ -12,6 +13,7 @@ else: HAS_BOTTLENECK = True +import array_api_compat from astropy import log from astropy.nddata import CCDData, StdDevUncertainty from astropy.stats import sigma_clip @@ -105,9 +107,6 @@ def __init__(self, ccd_iter, dtype=None): "ccd_iter should be a list or a generator of CCDData objects." ) - if dtype is None: - dtype = np.float64 - default_shape = None default_unit = None @@ -132,22 +131,28 @@ def __init__(self, ccd_iter, dtype=None): if not (default_unit == ccd.unit): raise TypeError("CCDData objects don't have the same unit.") + # Set array namespace + xp = array_api_compat.array_namespace(ccd_list[0].data) + if dtype is None: + dtype = xp.float64 self.ccd_list = ccd_list self.unit = default_unit self.weights = None self._dtype = dtype # set up the data array - new_shape = (len(ccd_list),) + default_shape - self.data_arr = ma.masked_all(new_shape, dtype=dtype) + # new_shape = (len(ccd_list),) + default_shape + self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) # populate self.data_arr - for i, ccd in enumerate(ccd_list): - self.data_arr[i] = ccd.data + mask_list = [] + for ccd in ccd_list: if ccd.mask is not None: - self.data_arr.mask[i] = ccd.mask + mask_list.append(ccd.mask) else: - self.data_arr.mask[i] = ma.zeros(default_shape) + mask_list.append(xp.zeros(default_shape)) + + self.data_arr_mask = xp.array(mask_list, dtype=bool) # Must be after self.data_arr is defined because it checks the # length of the data array. 
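# [Editorial aside -- illustration only, not part of this patch. The
# array_namespace() call adopted in __init__ above is the central dispatch
# pattern of this series: resolve the namespace once from the input
# arrays, then call xp.* where np.* used to be, so the same code runs on
# any array API backend. A minimal sketch, assuming a recent jax is
# installed (as the test dependencies in this series arrange):
#
#     import array_api_compat
#     import numpy as np
#     import jax.numpy as jnp
#
#     for arr in (np.ones(3), jnp.ones(3)):
#         xp = array_api_compat.array_namespace(arr)
#         print(xp.mean(arr))   # same call, numpy or jax backend
# ]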
@@ -173,20 +178,23 @@ def weights(self): @weights.setter def weights(self, value): if value is not None: - if isinstance(value, np.ndarray): - if value.shape != self.data_arr.data.shape: - if value.ndim != 1: - raise ValueError( - "1D weights expected when shapes of the " - "data and weights differ." - ) - if value.shape[0] != self.data_arr.data.shape[0]: - raise ValueError( - "Length of weights not compatible with specified axis." - ) - self._weights = value - else: - raise TypeError("weights must be a numpy.ndarray.") + try: + _ = array_api_compat.array_namespace(value) + except TypeError as err: + raise TypeError("weights must be an array.") from err + + if value.shape != self.data_arr.shape: + if value.ndim != 1: + raise ValueError( + "1D weights expected when shapes of the " + "data and weights differ." + ) + if value.shape[0] != self.data_arr.shape[0]: + raise ValueError( + "Length of weights not compatible with specified axis." + ) + self._weights = value + else: self._weights = None @@ -207,13 +215,14 @@ def scaling(self): @scaling.setter def scaling(self, value): + xp = array_api_compat.array_namespace(self.data_arr) if value is None: self._scaling = value else: - n_images = self.data_arr.data.shape[0] + n_images = self.data_arr.shape[0] if callable(value): self._scaling = [value(self.data_arr[i]) for i in range(n_images)] - self._scaling = np.array(self._scaling) + self._scaling = xp.array(self._scaling) else: try: len(value) @@ -227,10 +236,10 @@ def scaling(self, value): "scaling must be a function or an array " "the same length as the number of images." ) - self._scaling = np.array(value) + self._scaling = xp.array(value) # reshape so that broadcasting occurs properly - for _ in range(len(self.data_arr.data.shape) - 1): - self._scaling = self.scaling[:, np.newaxis] + for _ in range(len(self.data_arr.shape) - 1): + self._scaling = self.scaling[:, xp.newaxis] # set up IRAF-like minmax clipping def clip_extrema(self, nlow=0, nhigh=0): @@ -275,20 +284,20 @@ def clip_extrema(self, nlow=0, nhigh=0): .. [0] image.imcombine help text. 
http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?imcombine """ - + xp = array_api_compat.array_namespace(self.data_arr) if nlow is None: nlow = 0 if nhigh is None: nhigh = 0 - argsorted = np.argsort(self.data_arr.data, axis=0) - mg = np.mgrid[ + argsorted = xp.argsort(self.data_arr, axis=0) + mg = xp.mgrid[ [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] ] for i in range(-1 * nhigh, nlow): # create a tuple with the indices where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg]) - self.data_arr.mask[where] = True + self.data_arr_mask[where] = True # set up min/max clipping algorithms def minmax_clipping(self, min_clip=None, max_clip=None): @@ -306,10 +315,12 @@ def minmax_clipping(self, min_clip=None, max_clip=None): """ if min_clip is not None: mask = self.data_arr < min_clip - self.data_arr.mask[mask] = True + # Written to avoid in-place modification of array + self.data_arr_mask = self.data_arr_mask | mask if max_clip is not None: mask = self.data_arr > max_clip - self.data_arr.mask[mask] = True + # Written to avoid in-place modification of array + self.data_arr_mask = self.data_arr_mask | mask # set up sigma clipping algorithms @deprecated_renamed_argument( @@ -368,18 +379,21 @@ def sigma_clipping( # Remove in 3.0 _ = kwd.pop("use_astropy", True) - self.data_arr.mask |= sigma_clip( - self.data_arr.data, - sigma_lower=low_thresh, - sigma_upper=high_thresh, - axis=kwd.get("axis", 0), - copy=kwd.get("copy", False), - maxiters=kwd.get("maxiters", 1), - cenfunc=func, - stdfunc=dev_func, - masked=True, - **kwd, - ).mask + self.data_arr_mask = ( + self.data_arr_mask + | sigma_clip( + self.data_arr, + sigma_lower=low_thresh, + sigma_upper=high_thresh, + axis=kwd.get("axis", 0), + copy=kwd.get("copy", False), + maxiters=kwd.get("maxiters", 1), + cenfunc=func, + stdfunc=dev_func, + masked=True, + **kwd, + ).mask + ) def _get_scaled_data(self, scale_arg): if scale_arg is not None: @@ -389,11 +403,17 @@ def _get_scaled_data(self, scale_arg): return self.data_arr def _get_nan_substituted_data(self, data): + xp = array_api_compat.array_namespace(self.data_arr) + # Get the data as an unmasked array with masked values filled as NaN - if self.data_arr.mask.any(): - data = np.ma.filled(data, fill_value=np.nan) + if self.data_arr_mask.any(): + # Workaround for array libraries that do not allow in-=place modification + try: + data[self.data_arr_mask] = xp.nan + except TypeError: + data = data.at[self.data_arr_mask].set(xp.nan) else: - data = data.data + data = data return data def _combination_setup(self, user_func, default_func, scale_to): @@ -401,16 +421,16 @@ def _combination_setup(self, user_func, default_func, scale_to): Handle the common pieces of image combination data/mask setup. """ data = self._get_scaled_data(scale_to) - + xp = array_api_compat.array_namespace(data) # Play it safe for now and only do the nan thing if the user is using # the default combination function. 
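# [Editorial aside -- illustration only, not part of this patch. The
# "nan thing" referred to above replaces numpy.ma-style masking with plain
# arrays: masked entries become NaN and the default combination functions
# are NaN-aware reductions. Roughly:
#
#     import numpy as np
#     data = np.array([[1.0, 2.0], [3.0, 4.0]])
#     mask = np.array([[False, True], [False, False]])
#     np.nanmean(np.where(mask, np.nan, data), axis=0)   # -> array([2., 4.])
# ]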
if user_func is None: combo_func = default_func # Subtitute NaN for masked entries data = self._get_nan_substituted_data(data) - masked_values = np.isnan(data).sum(axis=0) + masked_values = xp.isnan(data).sum(axis=0) else: - masked_values = self.data_arr.mask.sum(axis=0) + masked_values = self.data_arr_mask.sum(axis=0) combo_func = user_func return data, masked_values, combo_func @@ -458,7 +478,7 @@ def median_combine( data, masked_values, median_func = self._combination_setup( median_func, _default_median(), scale_to ) - + xp = array_api_compat.array_namespace(data) medianed = median_func(data, axis=0) # set the mask @@ -476,17 +496,17 @@ def median_combine( else: uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(self.data_arr) - masked_values) + uncertainty /= xp.sqrt(len(self.data_arr) - masked_values) # Convert uncertainty to plain numpy array (#351) # There is no need to care about potential masks because the # uncertainty was calculated based on the data so potential masked # elements are also masked in the data. No need to keep two identical # masks. - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # create the combined image with a dtype matching the combiner combined_image = CCDData( - np.asarray(medianed, dtype=self.dtype), + xp.asarray(medianed, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -503,9 +523,10 @@ def _weighted_sum(self, data, sum_func): Perform weighted sum, used by both ``sum_combine`` and in some cases by ``average_combine``. """ + xp = array_api_compat.array_namespace(data) if self.weights.shape != data.shape: # Add extra axes to the weights for broadcasting - weights = np.reshape(self.weights, [len(self.weights), 1, 1]) + weights = xp.reshape(self.weights, [len(self.weights), 1, 1]) else: weights = self.weights @@ -560,6 +581,8 @@ def average_combine( scale_func, _default_average(), scale_to ) + xp = array_api_compat.array_namespace(data) + # Do NOT modify data after this -- we need it to be intact when we # we get to the uncertainty calculation. 
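# [Editorial aside -- illustration only, not part of this patch. When
# weights are set, the branch below computes a weighted mean from `data`
# without mutating it, since the uncertainty step later reuses the same
# array. The quantity being formed is
#
#     mean = sum_i(w_i * d_i) / sum_i(w_i)
#
# e.g., sketched with plain arrays:
#
#     import numpy as np
#     d, w = np.array([1.0, 2.0, 4.0]), np.array([1.0, 1.0, 2.0])
#     float(np.sum(w * d) / np.sum(w))   # -> 2.75
# ]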
if self.weights is not None: @@ -575,13 +598,13 @@ def average_combine( # set up the deviation uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(data) - masked_values) + uncertainty /= xp.sqrt(len(data) - masked_values) # Convert uncertainty to plain numpy array (#351) - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # create the combined image with a dtype that matches the combiner combined_image = CCDData( - np.asarray(mean, dtype=self.dtype), + xp.asarray(mean, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -633,6 +656,8 @@ def sum_combine( sum_func, _default_sum(), scale_to ) + xp = array_api_compat.array_namespace(data) + if self.weights is not None: summed, weights = self._weighted_sum(data, sum_func) else: @@ -644,15 +669,15 @@ def sum_combine( # set up the deviation uncertainty = uncertainty_func(data, axis=0) # Divide uncertainty by the number of pixel (#309) - uncertainty /= np.sqrt(len(data) - masked_values) + uncertainty /= xp.sqrt(len(data) - masked_values) # Convert uncertainty to plain numpy array (#351) - uncertainty = np.asarray(uncertainty) + uncertainty = xp.asarray(uncertainty) # Multiply uncertainty by square root of the number of images uncertainty *= len(data) - masked_values # create the combined image with a dtype that matches the combiner combined_image = CCDData( - np.asarray(summed, dtype=self.dtype), + xp.asarray(summed, dtype=self.dtype), mask=mask, unit=self.unit, uncertainty=StdDevUncertainty(uncertainty), @@ -696,8 +721,9 @@ def _calculate_size_of_image(ccd, combine_uncertainty_function): # If uncertainty_func is given for combine this will create an uncertainty # even if the originals did not have one. In that case we need to create # an empty placeholder. + xp = array_api_compat.array_namespace(ccd.data) if ccd.uncertainty is None and combine_uncertainty_function is not None: - ccd.uncertainty = StdDevUncertainty(np.zeros(ccd.data.shape)) + ccd.uncertainty = StdDevUncertainty(xp.zeros(ccd.data.shape)) size_of_an_img = ccd.data.nbytes try: @@ -737,8 +763,8 @@ def combine( sigma_clip=False, sigma_clip_low_thresh=3, sigma_clip_high_thresh=3, - sigma_clip_func=ma.mean, - sigma_clip_dev_func=ma.std, + sigma_clip_func=None, + sigma_clip_dev_func=None, dtype=None, combine_uncertainty_function=None, overwrite_output=False, @@ -852,21 +878,30 @@ def combine( combined_image : `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. """ + # Handle case where the input is an array of file names first if not isinstance(img_list, list): - # If not a list, check whether it is a numpy ndarray or string of - # filenames separated by comma - if isinstance(img_list, np.ndarray): - img_list = img_list.tolist() - elif isinstance(img_list, str) and ("," in img_list): - img_list = img_list.split(",") + try: + _ = array_api_compat.array_namespace(img_list) + except TypeError: + pass else: - try: - # Maybe the input can be made into a list, so try that - img_list = list(img_list) - except TypeError as err: - raise ValueError( - "unrecognised input for list of images to combine." 
- ) from err + # If it is an array, convert it to a list + img_list = list(img_list) + if ( + not isinstance(img_list, list) + and isinstance(img_list, str) + and ("," in img_list) + ): + # Handle case where the input is a string of file names separated by comma + img_list = img_list.split(",") + else: + try: + # Maybe the input can be made into a list, so try that + img_list = list(img_list) + except TypeError as err: + raise ValueError( + "unrecognised input for list of images to combine." + ) from err # Select Combine function to call in Combiner if method == "average": @@ -885,24 +920,32 @@ def combine( # User has provided fits filenames to read from ccd = CCDData.read(img_list[0], **ccdkwargs) + # Get the array namespace + xp = array_api_compat.array_namespace(ccd.data) + if dtype is None: - dtype = np.float64 + dtype = xp.float64 + + if sigma_clip_func is None: + sigma_clip_func = xp.mean + if sigma_clip_dev_func is None: + sigma_clip_dev_func = xp.std # Convert the master image to the appropriate dtype so when overwriting it # later the data is not downcast and the memory consumption calculation # uses the internally used dtype instead of the original dtype. #391 if ccd.data.dtype != dtype: - ccd.data = ccd.data.astype(dtype) + ccd.data = xp.astype(ccd.data, dtype) # If the template image doesn't have an uncertainty, add one, because the # result always has an uncertainty. if ccd.uncertainty is None: - ccd.uncertainty = StdDevUncertainty(np.zeros_like(ccd.data)) + ccd.uncertainty = StdDevUncertainty(xp.zeros_like(ccd.data)) # If the template doesn't have a mask, add one, because the result may have # a mask if ccd.mask is None: - ccd.mask = np.zeros_like(ccd.data, dtype=bool) + ccd.mask = xp.zeros_like(ccd.data, dtype=bool) size_of_an_img = _calculate_size_of_image(ccd, combine_uncertainty_function) @@ -951,7 +994,7 @@ def combine( scalevalues.append(scale(imgccd.data)) - to_set_in_combiner["scaling"] = np.array(scalevalues) + to_set_in_combiner["scaling"] = xp.array(scalevalues) else: to_set_in_combiner["scaling"] = scale @@ -1008,11 +1051,32 @@ def combine( comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds) # add it back into the master image - ccd.data[x:xend, y:yend] = comb_tile.data + # The try/except below is to handle array libraries that may not + # allow in-place operations. + try: + ccd.data[x:xend, y:yend] = comb_tile.data + except TypeError: + ccd.data = ccd.data.at[x:xend, y:yend].set(comb_tile.data) + if ccd.mask is not None: - ccd.mask[x:xend, y:yend] = comb_tile.mask + # Maybe temporary workaround for the mask not being writeable... + ccd.mask = ccd.mask.copy() + # The try/except below is to handle array libraries that may not + # allow in-place operations. + try: + ccd.mask[x:xend, y:yend] = comb_tile.mask + except (TypeError, ValueError): + ccd.mask = ccd.mask.at[x:xend, y:yend].set(comb_tile.mask) + if ccd.uncertainty is not None: - ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array + # The try/except below is to handle array libraries that may not + # allow in-place operations. 
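# [Editorial aside -- illustration only, not part of this commit. The
# try/except just below is "attempt the mutable write, fall back to a
# functional update". With a jax array the except branch is what runs,
# because jax arrays are immutable:
#
#     import jax.numpy as jnp
#     a = jnp.zeros(3)
#     a = a.at[0].set(1.0)   # returns an updated copy; `a` must be rebound
#
# Later commits in this series replace this try/except with
# array_api_extra's xpx.at, which wraps exactly this dispatch.
# ]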
+ try: + ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array + except TypeError: + ccd.uncertainty.array = ccd.uncertainty.array.at[ + x:xend, y:yend + ].set(comb_tile.uncertainty.array) # Free up memory to try to stay under user's limit del comb_tile del tile_combiner diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 1cde6535..6f3fd440 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -83,7 +83,7 @@ def test_combiner_create(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) assert c.data_arr.shape == (3, 100, 100) - assert c.data_arr.mask.shape == (3, 100, 100) + assert c.data_arr_mask.shape == (3, 100, 100) # test if dtype matches the value that is passed @@ -112,8 +112,8 @@ def test_combiner_mask(): ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) assert c.data_arr.shape == (3, 10, 10) - assert c.data_arr.mask.shape == (3, 10, 10) - assert not c.data_arr.mask[0, 5, 5] + assert c.data_arr_mask.shape == (3, 10, 10) + assert not c.data_arr_mask[0, 5, 5] def test_weights(): @@ -185,7 +185,7 @@ def test_combiner_minmax_max(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=None, max_clip=500) - assert c.data_arr[2].mask.all() + assert c.data_arr_mask[2].all() def test_combiner_minmax_min(): @@ -197,7 +197,7 @@ def test_combiner_minmax_min(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=-500, max_clip=None) - assert c.data_arr[1].mask.all() + assert c.data_arr_mask[1].all() def test_combiner_sigmaclip_high(): @@ -213,7 +213,7 @@ def test_combiner_sigmaclip_high(): c = Combiner(ccd_list) # using mad for more robust statistics vs. std c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) - assert c.data_arr[5].mask.all() + assert c.data_arr_mask[5].all() def test_combiner_sigmaclip_single_pix(): @@ -234,7 +234,7 @@ def test_combiner_sigmaclip_single_pix(): c.data_arr[3, 5, 5] = -5 c.data_arr[4, 5, 5] = 25 c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) - assert c.data_arr.mask[4, 5, 5] + assert c.data_arr_mask[4, 5, 5] def test_combiner_sigmaclip_low(): @@ -250,7 +250,7 @@ def test_combiner_sigmaclip_low(): c = Combiner(ccd_list) # using mad for more robust statistics vs. std c.sigma_clipping(high_thresh=None, low_thresh=3, func=np.ma.median, dev_func=mad) - assert c.data_arr[5].mask.all() + assert c.data_arr_mask[5].all() # test that the median combination works and returns a ccddata object @@ -334,6 +334,7 @@ def test_combiner_mask_average(): # are masked?! # assert ccd.data[0, 0] == 0 assert ccd.data[5, 5] == 1 + # THE LINE BELOW IS CATCHING A REAL ERROR assert ccd.mask[0, 0] assert not ccd.mask[5, 5] @@ -623,7 +624,11 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): if mask_point: # Make one pixel really negative so we can clip it and guarantee a resulting # pixel is masked. 
-    ccd_data.data[0, 0] = -1000
+    # Handle case where array is immutable
+    try:
+        ccd_data.data[0, 0] = -1000
+    except TypeError:
+        ccd_data.data = ccd_data.data.at[0, 0].set(-1000)

     ccd_list = [ccd_data, ccd_data, ccd_data]
     c = Combiner(ccd_list)
@@ -889,9 +894,9 @@ def test_clip_extrema_with_other_rejection():
     ccdlist[1].data[2, 0] = 100.1
     c = Combiner(ccdlist)
     # Reject ccdlist[1].data[1,2] by other means
-    c.data_arr.mask[1, 1, 2] = True
+    c.data_arr_mask[1, 1, 2] = True
     # Reject ccdlist[1].data[1,2] by other means
-    c.data_arr.mask[3, 0, 0] = True
+    c.data_arr_mask[3, 0, 0] = True
     c.clip_extrema(nlow=1, nhigh=1)

     result = c.average_combine()
@@ -934,7 +939,7 @@ def create_gen():

     c = Combiner(create_gen())
     assert c.data_arr.shape == (3, 100, 100)
-    assert c.data_arr.mask.shape == (3, 100, 100)
+    assert c.data_arr_mask.shape == (3, 100, 100)


 @pytest.mark.parametrize(
@@ -996,16 +1001,17 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func):

     if comb_func == "sum_combine":
         expected_result = 3 * data
-        actual_result = c.sum_combine(sum_func=np.ma.sum)
+        actual_result = c.sum_combine(sum_func=np.sum)
     elif comb_func == "average_combine":
         expected_result = data
-        actual_result = c.average_combine(scale_func=np.ma.mean)
+        actual_result = c.average_combine(scale_func=np.mean)
     elif comb_func == "median_combine":
         expected_result = data
-        actual_result = c.median_combine(median_func=np.ma.median)
+        actual_result = c.median_combine(median_func=np.median)

     # Two of the three values are masked, so no matter what the combination
     # method is the result in this pixel should be 2.
     expected_result[5, 5] = 2
+    # THIS IS A REAL TEST FAILURE!!!
     np.testing.assert_almost_equal(expected_result, actual_result)


From 1cbb7bc40538b39c05a511da2292e0c19ede183f Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Mon, 17 Mar 2025 11:09:27 -0500
Subject: [PATCH 017/111] Write a mask-aware sum function that is array API
 compatible

---
 ccdproc/tests/test_combiner.py | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 6f3fd440..1729da43 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -1,4 +1,5 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
+import array_api_compat
 import astropy
 import astropy.units as u
 import numpy as np
@@ -1000,8 +1001,26 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func):
     c = Combiner(ccd_list)

     if comb_func == "sum_combine":
+
+        def my_summer(data, mask, axis=None):
+            xp = array_api_compat.array_namespace(data)
+            new_data = []
+            for i in range(data.shape[0]):
+                if mask[i] is not None:
+                    new_data.append(data[i] * ~mask[i])
+                else:
+                    new_data.append(xp.zeros_like(data[i]))
+
+            new_data = xp.array(new_data)

+            def sum_func(_, axis=axis):
+                return xp.sum(new_data, axis=axis)
+
+            # Return the closure; without this my_summer returns None and
+            # sum_combine silently falls back to the default sum function.
+            return sum_func
+
         expected_result = 3 * data
-        actual_result = c.sum_combine(sum_func=np.sum)
+        actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask))
     elif comb_func == "average_combine":
         expected_result = data
         actual_result = c.average_combine(scale_func=np.mean)

From da9035060cfe94074cf5cb3237fbc1fba851190e Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Wed, 19 Mar 2025 10:05:49 -0500
Subject: [PATCH 018/111] Eliminate most use of numpy in tests

---
 ccdproc/tests/test_cosmicray.py | 98 ++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 31 deletions(-)

diff --git a/ccdproc/tests/test_cosmicray.py 
b/ccdproc/tests/test_cosmicray.py index cfccc747..dba0ea74 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,9 +1,13 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -import numpy as np +import array_api_compat import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning +from numpy import array as np_array +from numpy.ma import array as np_ma_array +from numpy.random import default_rng +from numpy.testing import assert_allclose from ccdproc.core import ( background_deviation_box, @@ -21,17 +25,23 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): size = data.shape[0] - rng = np.random.default_rng(99) + rng = default_rng(99) crrays = rng.integers(0, size, size=(ncrays, 2)) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns # add_cosmicrays is highly sensitive to the seed # ideally threshold should be set so it is not sensitive to seed, but # this is not working right now - crflux = 10 * scale * rng.random(NCRAYS) + (threshold + 15) * scale + crflux = 10 * scale * rng.random(ncrays) + (threshold + 15) * scale for i in range(ncrays): y, x = crrays[i] - data.data[y, x] = crflux[i] + try: + data.data[y, x] = crflux[i] + except TypeError as e: + try: + data.data = data.data.at[y, x].set(crflux[i]) + except AttributeError as other_e: + raise other_e from e def test_cosmicray_lacosmic(): @@ -48,10 +58,13 @@ def test_cosmicray_lacosmic(): def test_cosmicray_lacosmic_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9) # check the number of cosmic rays detected @@ -62,8 +75,9 @@ def test_cosmicray_lacosmic_ccddata(): def test_cosmicray_lacosmic_check_data(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) with pytest.raises(TypeError): - noise = DATA_SCALE * np.ones_like(ccd_data.data) + noise = DATA_SCALE * xp.ones_like(ccd_data.data) cosmicray_lacosmic(10, noise) @@ -76,10 +90,13 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): # data and returns that gain corrected data. That is not the # intent... ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) # No units here on purpose. gain = 2.0 @@ -93,22 +110,26 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): cr_mask = new_ccd.mask # Fill masked locations with 0 since there is no simple relationship # between the original value and the corrected value. - orig_data = np.ma.array(ccd_data.data, mask=cr_mask).filled(0) - new_data = np.ma.array(new_data.data, mask=cr_mask).filled(0) + # Masking using numpy is a handy way to check the results here. 
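# [Editorial aside -- illustration only, not part of this patch. Even
# though the code under test is now array-API based, numpy masked arrays
# remain useful on the test side as an oracle: fill the flagged pixels
# with a known value and compare plain arrays, e.g.:
#
#     from numpy.ma import array as np_ma_array
#     np_ma_array([1.0, 99.0], mask=[False, True]).filled(0)
#     # -> array([1., 0.])
# ]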
+ orig_data = xp.array(np_ma_array(ccd_data.data, mask=cr_mask).filled(0)) + new_data = xp.array(np_ma_array(new_data.data, mask=cr_mask).filled(0)) if gain_correct_data: gain_for_test = gain else: gain_for_test = 1.0 - np.testing.assert_allclose(gain_for_test * orig_data, new_data) + assert_allclose(gain_for_test * orig_data, new_data) def test_cosmicray_lacosmic_accepts_quantity_gain(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) # The units below are the point of the test gain = 2.0 * u.electron / u.adu @@ -117,10 +138,13 @@ def test_cosmicray_lacosmic_accepts_quantity_gain(): def test_cosmicray_lacosmic_accepts_quantity_readnoise(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) gain = 2.0 * u.electron / u.adu # The units below are the point of this test readnoise = 6.5 * u.electron @@ -132,11 +156,14 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): # of adu, a readnoise in electrons and a gain in adu / electron. # That is not internally inconsistent. ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) ccd_data.unit = "adu" threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) readnoise = 6.5 * u.electron # The units below are deliberately incorrect. @@ -149,13 +176,16 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(): # Check that an input ccd in electrons raises a warning. ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) # The unit below is important for the test; this unit on # input is supposed to raise an error. ccd_data.unit = u.electron threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) # No units here on purpose. gain = 2.0 # Don't really need to set this (6.5 is the default value) but want to @@ -176,10 +206,13 @@ def test_cosmicray_lacosmic_invar_inbkg(new_args): # that calling with the new keyword arguments to astroscrappy # 1.1.0 raises no error. 
ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(noise) with pytest.raises(TypeError): cosmicray_lacosmic(ccd_data, sigclip=5.9, **new_args) @@ -206,7 +239,9 @@ def test_cosmicray_median_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - ccd_data.uncertainty = ccd_data.data * 0.0 + DATA_SCALE + # Workaround for the fact that upstream checks for numpy array + # specifically. + ccd_data.uncertainty = np_array(ccd_data.data * 0.0 + DATA_SCALE) nccd = cosmicray_median(ccd_data, thresh=5, mbox=11, error_image=None) # check the number of cosmic rays detected @@ -217,7 +252,7 @@ def test_cosmicray_median_masked(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - data = np.ma.masked_array(ccd_data.data, (ccd_data.data > -1e6)) + data = np_ma_array(ccd_data.data, (ccd_data.data > -1e6)) ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, error_image=DATA_SCALE) # check the number of cosmic rays detected @@ -243,7 +278,7 @@ def test_cosmicray_median_gbox(): data, crarr = cosmicray_median( ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=0, gbox=5 ) - data = np.ma.masked_array(data, crarr) + data = np_ma_array(data, crarr) assert crarr.sum() > NCRAYS assert abs(data.std() - scale) < 0.1 @@ -269,28 +304,28 @@ def test_cosmicray_median_background_deviation(): def test_background_deviation_box(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) bd = background_deviation_box(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_box_fail(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) with pytest.raises(ValueError): background_deviation_box(cd, 0.5) def test_background_deviation_filter(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) bd = background_deviation_filter(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_filter_fail(): scale = 5.3 - cd = np.random.default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) with pytest.raises(ValueError): background_deviation_filter(cd, 0.5) @@ -321,10 +356,11 @@ def test_cosmicray_lacosmic_pssl_does_not_fail(): # to make sure that passing in pssl does not lead to an error # since the new interface does not include pssl. 
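# [Editorial aside -- illustration only, not part of this patch. The
# random inputs throughout this test module come from numpy's Generator
# API with fixed seeds, so cosmic-ray positions and background noise stay
# reproducible regardless of the array backend under test:
#
#     from numpy.random import default_rng
#     cd = default_rng(seed=123).normal(loc=0, scale=5.3, size=(100, 100))
#     cd.std()   # ~5.3; the tests above assert agreement within 0.10
# ]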
ccd_data = ccd_data_func(data_scale=DATA_SCALE) + xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - noise = DATA_SCALE * np.ones_like(ccd_data.data) - ccd_data.uncertainty = noise + noise = DATA_SCALE * xp.ones_like(ccd_data.data) + ccd_data.uncertainty = np_array(noise) with pytest.warns(AstropyDeprecationWarning): # The deprecation warning is expected and should be captured nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, pssl=0.0001) From bddb2de76d4f293b5be2fb11758e561fe4b04d56 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 10:15:12 -0500 Subject: [PATCH 019/111] Fix a couple calls to numpy masked array --- ccdproc/tests/test_cosmicray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index dba0ea74..f41d52dd 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -252,7 +252,7 @@ def test_cosmicray_median_masked(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - data = np_ma_array(ccd_data.data, (ccd_data.data > -1e6)) + data = np_ma_array(ccd_data.data, mask=(ccd_data.data > -1e6)) ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, error_image=DATA_SCALE) # check the number of cosmic rays detected @@ -278,7 +278,7 @@ def test_cosmicray_median_gbox(): data, crarr = cosmicray_median( ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=0, gbox=5 ) - data = np_ma_array(data, crarr) + data = np_ma_array(data, mask=crarr) assert crarr.sum() > NCRAYS assert abs(data.std() - scale) < 0.1 From 8ac481a26031b0bfac5b8f424f5813aab29ba672 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 10:15:25 -0500 Subject: [PATCH 020/111] One more workaround for immutable arrays --- ccdproc/core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index a6ee524e..bf02ef48 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1904,7 +1904,10 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # bother with it. 
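# [Editorial aside -- illustration only, not part of this commit. The
# rbox step below replaces each flagged cosmic-ray pixel with the median
# of its local rbox x rbox neighbourhood, which suppresses the spike
# while leaving clean pixels essentially untouched:
#
#     import numpy as np
#     from scipy import ndimage
#     d = np.ones((3, 3)); d[1, 1] = 99.0
#     ndimage.median_filter(d, 3)[1, 1]   # -> 1.0, the local median
# ]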
# data = np.ma.masked_array(data, (crarr == 1)) mdata = ndimage.median_filter(data, rbox) - ndata[crarr == 1] = mdata[crarr == 1] + try: + ndata[crarr == 1] = mdata[crarr == 1] + except TypeError: + ndata = ndata.at[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): From 220a96e1fad076a7b050754b65aea776a786f210 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 10:25:41 -0500 Subject: [PATCH 021/111] Fix up dependencies and test environment setup --- pyproject.toml | 2 ++ tox.ini | 1 + 2 files changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 2d43ba32..0c9118a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,7 @@ authors = [ { name = "and Michael Seifert" }, ] dependencies = [ + "array_api_compat", "astropy>=5.0.1", "astroscrappy>=1.1.0", "numpy>=1.24", @@ -34,6 +35,7 @@ test = [ "pre-commit", "pytest-astropy>=0.10.0", "ruff", + "jax", ] [project.urls] diff --git a/tox.ini b/tox.ini index 62ff0d73..b35840ae 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,7 @@ isolated_build = true [testenv] setenv = + test: JAX_ENABLE_X64 = True devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple extras = test From 7378297774fd5a97617b48f71c454abf00ecf4a9 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 19 Mar 2025 12:57:40 -0500 Subject: [PATCH 022/111] Update minimum python to 3.10 --- .github/workflows/ci_tests.yml | 12 +++--------- pyproject.toml | 2 +- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 614fc2cc..e80353cb 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -27,18 +27,12 @@ jobs: strategy: matrix: include: - - name: 'ubuntu-py38-oldestdeps' + - name: 'ubuntu-py310-oldestdeps' os: ubuntu-latest - python: '3.8' + python: '3.10' # Test the oldest supported dependencies on the oldest supported Python - tox_env: 'py38-test-oldestdeps' + tox_env: 'py310-test-oldestdeps' - - name: 'macos-py310-astroscrappy11' - # Keep this test until astroscrappy 1.1.0 is the oldest supported - # version. - os: macos-latest - python: '3.10' - tox_env: 'py310-test-astroscrappy11' - name: 'ubuntu-py312-bottleneck' os: ubuntu-latest diff --git a/pyproject.toml b/pyproject.toml index 0c9118a5..ca507d6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ dynamic = ["version"] description = "Astropy affiliated package" readme = "README.rst" license = { text = "BSD-3-Clause" } -requires-python = ">=3.8" +requires-python = ">=3.10" authors = [ { name = "Steve Crawford", email = "ccdproc@gmail.com" }, { name = "Matt Craig" }, From 44858390b3caedb177e117f49e6d599423f629a3 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:14:39 -0500 Subject: [PATCH 023/111] Fix mask access --- docs/image_combination.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/image_combination.rst b/docs/image_combination.rst index eed5ae1a..460a1570 100644 --- a/docs/image_combination.rst +++ b/docs/image_combination.rst @@ -104,11 +104,11 @@ To clip iteratively, continuing the clipping process until no more pixels are rejected, loop in the code calling the clipping method: >>> old_n_masked = 0 # dummy value to make loop execute at least once - >>> new_n_masked = combiner.data_arr.mask.sum() + >>> new_n_masked = combiner.data_arr_mask.sum() >>> while (new_n_masked > old_n_masked): ... combiner.sigma_clipping(func=np.ma.median) ... old_n_masked = new_n_masked - ... 
new_n_masked = combiner.data_arr.mask.sum()
+    ...     new_n_masked = combiner.data_arr_mask.sum()

 Note that the default values for the high and low thresholds for
 rejection are 3 standard deviations.

From 30e9e558caaf5e3d33b8d057e242ff2bed0dfd45 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Thu, 20 Mar 2025 09:18:11 -0500
Subject: [PATCH 024/111] Ignore warnings about negative values in square root

---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index ca507d6e..d7c0139f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -147,6 +147,7 @@ filterwarnings= [
     "ignore:numpy\\.ufunc size changed:RuntimeWarning",
     "ignore:numpy.ndarray size changed:RuntimeWarning",
     "ignore:`np.bool` is a deprecated alias for the builtin `bool`:DeprecationWarning",
+    "ignore:invalid value encountered in sqrt:RuntimeWarning",
 ]
 markers = [
     "data_size(N): set dimension of square data array for ccd_data fixture",

From a45d2cd9e48d6b9e58704dae9953d380cd545816 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Thu, 20 Mar 2025 09:36:38 -0500
Subject: [PATCH 025/111] Update several minimum dependencies

---
 .github/workflows/ci_tests.yml |  6 +++---
 pyproject.toml                 |  8 ++++----
 tox.ini                        | 10 ++++------
 3 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml
index e80353cb..4fcb269d 100644
--- a/.github/workflows/ci_tests.yml
+++ b/.github/workflows/ci_tests.yml
@@ -27,11 +27,11 @@ jobs:
     strategy:
       matrix:
         include:
-          - name: 'ubuntu-py310-oldestdeps'
+          - name: 'ubuntu-py311-oldestdeps'
            os: ubuntu-latest
-            python: '3.10'
+            python: '3.11'
            # Test the oldest supported dependencies on the oldest supported Python
-            tox_env: 'py310-test-oldestdeps'
+            tox_env: 'py311-test-oldestdeps'


           - name: 'ubuntu-py312-bottleneck'
diff --git a/pyproject.toml b/pyproject.toml
index d7c0139f..cc346c72 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ dynamic = ["version"]
 description = "Astropy affiliated package"
 readme = "README.rst"
 license = { text = "BSD-3-Clause" }
-requires-python = ">=3.10"
+requires-python = ">=3.11"
 authors = [
     { name = "Steve Crawford", email = "ccdproc@gmail.com" },
     { name = "Matt Craig" },
@@ -16,10 +16,10 @@ authors = [
 ]
 dependencies = [
     "array_api_compat",
-    "astropy>=5.0.1",
+    "astropy>=6.0.1",
     "astroscrappy>=1.1.0",
-    "numpy>=1.24",
-    "reproject>=0.7",
+    "numpy>=1.26",
+    "reproject>=0.9.1",
     "scikit-image",
     "scipy",
 ]
diff --git a/tox.ini b/tox.ini
index b35840ae..0ab4ee81 100644
--- a/tox.ini
+++ b/tox.ini
@@ -33,8 +33,7 @@ description =
 deps =
     cov: coverage

-    numpy124: numpy==1.24.*  # current oldest suppported numpy
-    numpy126: numpy==1.26.*
+    numpy126: numpy==1.26.*  # currently the oldest supported numpy version
     numpy200: numpy==2.0.*
     numpy210: numpy==2.1.*

@@ -48,10 +47,9 @@ deps =

    # Remember to transfer any changes here to setup.cfg also. 
Only listing # packages which are constrained in the setup.cfg - oldestdeps: numpy==1.24.* - oldestdeps: astropy==5.0.* - oldestdeps: reproject==0.7 - oldestdeps: cython + oldestdeps: numpy==1.26.* + oldestdeps: astropy==6.0.* + oldestdeps: reproject==0.9.1 commands = pip freeze From e4afebcfb806ae414267d9479928c59f9080dfd1 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:46:00 -0500 Subject: [PATCH 026/111] Fix linting errors --- ccdproc/core.py | 5 ++++- ccdproc/image_collection.py | 2 +- ccdproc/log_meta.py | 4 ++-- ccdproc/tests/test_combiner.py | 2 +- ccdproc/tests/test_image_collection.py | 6 +++--- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index bf02ef48..5d799980 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1299,7 +1299,10 @@ def rebin(ccd, newshape): if len(ccd.shape) != len(newshape): raise ValueError("newshape does not have the same dimensions as " "ccd.") - slices = [slice(0, old, old / new) for old, new in zip(ccd.shape, newshape)] + slices = [ + slice(0, old, old / new) + for old, new in zip(ccd.shape, newshape, strict=True) + ] coordinates = xp.mgrid[slices] indices = coordinates.astype("i") return ccd[tuple(indices)] diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py index 35a8acda..4d9eb575 100644 --- a/ccdproc/image_collection.py +++ b/ccdproc/image_collection.py @@ -696,7 +696,7 @@ def _find_keywords_by_values(self, **kwd): use_info = self._fits_summary(header_keywords=keywords) matches = np.ones(len(use_info), dtype=bool) - for key, value in zip(keywords, values): + for key, value in zip(keywords, values, strict=True): logger.debug("key %s, value %s", key, value) logger.debug("value in table %s", use_info[key]) value_missing = use_info[key].mask diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index 67a469c0..ebafa320 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -102,7 +102,7 @@ def wrapper(*args, **kwd): # been called as keywords. positional_args = original_args[: len(args)] - all_args = chain(zip(positional_args, args), kwd.items()) + all_args = chain(zip(positional_args, args, strict=True), kwd.items()) all_args = [ f"{name}={_replace_array_with_placeholder(val)}" for name, val in all_args @@ -134,7 +134,7 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, (NDData, np.ndarray)): + elif isinstance(value, NDData | np.ndarray): try: length = len(value) except TypeError: diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 1729da43..d7e3d4d0 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -297,7 +297,7 @@ def test_combiner_sum_weighted(): c = Combiner(ccd_list) c.weights = np.array([1, 2, 3]) ccd = c.sum_combine() - expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list)) + expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list, strict=True)) np.testing.assert_almost_equal(ccd, expected_result) diff --git a/ccdproc/tests/test_image_collection.py b/ccdproc/tests/test_image_collection.py index f165dc92..4b79afc0 100644 --- a/ccdproc/tests/test_image_collection.py +++ b/ccdproc/tests/test_image_collection.py @@ -160,7 +160,7 @@ def test_filtered_files_have_proper_path(self, triage_setup): plain_biases = list(plain_biases) # Same subset, but with full path. 
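# [Editorial aside -- illustration only, not part of this commit. The
# strict=True arguments added throughout this patch make zip() raise on a
# length mismatch instead of silently truncating, which is the behaviour
# the linter was flagging:
#
#     list(zip([1, 2], ["a"], strict=True))
#     # raises ValueError: zip() argument 2 is shorter than argument 1
# ]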
path_biases = ic.files_filtered(imagetyp="bias", include_path=True) - for path_b, plain_b in zip(path_biases, plain_biases): + for path_b, plain_b in zip(path_biases, plain_biases, strict=True): # If the path munging has been done properly, this will succeed. assert os.path.basename(path_b) == plain_b @@ -207,7 +207,7 @@ def test_generator_full_path(self, triage_setup): location=triage_setup.test_dir, keywords=["imagetyp"] ) - for path, file_name in zip(collection._paths(), collection.files): + for path, file_name in zip(collection._paths(), collection.files, strict=True): assert path == os.path.join(triage_setup.test_dir, file_name) def test_hdus(self, triage_setup): @@ -278,7 +278,7 @@ def test_multiple_extensions(self, triage_setup, extension): ccd_kwargs = {"unit": "adu"} for data, hdr, hdu, ccd in zip( - ic2.data(), ic2.headers(), ic2.hdus(), ic2.ccds(ccd_kwargs) + ic2.data(), ic2.headers(), ic2.hdus(), ic2.ccds(ccd_kwargs), strict=True ): np.testing.assert_allclose(data, ext2.data) assert hdr == ext2.header From 3724f0acfa3b69fc2f14369dc151eb48b4f83d63 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:47:45 -0500 Subject: [PATCH 027/111] Drop unnecessary import --- docs/conf.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index cb0fa5fe..68a64539 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -39,10 +39,7 @@ ) sys.exit(1) -if sys.version_info < (3, 11): - import tomli as tomllib -else: - import tomllib +import tomllib # Grab minversion from pyproject.toml with (Path(__file__).parents[1] / "pyproject.toml").open("rb") as f: From c0928c884feb0d80d271665ba99faf6637e56ff5 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 09:50:16 -0500 Subject: [PATCH 028/111] Drop unneeded test --- .github/workflows/ci_tests.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 4fcb269d..99d41a90 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -39,11 +39,6 @@ jobs: python: '3.12' tox_env: 'py312-test-alldeps-bottleneck-cov' - - name: 'ubuntu-py310' - os: ubuntu-latest - python: '3.10' - tox_env: 'py310-test-alldeps-numpy124' - - name: 'ubuntu-py311' os: ubuntu-latest python: '3.11' From c04f84292abd5eb1cb3af17f000c311c5fd9aa4e Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 20 Mar 2025 10:02:24 -0500 Subject: [PATCH 029/111] Skip memory tests if jax is installed --- ccdproc/tests/test_memory_use.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ccdproc/tests/test_memory_use.py b/ccdproc/tests/test_memory_use.py index 5c22248f..173e0f96 100644 --- a/ccdproc/tests/test_memory_use.py +++ b/ccdproc/tests/test_memory_use.py @@ -15,6 +15,13 @@ else: memory_profile_present = True +try: + import jax # noqa +except ImportError: + JAX_PRESENT = False +else: + JAX_PRESENT = True + image_size = 2000 # Square image, so 4000 x 4000 num_files = 10 @@ -30,8 +37,10 @@ def teardown_module(): fil.unlink() +@pytest.mark.skipif(JAX_PRESENT, reason="JAX is present, and does not allow os.fork") @pytest.mark.skipif( - not platform.startswith("linux"), reason="memory tests only work on linux" + not platform.startswith("linux"), + reason="memory tests only work on linux", ) @pytest.mark.skipif(not memory_profile_present, reason="memory_profiler not installed") @pytest.mark.parametrize("combine_method", ["average", "sum", "median"]) From 9fcc655419bb228879069d8c78d8bdf876694eaa Mon Sep 17 00:00:00 
2001
From: Matt Craig
Date: Fri, 21 Mar 2025 10:22:46 -0500
Subject: [PATCH 030/111] Explain why numpy is still used in image_collection

---
 ccdproc/image_collection.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py
index 4d9eb575..6a01066b 100644
--- a/ccdproc/image_collection.py
+++ b/ccdproc/image_collection.py
@@ -8,13 +8,20 @@
 from os import listdir, path

 import astropy.io.fits as fits
-import numpy as np
+import numpy as np  # see numpy comment below
 import numpy.ma as ma
 from astropy.table import MaskedColumn, Table
 from astropy.utils.exceptions import AstropyUserWarning

 from .ccddata import _recognized_fits_file_extensions, fits_ccddata_reader

+# ==> numpy comment <==
+# numpy is used internally to keep track of masking in the summary
+# table. It is not used for any CCD processing, so there is no need
+# to implement the array API here. In other words, ImageFileCollection
+# is fine to implement its internal tables however it wants, regardless
+# of what the user is using for their data arrays.
+
 logger = logging.getLogger(__name__)

 __all__ = ["ImageFileCollection"]

From 1fe1180a1ec8f2abb96be87b2a8ca8c5cd78749b Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Mon, 24 Mar 2025 10:24:06 -0500
Subject: [PATCH 031/111] Drop numpy import in combiner

---
 ccdproc/combiner.py            | 88 ++++++++++++++++++++++--------
 ccdproc/tests/test_combiner.py |  5 +-
 2 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py
index deea6a32..77fb735c 100644
--- a/ccdproc/combiner.py
+++ b/ccdproc/combiner.py
@@ -2,10 +2,6 @@

 """This module implements the combiner class."""

-import numpy as np
-
-# from numpy import ma
-
 try:
     import bottleneck as bn
 except ImportError:
@@ -24,32 +20,68 @@
 __all__ = ["Combiner", "combine"]


-def _default_median():  # pragma: no cover
+def _default_median(xp=None):  # pragma: no cover
     if HAS_BOTTLENECK:
         return bn.nanmedian
     else:
-        return np.nanmedian
+        if xp is None:
+            return None
+
+        # No bottleneck, but we have a namespace.
+        try:
+            return xp.nanmedian
+        except AttributeError as e:
+            raise RuntimeError(
+                "No NaN-aware median function available. Please install bottleneck."
+            ) from e


-def _default_average():  # pragma: no cover
+def _default_average(xp=None):  # pragma: no cover
     if HAS_BOTTLENECK:
         return bn.nanmean
     else:
-        return np.nanmean
+        if xp is None:
+            return None
+
+        # No bottleneck, but we have a namespace.
+        try:
+            return xp.nanmean
+        except AttributeError as e:
+            raise RuntimeError(
+                "No NaN-aware mean function available. Please install bottleneck."
+            ) from e


-def _default_sum():  # pragma: no cover
+def _default_sum(xp=None):  # pragma: no cover
     if HAS_BOTTLENECK:
         return bn.nansum
     else:
-        return np.nansum
+        if xp is None:
+            return None
+
+        # No bottleneck, but we have a namespace.
+        try:
+            return xp.nansum
+        except AttributeError as e:
+            raise RuntimeError(
+                "No NaN-aware sum function available. Please install bottleneck."
+            ) from e


-def _default_std():  # pragma: no cover
+def _default_std(xp=None):  # pragma: no cover
     if HAS_BOTTLENECK:
         return bn.nanstd
     else:
-        return np.nanstd
+        if xp is None:
+            return None
+
+        # No bottleneck, but we have a namespace.
+        try:
+            return xp.nanstd
+        except AttributeError as e:
+            raise RuntimeError(
+                "No NaN-aware std function available. Please install bottleneck." 
+ ) from e _default_sum_func = _default_sum() @@ -474,11 +506,14 @@ def median_combine( The uncertainty currently calculated using the median absolute deviation does not account for rejected pixels. """ + xp = array_api_compat.array_namespace(self.data_arr) + + _default_median_func = _default_median(xp=xp) data, masked_values, median_func = self._combination_setup( - median_func, _default_median(), scale_to + median_func, _default_median_func, scale_to ) - xp = array_api_compat.array_namespace(data) + medianed = median_func(data, axis=0) # set the mask @@ -577,12 +612,20 @@ def average_combine( combined_image: `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. """ + xp = array_api_compat.array_namespace(self.data_arr) + + _default_average_func = _default_average(xp=xp) + + if sum_func is None: + sum_func = _default_sum(xp=xp) + + if uncertainty_func is None: + uncertainty_func = _default_std(xp=xp) + data, masked_values, scale_func = self._combination_setup( - scale_func, _default_average(), scale_to + scale_func, _default_average_func, scale_to ) - xp = array_api_compat.array_namespace(data) - # Do NOT modify data after this -- we need it to be intact when we # we get to the uncertainty calculation. if self.weights is not None: @@ -652,12 +695,17 @@ def sum_combine( CCDData object based on the combined input of CCDData objects. """ + xp = array_api_compat.array_namespace(self.data_arr) + + _default_sum_func = _default_sum(xp=xp) + + if uncertainty_func is None: + uncertainty_func = _default_std(xp=xp) + data, masked_values, sum_func = self._combination_setup( - sum_func, _default_sum(), scale_to + sum_func, _default_sum_func, scale_to ) - xp = array_api_compat.array_namespace(data) - if self.weights is not None: summed, weights = self._weighted_sum(data, sum_func) else: diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index d7e3d4d0..7838cfe9 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -973,13 +973,14 @@ def test_combiner_with_scaling_uncertainty(comb_func): avg_ccd = getattr(combiner, comb_func)() if comb_func != "median_combine": - uncertainty_func = _default_std() + xp = array_api_compat.array_namespace(ccd_data.data) + uncertainty_func = _default_std(xp=xp) else: uncertainty_func = sigma_func expected_unc = uncertainty_func(scaled_ccds, axis=0) - np.testing.assert_almost_equal(avg_ccd.uncertainty.array, expected_unc) + np.testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10) @pytest.mark.parametrize( From 282eaee873d251fe1889f33e8b21ff975d19272c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 10:31:45 -0500 Subject: [PATCH 032/111] Use array_api_extra to handle immutable arrays --- ccdproc/combiner.py | 16 +++++----------- ccdproc/core.py | 6 ++---- ccdproc/tests/test_combiner.py | 9 ++++----- ccdproc/tests/test_cosmicray.py | 9 ++------- pyproject.toml | 1 + 5 files changed, 14 insertions(+), 27 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 77fb735c..fc03094d 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -10,6 +10,7 @@ HAS_BOTTLENECK = True import array_api_compat +import array_api_extra as xpx from astropy import log from astropy.nddata import CCDData, StdDevUncertainty from astropy.stats import sigma_clip @@ -439,11 +440,8 @@ def _get_nan_substituted_data(self, data): # Get the data as an unmasked array with masked values filled as NaN if self.data_arr_mask.any(): - # Workaround for array 
libraries that do not allow in-place modification - try: - data[self.data_arr_mask] = xp.nan - except TypeError: - data = data.at[self.data_arr_mask].set(xp.nan) + # Use array_api_extra so that we can use at with all array libraries + data = xpx.at(data)[self.data_arr_mask].set(xp.nan) else: data = data return data diff --git a/ccdproc/core.py b/ccdproc/core.py index 5d799980..b15e253d 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -8,6 +8,7 @@ import warnings import array_api_compat +import array_api_extra as xpx from astropy import nddata, stats from astropy import units as u from astropy.modeling import fitting @@ -1907,10 +1908,7 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # Fun fact: scipy.ndimage ignores the mask, so may as well not # bother with it. # data = np.ma.masked_array(data, (crarr == 1)) mdata = ndimage.median_filter(data, rbox) - try: - ndata[crarr == 1] = mdata[crarr == 1] - except TypeError: - ndata = ndata.at[crarr == 1].set(mdata[crarr == 1]) + ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 7838cfe9..7c9ecae2 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat +import array_api_extra as xpx import astropy import astropy.units as u import numpy as np @@ -625,11 +626,9 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): if mask_point: # Make one pixel really negative so we can clip it and guarantee a resulting # pixel is masked. - # Handle case where array is immutable - try: - ccd_data.data[0, 0] = -1000 - except TypeError: - ccd_data.data = ccd_data.data.at[0, 0].set(-1000) + # Handle case where array is immutable by using array_api_extra, + # which provides at for all array libraries.
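+            # (An aside, sketched here rather than taken from the patch: xpx.at
+            # mirrors the jax-style update API for every backend, so
+            #     arr = xpx.at(arr)[0, 0].set(-1000)
+            # updates a mutable numpy array in place where possible, while an
+            # immutable jax array comes back as a fresh copy -- which is why
+            # the result is re-assigned on the next line.)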
+ ccd_data.data = xpx.at(ccd_data.data)[0, 0].set(-1000) ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index f41d52dd..ced95aa3 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,6 +1,7 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat +import array_api_extra as xpx import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning @@ -35,13 +36,7 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): crflux = 10 * scale * rng.random(ncrays) + (threshold + 15) * scale for i in range(ncrays): y, x = crrays[i] - try: - data.data[y, x] = crflux[i] - except TypeError as e: - try: - data.data = data.data.at[y, x].set(crflux[i]) - except AttributeError as other_e: - raise other_e from e + data.data = xpx.at(data.data)[y, x].set(crflux[i]) def test_cosmicray_lacosmic(): diff --git a/pyproject.toml b/pyproject.toml index cc346c72..2d205637 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ authors = [ ] dependencies = [ "array_api_compat", + "array_api_extra>=0.7.0", "astropy>=6.0.1", "astroscrappy>=1.1.0", "numpy>=1.26", From 5f27cd40cc24115da661376bca1006ee4796c22c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 10:40:44 -0500 Subject: [PATCH 033/111] Use a consistent namespace for arrays --- ccdproc/core.py | 4 +++- ccdproc/tests/test_cosmicray.py | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index b15e253d..2c7176a5 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1907,7 +1907,9 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # Fun fact: scipy.ndimage ignores the mask, so may as well not # bother with it. 
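        # (For context -- a sketch, not part of the patch: scipy.ndimage
        # returns a plain numpy array no matter which library produced the
        # input, so the change just below pulls the result back into the
        # data's namespace:
        #     filtered = ndimage.median_filter(data, rbox)  # numpy comes back
        #     filtered = xp.asarray(filtered)               # back to, e.g., jax
        # keeping every array in cosmicray_median in a single namespace.)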
# data = np.ma.masked_array(data, (crarr == 1)) - mdata = ndimage.median_filter(data, rbox) + + # make sure that mdata is the same type as data + mdata = xp.asarray(ndimage.median_filter(data, rbox)) ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index ced95aa3..177a7f64 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -27,13 +27,14 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): size = data.shape[0] rng = default_rng(99) - crrays = rng.integers(0, size, size=(ncrays, 2)) + xp = array_api_compat.array_namespace(data.data) + crrays = xp.asarray(rng.integers(0, size, size=(ncrays, 2))) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns # add_cosmicrays is highly sensitive to the seed # ideally threshold should be set so it is not sensitive to seed, but # this is not working right now - crflux = 10 * scale * rng.random(ncrays) + (threshold + 15) * scale + crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale) for i in range(ncrays): y, x = crrays[i] data.data = xpx.at(data.data)[y, x].set(crflux[i]) From 5f7f3e0fa0fbc773ad71ea7bfbb29f080f40d375 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:06:17 -0500 Subject: [PATCH 034/111] Clean up a couple more cases to use array_api_extra --- ccdproc/combiner.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index fc03094d..9b77315c 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -1103,22 +1103,14 @@ def combine( if ccd.mask is not None: # Maybe temporary workaround for the mask not being writeable... ccd.mask = ccd.mask.copy() - # The try/except below is to handle array libraries that may not - # allow in-place operations. - try: - ccd.mask[x:xend, y:yend] = comb_tile.mask - except (TypeError, ValueError): - ccd.mask = ccd.mask.at[x:xend, y:yend].set(comb_tile.mask) + # Handle immutable arrays with array_api_extra + ccd.mask = xpx.at(ccd.mask)[x:xend, y:yend].set(comb_tile.mask) if ccd.uncertainty is not None: - # The try/except below is to handle array libraries that may not - # allow in-place operations. - try: - ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array - except TypeError: - ccd.uncertainty.array = ccd.uncertainty.array.at[ - x:xend, y:yend - ].set(comb_tile.uncertainty.array) + # Handle immutable arrays with array_api_extra + ccd.uncertainty.array = xpx.at(ccd.uncertainty.array)[ + x:xend, y:yend + ].set(comb_tile.uncertainty.array) # Free up memory to try to stay under user's limit del comb_tile del tile_combiner From b98f8f59c1cab08ca7d90276ba8e7cc8cfec0652 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:38:23 -0500 Subject: [PATCH 035/111] Change where bottleneck is tested on GitHub Actions --- .github/workflows/ci_tests.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 99d41a90..fd71ca45 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -33,18 +33,21 @@ jobs: # Test the oldest supported dependencies on the oldest supported Python tox_env: 'py311-test-oldestdeps' - - - name: 'ubuntu-py312-bottleneck' + # Do not include bottleneck in this coverage test.
By not including + # it we get a better measure of how we are covered when using the + # array API, which bottleneck short-circuits. + - name: 'ubuntu-py312-coverage' os: ubuntu-latest python: '3.12' - tox_env: 'py312-test-alldeps-bottleneck-cov' + tox_env: 'py312-test-alldeps-cov' - name: 'ubuntu-py311' os: ubuntu-latest python: '3.11' tox_env: 'py311-test-alldeps-numpy124' - - name: 'ubuntu-py312' + # Move bottleneck test to a test without coverage + - name: 'ubuntu-py312-bottleneck' os: ubuntu-latest python: '3.12' tox_env: 'py312-test-alldeps-numpy126' From 6ce2309228ec66d3010ced8e8c0351e3b09e0fc2 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:38:34 -0500 Subject: [PATCH 036/111] Skip coverage of one function --- ccdproc/core.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 2c7176a5..daaf7be5 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -91,7 +91,9 @@ def _is_array(arr): return True -def _percentile_fallback(array, percentiles): +# Ideally this would eventually be covered by tests. Looks like Sparse +# could be used to test this, since it has no percentile... +def _percentile_fallback(array, percentiles): # pragma: no cover """ Try calculating percentile using namespace, otherwise fall back to an implementation that uses sort. As of the 2023 version of the array API From b66abbc3078b407de1c67292e7652b1878f9a798 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 12:59:03 -0500 Subject: [PATCH 037/111] Remove unused argument and logic --- ccdproc/combiner.py | 7 ++----- ccdproc/tests/run_for_memory_profile.py | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 9b77315c..4f24ba59 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -763,13 +763,10 @@ def _calculate_step_sizes(x_size, y_size, num_chunks): return xstep, ystep -def _calculate_size_of_image(ccd, combine_uncertainty_function): +def _calculate_size_of_image(ccd): # If uncertainty_func is given for combine this will create an uncertainty # even if the originals did not have one. In that case we need to create # an empty placeholder.
- xp = array_api_compat.array_namespace(ccd.data) - if ccd.uncertainty is None and combine_uncertainty_function is not None: - ccd.uncertainty = StdDevUncertainty(xp.zeros(ccd.data.shape)) size_of_an_img = ccd.data.nbytes try: @@ -993,7 +990,7 @@ def combine( if ccd.mask is None: ccd.mask = xp.zeros_like(ccd.data, dtype=bool) - size_of_an_img = _calculate_size_of_image(ccd, combine_uncertainty_function) + size_of_an_img = _calculate_size_of_image(ccd) no_of_img = len(img_list) diff --git a/ccdproc/tests/run_for_memory_profile.py b/ccdproc/tests/run_for_memory_profile.py index d9b445a2..50e39532 100644 --- a/ccdproc/tests/run_for_memory_profile.py +++ b/ccdproc/tests/run_for_memory_profile.py @@ -109,7 +109,7 @@ def run_memory_profile( ) ccd = CCDData.read(files[0]) - expected_img_size = _calculate_size_of_image(ccd, None) + expected_img_size = _calculate_size_of_image(ccd) if memory_limit: kwargs["mem_limit"] = memory_limit From e67ac8bc27dc257b035a6a4d5adcc2f20037f8ca Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 27 Mar 2025 13:04:42 -0500 Subject: [PATCH 038/111] Add a test --- ccdproc/tests/test_combiner.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 7c9ecae2..5c2bbb06 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -495,8 +495,13 @@ def test_combine_image_file_collection_input(tmp_path): comb_ccds = combine(ifc.ccds(), method="average") + comb_string = combine( + ",".join(ifc.files_filtered(include_path=True)), method="average" + ) + np.testing.assert_allclose(ccd.data, comb_files.data) np.testing.assert_allclose(ccd.data, comb_ccds.data) + np.testing.assert_allclose(ccd.data, comb_string.data) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the From cdb9c45c4a1f3ca81c973b50a445b1649443b6be Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 18 Jun 2025 16:42:52 -0500 Subject: [PATCH 039/111] Use tox environment to handle testing of different array libraries --- .github/workflows/ci_tests.yml | 7 +- ccdproc/conftest.py | 20 ++++ ccdproc/tests/pytest_fixtures.py | 7 +- ccdproc/tests/test_ccdproc.py | 152 +++++++++++++++---------------- ccdproc/tests/test_memory_use.py | 16 ++-- tox.ini | 6 +- 6 files changed, 117 insertions(+), 91 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index fd71ca45..ea5b783a 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -41,10 +41,11 @@ jobs: python: '3.12' tox_env: 'py312-test-alldeps-cov' - - name: 'ubuntu-py311' + # Run at least one non-numpy array library + - name: 'ubuntu-py313-jax' os: ubuntu-latest - python: '3.11' - tox_env: 'py311-test-alldeps-numpy124' + python: '3.13' + tox_env: 'py313-jax' # Move bottleneck test a test without coverage - name: 'ubuntu-py312-bottleneck' diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py index 38a3277b..cf8af327 100644 --- a/ccdproc/conftest.py +++ b/ccdproc/conftest.py @@ -3,6 +3,7 @@ # this contains imports plugins that configure py.test for astropy tests. # by importing them here in conftest.py they are discoverable by py.test # no matter how it is invoked within the source tree. 
+import os try: # When the pytest_astropy_header package is installed @@ -33,3 +34,22 @@ def pytest_configure(config): PYTEST_HEADER_MODULES["astroscrappy"] = "astroscrappy" PYTEST_HEADER_MODULES["reproject"] = "reproject" PYTEST_HEADER_MODULES.pop("h5py", None) + +# Set up the array library to be used in tests +# What happens here is controlled by an environmental variable +array_library = os.environ.get("CCDPROC_ARRAY_LIBRARY", "numpy").lower() + +match array_library: + case "numpy": + import numpy as testing_array_library # noqa: F401 + + case "jax": + import jax.numpy as testing_array_library # noqa: F401 + + PYTEST_HEADER_MODULES["jax"] = "jax" + + case _: + raise ValueError( + f"Unsupported array library: {array_library}. " + "Supported libraries are 'numpy' and 'jax'." + ) diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index 0c27a94a..9b56290a 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -2,8 +2,6 @@ from shutil import rmtree -# import dask.array as da -import jax.numpy as jnp import numpy as np import pytest from astropy import units as u @@ -51,6 +49,9 @@ def ccd_data( The mean can be changed with the marker @pytest.marker.scale(m) on the test function, where m is the desired mean. """ + # Need the import here to avoid circular import issues + from ..conftest import testing_array_library as array_library + size = data_size scale = data_scale mean = data_mean @@ -61,7 +62,7 @@ def ccd_data( data = rng.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {"my_key": 42, "your_key": "not 42"} - ccd = CCDData(jnp.array(data), unit=u.adu) + ccd = CCDData(array_library.array(data), unit=u.adu) ccd.header = fake_meta return ccd diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index ab4ebc60..f987150a 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -5,7 +5,6 @@ # import array_api_compat import astropy import astropy.units as u -import jax.numpy as np import pytest import skimage from astropy.io import fits @@ -18,6 +17,7 @@ from numpy import random as np_random from numpy import testing as np_testing +from ccdproc.conftest import testing_array_library as xp from ccdproc.core import ( Keyword, ccd_process, @@ -42,7 +42,7 @@ except ImportError: HAS_BLOCK_X_FUNCS = False -_NUMPY_COPY_IF_NEEDED = None # False if np.__version__.startswith("1.") else None +_NUMPY_COPY_IF_NEEDED = None # False if xp.__version__.startswith("1.") else None RNG = np_random.default_rng @@ -80,11 +80,11 @@ def test_create_deviation(u_image, u_gain, u_readnoise, expect_success): ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise) assert ccd_var.uncertainty.array.shape == (10, 10) assert ccd_var.uncertainty.array.size == 100 - assert np.isdtype(ccd_var.uncertainty.array.dtype, "real floating") + assert xp.isdtype(ccd_var.uncertainty.array.dtype, "real floating") if gain is not None: - expected_var = np.sqrt(2 * ccd_data.data + 5**2) / 2 + expected_var = xp.sqrt(2 * ccd_data.data + 5**2) / 2 else: - expected_var = np.sqrt(ccd_data.data + 5**2) + expected_var = xp.sqrt(ccd_data.data + 5**2) np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) assert ccd_var.unit == ccd_data.unit # Uncertainty should *not* have any units -- does it? 
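An aside on the pattern behind these test conversions: in the tests, xp is simply whichever module conftest.py picked via CCDPROC_ARRAY_LIBRARY, while the library code resolves a namespace from the data itself. A minimal sketch of that runtime dispatch, assuming array_api_compat is installed (the function name rms is illustrative, not from the patch):

    import array_api_compat
    import numpy as np

    def rms(arr):
        # Resolve the namespace from the array, then stay inside it.
        xp = array_api_compat.array_namespace(arr)
        return xp.sqrt(xp.mean(arr * arr))

    print(rms(np.arange(5.0)))  # a jax array would take the same path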
@@ -103,7 +103,7 @@ def test_create_deviation_from_negative(): ccd_data, gain=None, readnoise=readnoise, disregard_nan=False ) np_testing.assert_array_equal( - ccd_data.data < 0, np.isnan(ccd_var.uncertainty.array) + ccd_data.data < 0, xp.isnan(ccd_var.uncertainty.array) ) @@ -119,7 +119,7 @@ def test_create_deviation_from_negative_2(): # In-place replacement of values does not work in some array # libraries. ccd_data.data = ccd_data.data * ~mask - expected_var = np.sqrt(ccd_data.data + readnoise.value**2) + expected_var = xp.sqrt(ccd_data.data + readnoise.value**2) np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) @@ -178,7 +178,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): science_data = science_data + oscan + sky # Reconstruct the full image - ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) + ccd_data.data = xp.concat([overscan_data, science_data], axis=overscan_axis) # Test once using the overscan argument to specify the overscan region ccd_data_overscan = subtract_overscan( ccd_data, @@ -280,7 +280,7 @@ def test_subtract_overscan_model(transpose): oscan_region = (slice(None), slice(0, 10)) science_region = (slice(None), slice(10, None)) - yscan, xscan = np.mgrid[0:size, 0:size] / 10.0 + 300.0 + yscan, xscan = xp.mgrid[0:size, 0:size] / 10.0 + 300.0 if transpose: oscan_region = oscan_region[::-1] @@ -298,7 +298,7 @@ def test_subtract_overscan_model(transpose): # image, so we need to do this before we add the new overscan. overscan_data = 0 * ccd_data.data[oscan_region].copy() # Reconstruct the full image - ccd_data.data = np.concat([overscan_data, science_data], axis=overscan_axis) + scan + ccd_data.data = xp.concat([overscan_data, science_data], axis=overscan_axis) + scan ccd_data = subtract_overscan( ccd_data, @@ -328,10 +328,10 @@ def test_subtract_overscan_fails(): ccd_data = ccd_data_func() # Do we get an error if the *image* is neither CCDData nor an array? with pytest.raises(TypeError): - subtract_overscan(3, np.zeros((5, 5))) + subtract_overscan(3, xp.zeros((5, 5))) # Do we get an error if the *overscan* is not an image or an array? with pytest.raises(TypeError): - subtract_overscan(np.zeros((10, 10)), 3, median=False, model=None) + subtract_overscan(xp.zeros((10, 10)), 3, median=False, model=None) # Do we get an error if we specify both overscan and fits_section? with pytest.raises(TypeError): subtract_overscan(ccd_data, overscan=ccd_data[0:10], fits_section="[1:10]") @@ -343,7 +343,7 @@ def test_subtract_overscan_fails(): subtract_overscan(ccd_data, fits_section=5) # Do we raise an error if the input is a plain array? 
with pytest.raises(TypeError): - subtract_overscan(np.zeros((10, 10)), fits_section="[1:10]") + subtract_overscan(xp.zeros((10, 10)), fits_section="[1:10]") def test_trim_image_fits_section_requires_string(): @@ -356,7 +356,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = np.zeros_like(ccd_data) + ccd_data.mask = xp.zeros_like(ccd_data) if uncertainty: err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) @@ -382,8 +382,8 @@ def test_trim_with_wcs_alters_wcs(): ccd_data = ccd_data_func() # WCS construction example pulled from astropy.wcs docs wcs = WCS(naxis=2) - wcs.wcs.crpix = np.array(ccd_data.shape) / 2 - wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) + wcs.wcs.crpix = xp.array(ccd_data.shape) / 2 + wcs.wcs.cdelt = xp.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) @@ -401,7 +401,7 @@ def test_subtract_bias(): bias_level = 5.0 ccd_data.data = ccd_data.data + bias_level ccd_data.header["key"] = "value" - master_bias_array = np.zeros_like(ccd_data.data) + bias_level + master_bias_array = xp.zeros_like(ccd_data.data) + bias_level master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? @@ -416,11 +416,11 @@ def test_subtract_bias_fails(): ccd_data = ccd_data_func(data_size=50) # Should fail if shapes don't match - bias = CCDData(np.array([200, 200]), unit=u.adu) + bias = CCDData(xp.array([200, 200]), unit=u.adu) with pytest.raises(ValueError): subtract_bias(ccd_data, bias) # Should fail because units don't match - bias = CCDData(np.zeros_like(ccd_data), unit=u.meter) + bias = CCDData(xp.zeros_like(ccd_data), unit=u.meter) with pytest.raises(u.UnitsError): subtract_bias(ccd_data, bias) @@ -434,7 +434,7 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): exptime_key = "exposure" exposure_unit = u.second dark_level = 1.7 - master_dark_data = np.zeros_like(ccd_data.data) + dark_level + master_dark_data = xp.zeros_like(ccd_data.data) + dark_level master_dark = CCDData(master_dark_data, unit=u.adu) master_dark.header[exptime_key] = 2 * exptime dark_exptime = master_dark.header[exptime_key] @@ -531,7 +531,7 @@ def test_subtract_dark_fails(): # Fail when the arrays are not the same size with pytest.raises(ValueError): small_master = CCDData(ccd_data) - small_master.data = np.zeros((1, 1)) + small_master.data = xp.zeros((1, 1)) subtract_dark(ccd_data, small_master) @@ -669,7 +669,7 @@ def test_flat_correct_deviation(): ccd_data.unit = u.electron ccd_data = create_deviation(ccd_data, readnoise=5 * u.electron) # Create the flat - data = 2 * np.ones((size, size)) + data = 2 * xp.ones((size, size)) flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) flat = create_deviation(flat, readnoise=0.5 * u.electron) ccd_data = flat_correct(ccd_data, flat) @@ -681,10 +681,10 @@ def test_flat_correct_data_uncertainty(): # Temporarily work around the fact that NDUncertainty explicitly checks # whether the value is a numpy array. dat = CCDData( - np.ones([100, 100]), unit="adu", uncertainty=np_array(np.ones([100, 100])) + xp.ones([100, 100]), unit="adu", uncertainty=np_array(xp.ones([100, 100])) ) # Note flat is set to 10, error, if present, is set to one.
- flat = CCDData(10 * np.ones([100, 100]), unit="adu") + flat = CCDData(10 * xp.ones([100, 100]), unit="adu") res = flat_correct(dat, flat) assert (res.data == dat.data).all() assert (res.uncertainty.array == dat.uncertainty.array).all() @@ -742,7 +742,7 @@ def tran(arr): def test_transform_image(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = np.zeros_like(ccd_data) + ccd_data.mask = xp.zeros_like(ccd_data) ccd_data.mask[10, 10] = 1 if uncertainty: err = RNG().normal(size=ccd_data.shape) @@ -767,23 +767,23 @@ def tran(arr): # Test block_reduce and block_replicate wrapper @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") @pytest.mark.skipif( - (skimage.__version__ < "0.14.2") and ("dev" in np.__version__), + (skimage.__version__ < "0.14.2") and ("dev" in xp.__version__), reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_reduce(): ccd = CCDData( - np.ones((4, 4)), + xp.ones((4, 4)), unit="adu", meta={"testkw": 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4))), + mask=xp.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(xp.ones((4, 4))), ) with pytest.warns(AstropyUserWarning) as w: ccd_summed = block_reduce(ccd, (2, 2)) assert len(w) == 1 assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_summed, CCDData) - assert np.all(ccd_summed.data == 4) + assert xp.all(ccd_summed.data == 4) assert ccd_summed.data.shape == (2, 2) assert ccd_summed.unit == u.adu # Other attributes are set to None. In case the function is modified to @@ -799,11 +799,11 @@ def test_block_reduce(): @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") @pytest.mark.skipif( - (skimage.__version__ < "0.14.2") and ("dev" in np.__version__), + (skimage.__version__ < "0.14.2") and ("dev" in xp.__version__), reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_average(): - data = np.array( + data = xp.array( [ [2.0, 1.0, 2.0, 1.0], [1.0, 1.0, 1.0, 1.0], @@ -815,8 +815,8 @@ def test_block_average(): data, unit="adu", meta={"testkw": 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4))), + mask=xp.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(xp.ones((4, 4))), ) with pytest.warns(AstropyUserWarning) as w: @@ -825,7 +825,7 @@ def test_block_average(): assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_avgd, CCDData) - assert np.all(ccd_avgd.data == 1.25) + assert xp.all(ccd_avgd.data == 1.25) assert ccd_avgd.data.shape == (2, 2) assert ccd_avgd.unit == u.adu # Other attributes are set to None. 
In case the function is modified to @@ -843,11 +843,11 @@ def test_block_average(): @pytest.mark.skipif(not HAS_BLOCK_X_FUNCS, reason="needs astropy >= 1.1.x") def test_block_replicate(): ccd = CCDData( - np.ones((4, 4)), + xp.ones((4, 4)), unit="adu", meta={"testkw": 1}, - mask=np.zeros((4, 4), dtype=bool), - uncertainty=StdDevUncertainty(np.ones((4, 4))), + mask=xp.zeros((4, 4), dtype=bool), + uncertainty=StdDevUncertainty(xp.ones((4, 4))), ) with pytest.warns(AstropyUserWarning) as w: ccd_repl = block_replicate(ccd, (2, 2)) @@ -855,7 +855,7 @@ def test_block_replicate(): assert "following attributes were set" in str(w[0].message) assert isinstance(ccd_repl, CCDData) - assert np.all(ccd_repl.data == 0.25) + assert xp.all(ccd_repl.data == 0.25) assert ccd_repl.data.shape == (8, 8) assert ccd_repl.unit == u.adu # Other attributes are set to None. In case the function is modified to @@ -875,7 +875,7 @@ def test__overscan_schange(): ccd_data = ccd_data_func() old_data = ccd_data.copy() new_data = subtract_overscan(ccd_data, overscan=ccd_data[:, 1], overscan_axis=0) - assert not np.allclose(old_data.data, new_data.data) + assert not xp.allclose(old_data.data, new_data.data) np_testing.assert_allclose(old_data.data, ccd_data.data) @@ -892,7 +892,7 @@ def test_create_deviation_does_not_change_input(): def test_cosmicray_median_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - error = np.zeros_like(ccd_data) + error = xp.ones_like(ccd_data) _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -909,7 +909,7 @@ def test_cosmicray_lacosmic_does_not_change_input(): def test_flat_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) + flat = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit) # Ignore the divide by zero warning that is raised when the flat is zero. 
with warnings.catch_warnings(action="ignore", category=RuntimeWarning): _ = flat_correct(ccd_data, flat=flat) @@ -928,7 +928,7 @@ def test_gain_correct_does_not_change_input(): def test_subtract_bias_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - master_frame = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit) + master_frame = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit) _ = subtract_bias(ccd_data, master=master_frame) np_testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit @@ -945,7 +945,7 @@ def test_trim_image_does_not_change_input(): def test_transform_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - _ = transform_image(ccd_data, np.sqrt) + _ = transform_image(ccd_data, xp.sqrt) np_testing.assert_allclose(original.data, ccd_data) assert original.unit == ccd_data.unit @@ -961,7 +961,7 @@ def wcs_for_testing(shape): # Set up an "Airy's zenithal" projection # Vector properties may be set with Python lists, or Numpy arrays w.wcs.crpix = [shape[0] // 2, shape[1] // 2] - w.wcs.cdelt = np.array([-0.066667, 0.066667]) + w.wcs.cdelt = xp.array([-0.066667, 0.066667]) w.wcs.crval = [0, -90] w.wcs.ctype = ["RA---AIR", "DEC--AIR"] w.wcs.set_pv([(2, 1, 45.0)]) @@ -1033,7 +1033,7 @@ def test_wcs_project_onto_shifted_wcs(): # In the case of a shift, one row and one column should be nan, and they # will share one common nan where they intersect, so we know how many nan # there should be. - assert np.sum(np.isnan(new_ccd.data)) == np.sum(np.array(new_ccd.shape)) - 1 + assert xp.sum(xp.isnan(new_ccd.data)) == xp.sum(xp.array(new_ccd.shape)) - 1 # Use an odd number of pixels to make a well-defined center pixel @@ -1048,10 +1048,10 @@ def test_wcs_project_onto_scale_wcs(): ccd_data.wcs.wcs.crpix += 0.5 # Use uniform input data value for simplicity. - ccd_data.data = np.ones_like(ccd_data.data) + ccd_data.data = xp.ones_like(ccd_data.data) # Make mask zero... - ccd_data.mask = np.zeros_like(ccd_data.data) + ccd_data.mask = xp.zeros_like(ccd_data.data) # ...except the center pixel, which is one. ccd_data.mask[int(ccd_data.wcs.wcs.crpix[0]), int(ccd_data.wcs.wcs.crpix[1])] = 1 @@ -1059,7 +1059,7 @@ def test_wcs_project_onto_scale_wcs(): target_wcs.wcs.cdelt /= 2 # Choice below ensures we are really at the center pixel of an odd range. - target_shape = 2 * np.array(ccd_data.shape) + 1 + target_shape = 2 * xp.array(ccd_data.shape) + 1 target_wcs.wcs.crpix = 2 * target_wcs.wcs.crpix + 1 + 0.5 # Ugly hack for numpy-specific check in astropy.wcs @@ -1074,8 +1074,8 @@ def test_wcs_project_onto_scale_wcs(): assert new_ccd.wcs.wcs.compare(target_wcs.wcs) # Define a cutout from the new array that should match the old. - new_lower_bound = (np.array(new_ccd.shape) - np.array(ccd_data.shape)) // 2 - new_upper_bound = (np.array(new_ccd.shape) + np.array(ccd_data.shape)) // 2 + new_lower_bound = (xp.array(new_ccd.shape) - xp.array(ccd_data.shape)) // 2 + new_upper_bound = (xp.array(new_ccd.shape) + xp.array(ccd_data.shape)) // 2 data_cutout = new_ccd.data[ new_lower_bound[0] : new_upper_bound[0], new_lower_bound[1] : new_upper_bound[1] ] @@ -1086,8 +1086,8 @@ def test_wcs_project_onto_scale_wcs(): # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. 
- new_center = np.array(new_ccd.wcs.wcs.crpix, dtype=int, copy=_NUMPY_COPY_IF_NEEDED) - assert np.all( + new_center = xp.array(new_ccd.wcs.wcs.crpix, dtype=int, copy=_NUMPY_COPY_IF_NEEDED) + assert xp.all( new_ccd.mask[ new_center[0] : new_center[0] + 2, new_center[1] : new_center[1] + 2 ] @@ -1096,7 +1096,7 @@ def test_wcs_project_onto_scale_wcs(): # Those four, and any that reproject made nan because they draw on # pixels outside the footprint of the original image, are the only # pixels that should be masked. - assert new_ccd.mask.sum() == 4 + np.isnan(new_ccd.data).sum() + assert new_ccd.mask.sum() == 4 + xp.isnan(new_ccd.data).sum() def test_ccd_process_does_not_change_input(): @@ -1141,22 +1141,22 @@ def test_ccd_process_parameters_are_appropriate(): def test_ccd_process(): # Test the through ccd_process - ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) + ccd_data = CCDData(10.0 * xp.ones((100, 100)), unit=u.adu) # Rewrite to not change data in-place. - ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) + ccd_data.data = xp.concat([ccd_data.data[:, :-10], 2 * xp.ones((100, 10))], axis=1) ccd_data.meta["testkw"] = 100 - mask = np.zeros((100, 90)) + mask = xp.zeros((100, 90)) - masterbias = CCDData(2.0 * np.ones((100, 90)), unit=u.electron) - masterbias.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterbias = CCDData(2.0 * xp.ones((100, 90)), unit=u.electron) + masterbias.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - dark_frame = CCDData(0.0 * np.ones((100, 90)), unit=u.electron) - dark_frame.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + dark_frame = CCDData(0.0 * xp.ones((100, 90)), unit=u.electron) + dark_frame.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - masterflat = CCDData(10.0 * np.ones((100, 90)), unit=u.electron) - masterflat.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterflat = CCDData(10.0 * xp.ones((100, 90)), unit=u.electron) + masterflat.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) occd = ccd_process( ccd_data, @@ -1178,8 +1178,8 @@ def test_ccd_process(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_allclose(2.0 * xp.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array) np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 @@ -1188,22 +1188,22 @@ def test_ccd_process(): def test_ccd_process_gain_corrected(): # Test the through ccd_process with gain_corrected as False - ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu) + ccd_data = CCDData(10.0 * xp.ones((100, 100)), unit=u.adu) # Rewrite to not change data in-place. 
- ccd_data.data = np.concat([ccd_data.data[:, :-10], 2 * np.ones((100, 10))], axis=1) + ccd_data.data = xp.concat([ccd_data.data[:, :-10], 2 * xp.ones((100, 10))], axis=1) ccd_data.meta["testkw"] = 100 - mask = np.zeros((100, 90)) + mask = xp.zeros((100, 90)) - masterbias = CCDData(4.0 * np.ones((100, 90)), unit=u.adu) - masterbias.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterbias = CCDData(4.0 * xp.ones((100, 90)), unit=u.adu) + masterbias.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - dark_frame = CCDData(0.0 * np.ones((100, 90)), unit=u.adu) - dark_frame.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + dark_frame = CCDData(0.0 * xp.ones((100, 90)), unit=u.adu) + dark_frame.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) - masterflat = CCDData(5.0 * np.ones((100, 90)), unit=u.adu) - masterflat.uncertainty = StdDevUncertainty(np.zeros((100, 90))) + masterflat = CCDData(5.0 * xp.ones((100, 90)), unit=u.adu) + masterflat.uncertainty = StdDevUncertainty(xp.zeros((100, 90))) occd = ccd_process( ccd_data, @@ -1226,8 +1226,8 @@ def test_ccd_process_gain_corrected(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np_testing.assert_allclose(2.0 * np.ones((100, 90)), occd.data) - np_testing.assert_allclose(3.0 * np.ones((100, 90)), occd.uncertainty.array) + np_testing.assert_allclose(2.0 * xp.ones((100, 90)), occd.data) + np_testing.assert_allclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array) np_testing.assert_array_equal(mask, occd.mask) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 diff --git a/ccdproc/tests/test_memory_use.py b/ccdproc/tests/test_memory_use.py index 173e0f96..1e56b709 100644 --- a/ccdproc/tests/test_memory_use.py +++ b/ccdproc/tests/test_memory_use.py @@ -15,12 +15,12 @@ else: memory_profile_present = True -try: - import jax # noqa -except ImportError: - JAX_PRESENT = False -else: - JAX_PRESENT = True + +# Only run memory tests if numpy is the testing array library +from ccdproc.conftest import testing_array_library + +USING_NUMPY_ARRAY_LIBRARY = testing_array_library.__name__ == "numpy" + image_size = 2000 # Square image, so 4000 x 4000 num_files = 10 @@ -37,7 +37,9 @@ def teardown_module(): fil.unlink() -@pytest.mark.skipif(JAX_PRESENT, reason="JAX is present, and does not allow os.fork") +@pytest.mark.skipif( + not USING_NUMPY_ARRAY_LIBRARY, reason="Memory test only done with numpy" +) @pytest.mark.skipif( not platform.startswith("linux"), reason="memory tests only work on linux", diff --git a/tox.ini b/tox.ini index 0ab4ee81..3a859ca5 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,8 @@ isolated_build = true [testenv] setenv = - test: JAX_ENABLE_X64 = True + jax: CCDPROC_ARRAY_LIBRARY = jax + jax: JAX_ENABLE_X64 = True devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple extras = test @@ -23,11 +24,12 @@ description = devdeps: with the latest developer version of key dependencies oldestdeps: with the oldest supported version of key dependencies cov: and test coverage - numpy124: with numpy 1.24.* numpy126: with numpy 1.26.* numpy200: with numpy 2.0.* numpy210: with numpy 2.1.* bottleneck: with bottleneck + jax: with JAX as the array library + # The following provides some specific pinnings for key packages deps = From ef302301aa6bfb3d62a99d9b0e72ef9eece6bffd Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 18 Jun 2025 16:43:54 -0500 Subject: [PATCH 040/111] Convert combiner tests to use Array API --- 
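A note on one detail in the diff below: the array API standard spells casting as a function, xp.astype(x, dtype), rather than numpy's x.astype(dtype) method, which is presumably why the tile-combining code now casts the tile uncertainty with xp.astype. A minimal sketch, assuming array_api_compat is installed:

    import array_api_compat
    import numpy as np

    a = np.ones((2, 2), dtype=np.float64)
    xp = array_api_compat.array_namespace(a)
    b = xp.astype(a, xp.float32)  # portable spelling; jax works the same way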
ccdproc/combiner.py | 4 +- ccdproc/tests/test_combiner.py | 427 +++++++++++++++++---------------- 2 files changed, 219 insertions(+), 212 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 4f24ba59..0f855965 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -330,7 +330,7 @@ def clip_extrema(self, nlow=0, nhigh=0): for i in range(-1 * nhigh, nlow): # create a tuple with the indices where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg]) - self.data_arr_mask[where] = True + self.data_arr_mask = xpx.at(self.data_arr_mask)[where].set(True) # set up min/max clipping algorithms def minmax_clipping(self, min_clip=None, max_clip=None): @@ -1107,7 +1107,7 @@ def combine( # Handle immutable arrays with array_api_extra ccd.uncertainty.array = xpx.at(ccd.uncertainty.array)[ x:xend, y:yend - ].set(comb_tile.uncertainty.array) + ].set(xp.astype(comb_tile.uncertainty.array, ccd.dtype)) # Free up memory to try to stay under user's limit del comb_tile del tile_combiner diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 5c2bbb06..6dd5d71f 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -1,14 +1,13 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat import array_api_extra as xpx -import astropy import astropy.units as u -import numpy as np +import numpy.ma as np_ma import pytest from astropy.nddata import CCDData from astropy.stats import median_absolute_deviation as mad from astropy.utils.data import get_pkg_data_filename -from packaging.version import Version, parse +from numpy import testing as np_testing from ccdproc.combiner import ( Combiner, @@ -17,11 +16,12 @@ combine, sigma_func, ) + +# Set up the array library to be used in tests +from ccdproc.conftest import testing_array_library as xp from ccdproc.image_collection import ImageFileCollection from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func -SUPER_OLD_ASTROPY = parse(astropy.__version__) < Version("4.3.0") - # Several tests have many more NaNs in them than real data. numpy generates # lots of warnings in those cases and it makes more sense to suppress them # than to generate them. 
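One more aside before the hunks below: the scaling helper keeps numpy.ma (imported above as np_ma) even though plain numpy is dropped, presumably because a masked average ignores masked pixels where an ordinary mean would not. A small sketch of the difference, for context:

    import numpy as np
    import numpy.ma as np_ma

    x = np_ma.masked_array([1.0, 2.0, 1000.0], mask=[False, False, True])
    print(np.mean(x.data))   # 334.33..., the masked pixel leaks in
    print(np_ma.average(x))  # 1.5, the masked pixel is ignored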
@@ -33,7 +33,7 @@ def _make_mean_scaler(ccd_data): def scale_by_mean(x): # scale each array to the mean of the first image - return ccd_data.data.mean() / np.ma.average(x) + return ccd_data.data.mean() / np_ma.average(x) return scale_by_mean @@ -63,7 +63,7 @@ def test_ccddata_combiner_objects(): # objects do not have the same size def test_ccddata_combiner_size(): ccd_data = ccd_data_func() - ccd_large = CCDData(np.zeros((200, 100)), unit=u.adu) + ccd_large = CCDData(xp.zeros((200, 100)), unit=u.adu) ccd_list = [ccd_data, ccd_data, ccd_large] with pytest.raises(TypeError): Combiner(ccd_list) # arrays of different sizes should fail @@ -73,7 +73,7 @@ def test_ccddata_combiner_size(): # objects do not have the same units def test_ccddata_combiner_units(): ccd_data = ccd_data_func() - ccd_large = CCDData(np.zeros((100, 100)), unit=u.second) + ccd_large = CCDData(xp.zeros((100, 100)), unit=u.second) ccd_list = [ccd_data, ccd_data, ccd_large] with pytest.raises(TypeError): Combiner(ccd_list) @@ -92,8 +92,8 @@ def test_combiner_create(): def test_combiner_dtype(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] - c = Combiner(ccd_list, dtype=np.float32) - assert c.data_arr.dtype == np.float32 + c = Combiner(ccd_list, dtype=xp.float32) + assert c.data_arr.dtype == xp.float32 avg = c.average_combine() # dtype of average should match input dtype assert avg.dtype == c.dtype @@ -107,8 +107,8 @@ def test_combiner_dtype(): # test mask is created from ccd.data def test_combiner_mask(): - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -136,40 +136,40 @@ def test_weights_shape(): def test_1Dweights(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] - c = Combiner(ccd_list) - c.weights = np.array([1, 5, 10]) - ccd = c.average_combine() - np.testing.assert_almost_equal(ccd.data, 312.5) + combo = Combiner(ccd_list) + combo.weights = xp.array([1, 5, 10]) + ccd = combo.average_combine() + np_testing.assert_allclose(ccd.data, 312.5) with pytest.raises(ValueError): - c.weights = np.array([1, 5, 10, 20]) + combo.weights = xp.array([1, 5, 10, 20]) def test_pixelwise_weights(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] - c = Combiner(ccd_list) - c.weights = np.ones_like(c.data_arr) - c.weights[:, 5, 5] = [1, 5, 10] - ccd = c.average_combine() - np.testing.assert_almost_equal(ccd.data[5, 5], 312.5) - np.testing.assert_almost_equal(ccd.data[0, 0], 0) + combo = Combiner(ccd_list) + combo.weights = xp.ones_like(combo.data_arr) + combo.weights = xpx.at(combo.weights)[:, 5, 5].set(xp.array([1, 5, 10])) + ccd = combo.average_combine() + np_testing.assert_allclose(ccd.data[5, 5], 312.5) + np_testing.assert_allclose(ccd.data[0, 0], 0) # test the min-max rejection def test_combiner_minmax(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + 
CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) @@ -180,9 +180,9 @@ def test_combiner_minmax(): def test_combiner_minmax_max(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) @@ -192,9 +192,9 @@ def test_combiner_minmax_max(): def test_combiner_minmax_min(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) @@ -204,54 +204,54 @@ def test_combiner_minmax_min(): def test_combiner_sigmaclip_high(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] c = Combiner(ccd_list) # using mad for more robust statistics vs. std - c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) + c.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) assert c.data_arr_mask[5].all() def test_combiner_sigmaclip_single_pix(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), ] - c = Combiner(ccd_list) + combo = Combiner(ccd_list) # add a single pixel in another array to check that # that one gets rejected - c.data_arr[0, 5, 5] = 0 - c.data_arr[1, 5, 5] = -5 - c.data_arr[2, 5, 5] = 5 - c.data_arr[3, 5, 5] = -5 - c.data_arr[4, 5, 5] = 25 - c.sigma_clipping(high_thresh=3, low_thresh=None, func=np.ma.median, dev_func=mad) - assert c.data_arr_mask[4, 5, 5] + combo.data_arr = xpx.at(combo.data_arr)[0, 5, 5].set(0) + combo.data_arr = xpx.at(combo.data_arr)[1, 5, 5].set(-5) + combo.data_arr = xpx.at(combo.data_arr)[2, 5, 5].set(5) + combo.data_arr = xpx.at(combo.data_arr)[3, 5, 5].set(-5) + combo.data_arr = xpx.at(combo.data_arr)[4, 5, 5].set(25) + combo.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) + assert combo.data_arr_mask[4, 5, 5] def test_combiner_sigmaclip_low(): ccd_list = [ - CCDData(np.zeros((10, 10)), unit=u.adu), - CCDData(np.zeros((10, 10)) - 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) 
- 10, unit=u.adu), - CCDData(np.zeros((10, 10)) + 10, unit=u.adu), - CCDData(np.zeros((10, 10)) - 1000, unit=u.adu), + CCDData(xp.zeros((10, 10)), unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) + 10, unit=u.adu), + CCDData(xp.zeros((10, 10)) - 1000, unit=u.adu), ] c = Combiner(ccd_list) # using mad for more robust statistics vs. std - c.sigma_clipping(high_thresh=None, low_thresh=3, func=np.ma.median, dev_func=mad) + c.sigma_clipping(high_thresh=None, low_thresh=3, func="median", dev_func=mad) assert c.data_arr_mask[5].all() @@ -296,10 +296,10 @@ def test_combiner_sum_weighted(): ccd_data = CCDData(data=[[0, 1], [2, 3]], unit="adu") ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - c.weights = np.array([1, 2, 3]) + c.weights = xp.array([1, 2, 3]) ccd = c.sum_combine() expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list, strict=True)) - np.testing.assert_almost_equal(ccd, expected_result) + np_testing.assert_allclose(ccd, expected_result) # test weighted sum @@ -309,10 +309,10 @@ def test_combiner_sum_weighted_by_pixel(): c = Combiner(ccd_list) # Weights below are chosen so that every entry in weights_pixel = [[8, 4], [2, 1]] - c.weights = np.array([weights_pixel] * 3) + c.weights = xp.array([weights_pixel] * 3) ccd = c.sum_combine() expected_result = [[24, 24], [24, 24]] - np.testing.assert_almost_equal(ccd, expected_result) + np_testing.assert_allclose(ccd, expected_result) # This warning is generated by numpy and is expected when @@ -323,8 +323,8 @@ def test_combiner_sum_weighted_by_pixel(): ) def test_combiner_mask_average(): # test data combined with mask is created correctly - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -353,16 +353,16 @@ def test_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np.testing.assert_almost_equal(np.median(median_ccd.data), np.median(ccd_data.data)) + np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data)) # Set the scaling manually... 
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape @@ -380,8 +380,8 @@ def test_combiner_scaling_fails(): # test data combined with mask is created correctly def test_combiner_mask_median(): - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -398,8 +398,8 @@ def test_combiner_mask_median(): @pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0:RuntimeWarning") def test_combiner_mask_sum(): # test data combined with mask is created correctly - data = np.zeros((10, 10)) - data[5, 5] = 1 + data = xp.zeros((10, 10)) + data = xpx.at(data)[5, 5].set(1) mask = data == 0 ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] @@ -431,7 +431,7 @@ def test_combine_average_fitsimages(): fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combine_numpyndarray(): @@ -446,10 +446,10 @@ def test_combine_numpyndarray(): c = Combiner(ccd_list) ccd_by_combiner = c.average_combine() - fitsfilename_list = np.array([fitsfile] * 3) + fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) def test_combiner_result_dtype(): @@ -457,18 +457,17 @@ def test_combiner_result_dtype(): The result should have the appropriate dtype not the dtype of the first input.""" - ccd = CCDData(np.ones((3, 3), dtype=np.uint16), unit="adu") + ccd = CCDData(xp.ones((3, 3), dtype=xp.uint16), unit="adu") res = combine([ccd, ccd.multiply(2)]) # The default dtype of Combiner is float64 - assert res.data.dtype == np.float64 - ref = np.ones((3, 3)) * 1.5 - np.testing.assert_allclose(res.data, ref) - + assert res.data.dtype == xp.float64 + ref = xp.ones((3, 3)) * 1.5 + np_testing.assert_allclose(res.data, ref) res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int) # The result dtype should be integer: - assert res.data.dtype == np.int_ - ref = np.ones((3, 3)) * 2 - np.testing.assert_allclose(res.data, ref) + assert res.data.dtype == xp.int_ + ref = xp.ones((3, 3)) * 2 + np_testing.assert_allclose(res.data, ref) def test_combiner_image_file_collection_input(tmp_path): @@ -479,7 +478,7 @@ def test_combiner_image_file_collection_input(tmp_path): ifc = ImageFileCollection(tmp_path) comb = Combiner(ifc.ccds()) - np.testing.assert_allclose(ccd.data, comb.average_combine().data) + np_testing.assert_allclose(ccd.data, comb.average_combine().data) def test_combine_image_file_collection_input(tmp_path): @@ -499,9 +498,9 @@ def test_combine_image_file_collection_input(tmp_path): ",".join(ifc.files_filtered(include_path=True)), method="average" ) - np.testing.assert_allclose(ccd.data, comb_files.data) - np.testing.assert_allclose(ccd.data, comb_ccds.data) - np.testing.assert_allclose(ccd.data, comb_string.data) + 
np_testing.assert_allclose(ccd.data, comb_files.data) + np_testing.assert_allclose(ccd.data, comb_ccds.data) + np_testing.assert_allclose(ccd.data, comb_string.data) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the @@ -519,7 +518,7 @@ def test_combine_average_ccddata(): avgccd = combine(ccd_list, output_file=None, method="average", unit=u.adu) # averaging same ccdData should give back same images - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -536,7 +535,7 @@ def test_combine_limitedmem_fitsimages(): fitsfilename_list, output_file=None, method="average", mem_limit=1e6, unit=u.adu ) # averaging same ccdData should give back same images - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test combiner convenience function reads fits file and @@ -561,7 +560,7 @@ def test_combine_limitedmem_scale_fitsimages(): unit=u.adu, ) - np.testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) # test the optional uncertainty function in average_combine @@ -569,14 +568,14 @@ def test_average_combine_uncertainty(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - ccd = c.average_combine(uncertainty_func=np.sum) - uncert_ref = np.sum(c.data_arr, 0) / np.sqrt(3) - np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + ccd = c.average_combine(uncertainty_func=xp.sum) + uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method="average", combine_uncertainty_function=np.sum) - np.testing.assert_allclose(ccd.data, ccd2.data) - np.testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + ccd2 = combine(ccd_list, method="average", combine_uncertainty_function=xp.sum) + np_testing.assert_allclose(ccd.data, ccd2.data) + np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) # test the optional uncertainty function in median_combine @@ -584,14 +583,14 @@ def test_median_combine_uncertainty(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - ccd = c.median_combine(uncertainty_func=np.sum) - uncert_ref = np.sum(c.data_arr, 0) / np.sqrt(3) - np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + ccd = c.median_combine(uncertainty_func=xp.sum) + uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method="median", combine_uncertainty_function=np.sum) - np.testing.assert_allclose(ccd.data, ccd2.data) - np.testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + ccd2 = combine(ccd_list, method="median", combine_uncertainty_function=xp.sum) + np_testing.assert_allclose(ccd.data, ccd2.data) + np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) # test the optional uncertainty function in sum_combine @@ -599,14 +598,14 @@ def test_sum_combine_uncertainty(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - ccd = c.sum_combine(uncertainty_func=np.sum) - uncert_ref = np.sum(c.data_arr, 0) * np.sqrt(3) - 
np.testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + ccd = c.sum_combine(uncertainty_func=xp.sum) + uncert_ref = xp.sum(c.data_arr, 0) * xp.sqrt(3) + np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call - ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=np.sum) - np.testing.assert_allclose(ccd.data, ccd2.data) - np.testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=xp.sum) + np_testing.assert_allclose(ccd.data, ccd2.data) + np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) # Ignore warnings generated because most values are masked @@ -649,7 +648,7 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): ccd_list, method=combine_method_name, minmax_clip=True, minmax_clip_min=-100 ) - np.testing.assert_allclose( + np_testing.assert_allclose( ccd_comb.uncertainty.array, expected_result.uncertainty.array ) @@ -668,7 +667,7 @@ def test_combine_overwrite_output(tmp_path): """ output_file = tmp_path / "fake.fits" - ccd = CCDData(np.ones((3, 3)), unit="adu") + ccd = CCDData(xp.ones((3, 3)), unit="adu") # Make sure we have a file to overwrite ccd.write(output_file) @@ -685,88 +684,94 @@ def test_combine_overwrite_output(tmp_path): res_from_disk = CCDData.read(output_file) # Data should be the same - np.testing.assert_allclose(res.data, res_from_disk.data) + np_testing.assert_allclose(res.data, res_from_disk.data) # test resulting uncertainty is corrected for the number of images def test_combiner_uncertainty_average(): ccd_list = [ - CCDData(np.ones((10, 10)), unit=u.adu), - CCDData(np.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)), unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.average_combine() # Just the standard deviation of ccd data. - ref_uncertainty = np.ones((10, 10)) / 2 + ref_uncertainty = xp.ones((10, 10)) / 2 # Correction because we combined two images. - ref_uncertainty /= np.sqrt(2) - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty /= xp.sqrt(2) + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) def test_combiner_uncertainty_average_mask(): - mask = np.zeros((10, 10), dtype=np.bool_) - mask[5, 5] = True - ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) + mask = xp.zeros((10, 10), dtype=bool) + mask = xpx.at(mask)[5, 5].set(True) + ccd_with_mask = CCDData(xp.ones((10, 10)), unit=u.adu, mask=mask) ccd_list = [ ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)) * 3, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.average_combine() # Just the standard deviation of ccd data. - ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3]) + ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.array([1, 2, 3])) # Correction because we combined two images. 
- ref_uncertainty /= np.sqrt(3) - ref_uncertainty[5, 5] = np.std([2, 3]) / np.sqrt(2) - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty /= xp.sqrt(3) + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( + xp.std(xp.array([2, 3])) / xp.sqrt(2) + ) + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) def test_combiner_uncertainty_median_mask(): mad_to_sigma = 1.482602218505602 - mask = np.zeros((10, 10), dtype=np.bool_) - mask[5, 5] = True - ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) + mask = xp.zeros((10, 10), dtype=bool) + mask = xpx.at(mask)[5, 5].set(True) + ccd_with_mask = CCDData(xp.ones((10, 10)), unit=u.adu, mask=mask) ccd_list = [ ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)) * 3, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.median_combine() # Just the standard deviation of ccd data. - ref_uncertainty = np.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3]) + ref_uncertainty = xp.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3]) # Correction because we combined two images. - ref_uncertainty /= np.sqrt(3) # 0.855980789955 - ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2) # 0.524179041254 - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty /= xp.sqrt(3) # 0.855980789955 + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( + mad_to_sigma * mad([2, 3]) / xp.sqrt(2) + ) # 0.524179041254 + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) # test resulting uncertainty is corrected for the number of images (with mask) def test_combiner_uncertainty_sum_mask(): - mask = np.zeros((10, 10), dtype=np.bool_) - mask[5, 5] = True - ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask) + mask = xp.zeros((10, 10), dtype=bool) + mask = xpx.at(mask)[5, 5].set(True) + ccd_with_mask = CCDData(xp.ones((10, 10)), unit=u.adu, mask=mask) ccd_list = [ ccd_with_mask, - CCDData(np.ones((10, 10)) * 2, unit=u.adu), - CCDData(np.ones((10, 10)) * 3, unit=u.adu), + CCDData(xp.ones((10, 10)) * 2, unit=u.adu), + CCDData(xp.ones((10, 10)) * 3, unit=u.adu), ] c = Combiner(ccd_list) ccd = c.sum_combine() # Just the standard deviation of ccd data. 
- ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3]) - ref_uncertainty *= np.sqrt(3) - ref_uncertainty[5, 5] = np.std([2, 3]) * np.sqrt(2) - np.testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.array([1, 2, 3])) + ref_uncertainty *= xp.sqrt(3) + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( + xp.std(xp.array([2, 3])) * xp.sqrt(2) + ) + np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) def test_combiner_3d(): - data1 = CCDData(3 * np.ones((5, 5, 5)), unit=u.adu) - data2 = CCDData(2 * np.ones((5, 5, 5)), unit=u.adu) - data3 = CCDData(4 * np.ones((5, 5, 5)), unit=u.adu) + data1 = CCDData(3 * xp.ones((5, 5, 5)), unit=u.adu) + data2 = CCDData(2 * xp.ones((5, 5, 5)), unit=u.adu) + data3 = CCDData(4 * xp.ones((5, 5, 5)), unit=u.adu) ccd_list = [data1, data2, data3] @@ -776,16 +781,16 @@ def test_combiner_3d(): ccd = c.average_combine() assert ccd.shape == (5, 5, 5) - np.testing.assert_allclose(ccd.data, data1) + np_testing.assert_allclose(ccd.data, data1) def test_3d_combiner_with_scaling(): ccd_data = ccd_data_func() # The factors below are not particularly important; just avoid anything # whose average is 1. - ccd_data = CCDData(np.ones((5, 5, 5)), unit=u.adu) - ccd_data_lower = CCDData(3 * np.ones((5, 5, 5)), unit=u.adu) - ccd_data_higher = CCDData(0.9 * np.ones((5, 5, 5)), unit=u.adu) + ccd_data = CCDData(xp.ones((5, 5, 5)), unit=u.adu) + ccd_data_lower = CCDData(3 * xp.ones((5, 5, 5)), unit=u.adu) + ccd_data_higher = CCDData(0.9 * xp.ones((5, 5, 5)), unit=u.adu) combiner = Combiner([ccd_data, ccd_data_higher, ccd_data_lower]) scale_by_mean = _make_mean_scaler(ccd_data) @@ -793,33 +798,33 @@ def test_3d_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np.testing.assert_almost_equal(np.median(median_ccd.data), np.median(ccd_data.data)) + np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data)) # Set the scaling manually... 
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np.testing.assert_almost_equal(avg_ccd.data.mean(), ccd_data.data.mean()) + np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape def test_clip_extrema_3d(): ccdlist = [ - CCDData(np.ones((3, 3, 3)) * 90.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 20.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 10.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 40.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 25.0, unit="adu"), - CCDData(np.ones((3, 3, 3)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 3, 3)) * 35.0, unit="adu"), ] c = Combiner(ccdlist) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() - expected = CCDData(np.ones((3, 3, 3)) * 30, unit="adu") - np.testing.assert_allclose(result, expected) + expected = CCDData(xp.ones((3, 3, 3)) * 30, unit="adu") + np_testing.assert_allclose(result, expected) @pytest.mark.parametrize( @@ -838,16 +843,16 @@ def test_writeable_after_combine(tmpdir, comb_func): def test_clip_extrema(): ccdlist = [ - CCDData(np.ones((3, 5)) * 90.0, unit="adu"), - CCDData(np.ones((3, 5)) * 20.0, unit="adu"), - CCDData(np.ones((3, 5)) * 10.0, unit="adu"), - CCDData(np.ones((3, 5)) * 40.0, unit="adu"), - CCDData(np.ones((3, 5)) * 25.0, unit="adu"), - CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 35.0, unit="adu"), ] - ccdlist[0].data[0, 1] = 3.1 - ccdlist[1].data[1, 2] = 100.1 - ccdlist[1].data[2, 0] = 100.1 + ccdlist[0].data = xpx.at(ccdlist[0].data)[0, 1].set(3.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[1, 2].set(100.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) c = Combiner(ccdlist) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() @@ -856,21 +861,21 @@ def test_clip_extrema(): [30.0, 30.0, 47.5, 30.0, 30.0], [47.5, 30.0, 30.0, 30.0, 30.0], ] - np.testing.assert_allclose(result, expected) + np_testing.assert_allclose(result, expected) def test_clip_extrema_via_combine(): ccdlist = [ - CCDData(np.ones((3, 5)) * 90.0, unit="adu"), - CCDData(np.ones((3, 5)) * 20.0, unit="adu"), - CCDData(np.ones((3, 5)) * 10.0, unit="adu"), - CCDData(np.ones((3, 5)) * 40.0, unit="adu"), - CCDData(np.ones((3, 5)) * 25.0, unit="adu"), - CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 35.0, unit="adu"), ] - ccdlist[0].data[0, 1] = 3.1 - ccdlist[1].data[1, 2] = 100.1 - ccdlist[1].data[2, 0] = 100.1 + ccdlist[0].data = xpx.at(ccdlist[0].data)[0, 1].set(3.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[1, 2].set(100.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) result = combine( ccdlist, clip_extrema=True, @@ -882,26 +887,26 @@ def test_clip_extrema_via_combine(): [30.0, 30.0, 47.5, 30.0, 30.0], [47.5, 30.0, 30.0, 30.0, 
30.0], ] - np.testing.assert_allclose(result, expected) + np_testing.assert_allclose(result, expected) def test_clip_extrema_with_other_rejection(): ccdlist = [ - CCDData(np.ones((3, 5)) * 90.0, unit="adu"), - CCDData(np.ones((3, 5)) * 20.0, unit="adu"), - CCDData(np.ones((3, 5)) * 10.0, unit="adu"), - CCDData(np.ones((3, 5)) * 40.0, unit="adu"), - CCDData(np.ones((3, 5)) * 25.0, unit="adu"), - CCDData(np.ones((3, 5)) * 35.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 10.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 40.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 25.0, unit="adu"), + CCDData(xp.ones((3, 5)) * 35.0, unit="adu"), ] - ccdlist[0].data[0, 1] = 3.1 - ccdlist[1].data[1, 2] = 100.1 - ccdlist[1].data[2, 0] = 100.1 + ccdlist[0].data = xpx.at(ccdlist[0].data)[0, 1].set(3.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[1, 2].set(100.1) + ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) c = Combiner(ccdlist) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask[1, 1, 2] = True + c.data_arr_mask = xpx.at(c.data_arr_mask)[1, 1, 2].set(True) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask[3, 0, 0] = True + c.data_arr_mask = xpx.at(c.data_arr_mask)[3, 0, 0].set(True) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() @@ -910,7 +915,7 @@ def test_clip_extrema_with_other_rejection(): [30.0, 30.0, 47.5, 30.0, 30.0], [47.5, 30.0, 30.0, 30.0, 30.0], ] - np.testing.assert_allclose(result, expected) + np_testing.assert_allclose(result, expected) # The expected values below assume an image that is 2000x2000 @@ -966,7 +971,7 @@ def test_combiner_with_scaling_uncertainty(comb_func): scale_by_mean = _make_mean_scaler(ccd_data) combiner.scaling = scale_by_mean - scaled_ccds = np.array( + scaled_ccds = xp.array( [ ccd_data.data * scale_by_mean(ccd_data.data), ccd_data_lower.data * scale_by_mean(ccd_data_lower.data), @@ -977,14 +982,13 @@ def test_combiner_with_scaling_uncertainty(comb_func): avg_ccd = getattr(combiner, comb_func)() if comb_func != "median_combine": - xp = array_api_compat.array_namespace(ccd_data.data) uncertainty_func = _default_std(xp=xp) else: uncertainty_func = sigma_func expected_unc = uncertainty_func(scaled_ccds, axis=0) - np.testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10) + np_testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10) @pytest.mark.parametrize( @@ -995,8 +999,8 @@ def test_user_supplied_combine_func_that_relies_on_masks(comb_func): # does not affect results when the user supplies a function that # uses masks to screen out bad data. - data = np.ones((10, 10)) - data[5, 5] = 2 + data = xp.ones((10, 10)) + data = xpx.at(data)[5, 5].set(2) mask = data == 2 ccd = CCDData(data, unit=u.adu, mask=mask) # Same, but no mask @@ -1025,14 +1029,17 @@ def sum_func(_, axis=axis): actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask)) elif comb_func == "average_combine": expected_result = data - actual_result = c.average_combine(scale_func=np.mean) + actual_result = c.average_combine(scale_func=xp.mean) elif comb_func == "median_combine": expected_result = data - actual_result = c.median_combine(median_func=np.median) + if not hasattr(xp, "median"): + # If the array API does not have a median function, we + # cannot test this. 
+            pytest.skip("The array library does not support median")
+        actual_result = c.median_combine(median_func=xp.median)

     # Two of the three values are masked, so no matter what the combination
     # method is the result in this pixel should be 2.
-    expected_result[5, 5] = 2
+    expected_result = xpx.at(expected_result)[5, 5].set(2)

-    # THIS IS A REAL TEST FAILURE!!!
-    np.testing.assert_almost_equal(expected_result, actual_result)
+    np_testing.assert_allclose(expected_result, actual_result)

From fda5ad46a8752d205d3a6e1046944f347ce61a34 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Wed, 18 Jun 2025 17:04:46 -0500
Subject: [PATCH 041/111] Remove unnecessary copy argument

---
 ccdproc/tests/test_ccdproc.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index f987150a..b784a3da 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -42,8 +42,6 @@
 except ImportError:
     HAS_BLOCK_X_FUNCS = False

-_NUMPY_COPY_IF_NEEDED = None  # False if xp.__version__.startswith("1.") else None
-
 RNG = np_random.default_rng

 # import dask.array as da
@@ -1086,7 +1084,7 @@ def test_wcs_project_onto_scale_wcs():

     # Mask should be true for four pixels (all nearest neighbors)
     # of the single pixel we masked initially.
-    new_center = xp.array(new_ccd.wcs.wcs.crpix, dtype=int, copy=_NUMPY_COPY_IF_NEEDED)
+    new_center = xp.array(new_ccd.wcs.wcs.crpix, dtype=int)
     assert xp.all(
         new_ccd.mask[
             new_center[0] : new_center[0] + 2, new_center[1] : new_center[1] + 2

From 03d413ac2bea16afbf019ed2eb77e733ddf51509 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Wed, 18 Jun 2025 17:05:32 -0500
Subject: [PATCH 042/111] Use the array_api_compat numpy namespace instead of
 numpy

This is really only needed for numpy 1.X since numpy 2.X is array API
compliant.
---
 ccdproc/conftest.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py
index cf8af327..8a082dc4 100644
--- a/ccdproc/conftest.py
+++ b/ccdproc/conftest.py
@@ -5,6 +5,8 @@
 # no matter how it is invoked within the source tree.
import os +import array_api_compat # noqa: F401 + try: # When the pytest_astropy_header package is installed from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS @@ -41,7 +43,7 @@ def pytest_configure(config): match array_library: case "numpy": - import numpy as testing_array_library # noqa: F401 + import array_api_compat.numpy as testing_array_library # noqa: F401 case "jax": import jax.numpy as testing_array_library # noqa: F401 From dfc98cf9aeb59287abc23559fe2f6532d8185666 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 19 Jun 2025 21:21:27 -0500 Subject: [PATCH 043/111] Add test against dask and fix bugs uncovered by tests --- ccdproc/combiner.py | 27 +++++++++++++--- ccdproc/conftest.py | 5 +++ ccdproc/core.py | 53 ++++++++++++++++++++------------ ccdproc/tests/pytest_fixtures.py | 2 +- ccdproc/tests/test_ccdproc.py | 5 +-- ccdproc/tests/test_combiner.py | 43 +++++++++++++++++++++----- ccdproc/tests/test_cosmicray.py | 8 +++-- ccdproc/tests/test_rebin.py | 27 +++++++++++++--- tox.ini | 3 ++ 9 files changed, 130 insertions(+), 43 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 0f855965..db74a7c8 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -15,6 +15,8 @@ from astropy.nddata import CCDData, StdDevUncertainty from astropy.stats import sigma_clip from astropy.utils import deprecated_renamed_argument +from numpy import mgrid as np_mgrid +from numpy import ravel_multi_index as np_ravel_multi_index from .core import sigma_func @@ -324,13 +326,25 @@ def clip_extrema(self, nlow=0, nhigh=0): nhigh = 0 argsorted = xp.argsort(self.data_arr, axis=0) - mg = xp.mgrid[ - [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] - ] + # Not every array package has mgrid, so make it in numpy and convert it to the + # array package used for the data. + mg = xp.asarray( + np_mgrid[ + [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] + ] + ) for i in range(-1 * nhigh, nlow): # create a tuple with the indices - where = tuple([argsorted[i, :, :].ravel()] + [i.ravel() for i in mg]) - self.data_arr_mask = xpx.at(self.data_arr_mask)[where].set(True) + where = tuple([argsorted[i, :, :].ravel()] + [v.ravel() for v in mg]) + # Some array libraries don't support indexing with arrays in multiple + # dimensions, so we need to flatten the mask array, set the mask + # values for a flattened array, and then reshape it back to the + # original shape. 
+            flat_index = np_ravel_multi_index(where, self.data_arr.shape)
+            self.data_arr_mask = xp.reshape(
+                xpx.at(xp.reshape(self.data_arr_mask, (-1,)))[flat_index].set(True),
+                self.data_arr.shape,
+            )

     # set up min/max clipping algorithms
     def minmax_clipping(self, min_clip=None, max_clip=None):
@@ -528,6 +542,9 @@ def median_combine(
             uncertainty = uncertainty_func(data, axis=0, ignore_nan=True)
         else:
             uncertainty = uncertainty_func(data, axis=0)
+        # Depending on how the uncertainty was calculated it may or may not
+        # be an array of the same class as the data, so make sure it is.
+        uncertainty = xp.asarray(uncertainty)
         # Divide uncertainty by the number of pixel (#309)
         uncertainty /= xp.sqrt(len(self.data_arr) - masked_values)
         # Convert uncertainty to plain numpy array (#351)
diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py
index 8a082dc4..2361270a 100644
--- a/ccdproc/conftest.py
+++ b/ccdproc/conftest.py
@@ -50,6 +50,11 @@ def pytest_configure(config):

             PYTEST_HEADER_MODULES["jax"] = "jax"

+        case "dask":
+            import array_api_compat.dask.array as testing_array_library  # noqa: F401
+
+            PYTEST_HEADER_MODULES["dask"] = "dask"
+
         case _:
             raise ValueError(
                 f"Unsupported array library: {array_library}. "
diff --git a/ccdproc/core.py b/ccdproc/core.py
index daaf7be5..b80d27b1 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -8,7 +8,6 @@
 import warnings

 import array_api_compat
-import array_api_extra as xpx
 from astropy import nddata, stats
 from astropy import units as u
 from astropy.modeling import fitting
@@ -19,6 +18,7 @@
 from astropy.units.quantity import Quantity
 from astropy.utils import deprecated, deprecated_renamed_argument
 from astropy.wcs.utils import proj_plane_pixel_area
+from numpy import mgrid as np_mgrid
 from packaging import version as pkgversion
 from scipy import ndimage

@@ -573,7 +573,9 @@ def subtract_overscan(
         of = fitting.LinearLSQFitter()
         yarr = xp.arange(len(oscan))
         oscan = of(model, yarr, oscan)
-        oscan = oscan(yarr)
+        # The model will return something array-like but it may not be the same array
+        # library that we started with, so convert it back to the original
+        oscan = xp.asarray(oscan(yarr))
         if overscan_axis == 1:
             oscan = xp.reshape(oscan, (oscan.size, 1))
         else:
@@ -878,6 +880,8 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None):
     ccd : `~astropy.nddata.CCDData`
         CCDData object with flat corrected.
     """
+    # Get the array namespace
+    xp = array_api_compat.array_namespace(ccd.data)
     # Use the min_value to replace any values in the flat
     use_flat = flat
     if min_value is not None:
@@ -895,7 +899,7 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None):
             raise ValueError("norm_value must be greater than zero.")
     else:
         # norm_value was not set, use mean of the image.
-        flat_mean = use_flat.data.mean() * use_flat.unit
+        flat_mean = xp.mean(use_flat.data) * use_flat.unit

     # Normalize the flat.
flat_normed = use_flat.divide(flat_mean) @@ -1296,21 +1300,7 @@ def rebin(ccd, newshape): except AttributeError as e: raise TypeError("ccd is not an ndarray or a CCDData object.") from e - if isinstance(ccd, xp.ndarray): - - # check to see that the two arrays are going to be the same length - if len(ccd.shape) != len(newshape): - raise ValueError("newshape does not have the same dimensions as " "ccd.") - - slices = [ - slice(0, old, old / new) - for old, new in zip(ccd.shape, newshape, strict=True) - ] - coordinates = xp.mgrid[slices] - indices = coordinates.astype("i") - return ccd[tuple(indices)] - - elif isinstance(ccd, CCDData): + if isinstance(ccd, CCDData): # check to see that the two arrays are going to be the same length if len(ccd.shape) != len(newshape): raise ValueError("newshape does not have the same dimensions as ccd.") @@ -1329,7 +1319,29 @@ def rebin(ccd, newshape): return nccd else: - raise TypeError("ccd is not an ndarray or a CCDData object.") + # check to see that the two arrays are going to be the same length + if len(ccd.shape) != len(newshape): + raise ValueError("newshape does not have the same dimensions as " "ccd.") + + slices = [ + slice(0, old, old / new) + for old, new in zip(ccd.shape, newshape, strict=True) + ] + + # Not every array package has mgrid, so we do the mgrid with + # numpy and convert to the array package used by ccd.data. + + coordinates = xp.asarray(np_mgrid[slices]) + indices = coordinates.astype("i") + + try: + result = ccd[tuple(indices)] + except Exception as e: + raise TypeError( + f"The array library {xp.__name__} does not support this method of " + "rebinning. Please use block_reduce or block_replicate instead." + ) from e + return result def block_reduce(ccd, block_size, func=None): @@ -1912,7 +1924,8 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): # make sure that mdata is the same type as data mdata = xp.asarray(ndimage.median_filter(data, rbox)) - ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) + ndata = xp.where(crarr == 1, mdata, data) + # ndata = xpx.at(ndata)[crarr == 1].set(mdata[crarr == 1]) return ndata, crarr elif isinstance(ccd, CCDData): diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py index 9b56290a..7fa65f44 100644 --- a/ccdproc/tests/pytest_fixtures.py +++ b/ccdproc/tests/pytest_fixtures.py @@ -62,7 +62,7 @@ def ccd_data( data = rng.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {"my_key": 42, "your_key": "not 42"} - ccd = CCDData(array_library.array(data), unit=u.adu) + ccd = CCDData(array_library.asarray(data), unit=u.adu) ccd.header = fake_meta return ccd diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index b784a3da..5f1629b2 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -14,6 +14,7 @@ from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS from numpy import array as np_array +from numpy import mgrid as np_mgrid from numpy import random as np_random from numpy import testing as np_testing @@ -278,7 +279,7 @@ def test_subtract_overscan_model(transpose): oscan_region = (slice(None), slice(0, 10)) science_region = (slice(None), slice(10, None)) - yscan, xscan = xp.mgrid[0:size, 0:size] / 10.0 + 300.0 + yscan, xscan = xp.asarray(np_mgrid[0:size, 0:size]) / 10.0 + 300.0 if transpose: oscan_region = oscan_region[::-1] @@ -907,7 +908,7 @@ def test_cosmicray_lacosmic_does_not_change_input(): def test_flat_correct_does_not_change_input(): ccd_data = 
ccd_data_func()
     original = ccd_data.copy()
-    flat = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit)
+    flat = CCDData(xp.zeros_like(ccd_data.data), unit=ccd_data.unit)
     # Ignore the divide by zero warning that is raised when the flat is zero.
     with warnings.catch_warnings(action="ignore", category=RuntimeWarning):
         _ = flat_correct(ccd_data, flat=flat)
diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 6dd5d71f..c20fb030 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -7,6 +7,7 @@
 from astropy.nddata import CCDData
 from astropy.stats import median_absolute_deviation as mad
 from astropy.utils.data import get_pkg_data_filename
+from numpy import median as np_median
 from numpy import testing as np_testing

 from ccdproc.combiner import (
@@ -357,7 +358,21 @@ def test_combiner_with_scaling():
     assert avg_ccd.shape == ccd_data.shape
     median_ccd = combiner.median_combine()
     # Does median also scale to the correct value?
-    np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data))
+    # Some array libraries do not have a median, and median is not part of the
+    # standard array API, so we use numpy's median here.
+    # Odd; for dask, which does not have a full median, even falling back to numpy does
+    # not work. For some reason the call to np_median fails. I suppose this is maybe
+    # because dask just adds a median to its task list/compute graph thingy
+    # and then tries to evaluate it itself?
+
+    # Try doing a compute on the data first, and if that fails it is no big deal
+    try:
+        med_ccd = median_ccd.data.compute()
+        med_inp_data = ccd_data.data.compute()
+    except AttributeError:
+        pass
+
+    np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data))

     # Set the scaling manually...
     combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)]
@@ -465,7 +480,7 @@ def test_combiner_result_dtype():
     np_testing.assert_allclose(res.data, ref)
     res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int)
     # The result dtype should be integer:
-    assert res.data.dtype == xp.int_
+    assert xp.isdtype(res.data, "integral")
     ref = xp.ones((3, 3)) * 2
     np_testing.assert_allclose(res.data, ref)

@@ -608,10 +623,12 @@ def test_sum_combine_uncertainty():
     np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array)


-# Ignore warnings generated because most values are masked
+# Ignore warnings generated because most values are masked and we divide
+# by zero in at least one place
 @pytest.mark.filterwarnings(
     "ignore:Mean of empty slice:RuntimeWarning",
     "ignore:Degrees of freedom <= 0:RuntimeWarning",
+    "ignore:invalid value encountered in divide:RuntimeWarning",
 )
 @pytest.mark.parametrize("mask_point", [True, False])
 @pytest.mark.parametrize(
@@ -741,9 +758,12 @@ def test_combiner_uncertainty_median_mask():
     ref_uncertainty = xp.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3])
     # Correction because we combined two images.
     ref_uncertainty /= xp.sqrt(3)  # 0.855980789955
-    ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set(
-        mad_to_sigma * mad([2, 3]) / xp.sqrt(2)
-    )  # 0.524179041254
+    # It turns out that the expression below evaluates to a np.float64, which
+    # introduces numpy into the array namespace, which raises an error
+    # when array_api_compat tries to figure out the namespace. Casting
+    # it to a regular float fixes that.
+ med_value = float(mad_to_sigma * mad([2, 3]) / xp.sqrt(2)) + ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set(med_value) # 0.524179041254 np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) @@ -802,7 +822,14 @@ def test_3d_combiner_with_scaling(): assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? - np_testing.assert_allclose(xp.median(median_ccd.data), xp.median(ccd_data.data)) + # Once again, use numpy to find the median + try: + med_ccd = median_ccd.data.compute() + med_inp_data = ccd_data.data.compute() + except AttributeError: + pass + + np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) # Set the scaling manually... combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] @@ -841,7 +868,7 @@ def test_writeable_after_combine(tmpdir, comb_func): ccd2.write(tmp_file.strpath) -def test_clip_extrema(): +def test_clip_extrema_moo(): ccdlist = [ CCDData(xp.ones((3, 5)) * 90.0, unit="adu"), CCDData(xp.ones((3, 5)) * 20.0, unit="adu"), diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index 177a7f64..ab506d03 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,7 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst import array_api_compat -import array_api_extra as xpx import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning @@ -35,9 +34,14 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): # ideally threshold should be set so it is not sensitive to seed, but # this is not working right now crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale) + + # Some array libraris (Dask) do not support setting individual elements, + # so use nuympy. 
+ data_as_np = np_array(data.data) for i in range(ncrays): y, x = crrays[i] - data.data = xpx.at(data.data)[y, x].set(crflux[i]) + data_as_np[y, x] = crflux[i] + data.data = xp.asarray(data_as_np) def test_cosmicray_lacosmic(): diff --git a/ccdproc/tests/test_rebin.py b/ccdproc/tests/test_rebin.py index 4dda68f6..d240f169 100644 --- a/ccdproc/tests/test_rebin.py +++ b/ccdproc/tests/test_rebin.py @@ -34,7 +34,11 @@ def test_rebin_larger(): ccd_data = ccd_data_func(data_size=10) a = ccd_data.data with pytest.warns(AstropyDeprecationWarning): - b = rebin(a, (20, 20)) + try: + b = rebin(a, (20, 20)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") assert b.shape == (20, 20) np.testing.assert_almost_equal(b.sum(), 4 * a.sum()) @@ -45,8 +49,12 @@ def test_rebin_smaller(): ccd_data = ccd_data_func(data_size=10) a = ccd_data.data with pytest.warns(AstropyDeprecationWarning): - b = rebin(a, (20, 20)) - c = rebin(b, (10, 10)) + try: + b = rebin(a, (20, 20)) + c = rebin(b, (10, 10)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") assert c.shape == (10, 10) assert (c - a).sum() == 0 @@ -63,7 +71,11 @@ def test_rebin_ccddata(mask_data, uncertainty): ccd_data.uncertainty = StdDevUncertainty(err) with pytest.warns(AstropyDeprecationWarning): - b = rebin(ccd_data, (20, 20)) + try: + b = rebin(ccd_data, (20, 20)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") assert b.shape == (20, 20) if mask_data: @@ -76,6 +88,11 @@ def test_rebin_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() with pytest.warns(AstropyDeprecationWarning): - _ = rebin(ccd_data, (20, 20)) + try: + _ = rebin(ccd_data, (20, 20)) + except TypeError as e: + if "does not support this method of rebinning" in str(e): + pytest.skip("Rebinning not supported for this data type") + np.testing.assert_allclose(original.data, ccd_data.data) assert original.unit == ccd_data.unit diff --git a/tox.ini b/tox.ini index 3a859ca5..4d2e63c4 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,7 @@ isolated_build = true setenv = jax: CCDPROC_ARRAY_LIBRARY = jax jax: JAX_ENABLE_X64 = True + dask: CCDPROC_ARRAY_LIBRARY = dask devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple extras = test @@ -53,6 +54,8 @@ deps = oldestdeps: astropy==6.0.* oldestdeps: reproject==0.9.1 + dask: dask + commands = pip freeze !cov-!oldestdeps: pytest --pyargs ccdproc {toxinidir}/docs {posargs} From 191da4e3d7887fadae41f886f4d44f275a653203 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 19 Jun 2025 21:22:52 -0500 Subject: [PATCH 044/111] Add dask test to CI --- .github/workflows/ci_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index ea5b783a..e75b4d0b 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -53,10 +53,10 @@ jobs: python: '3.12' tox_env: 'py312-test-alldeps-numpy126' - - name: 'macos-py312' + - name: 'macos-py312-dask' os: macos-latest python: '3.12' - tox_env: 'py312-test-alldeps' + tox_env: 'py312-alldeps-dask' - name: 'windows-py312' os: windows-latest From 8fc85a5b9c6030447c6bc8d1e194bcb75ab154fe Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 19 Jun 2025 21:32:00 -0500 Subject: [PATCH 045/111] Fix some 
errors introduced when changing the tests for dask --- ccdproc/tests/test_combiner.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index c20fb030..25168973 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -365,10 +365,12 @@ def test_combiner_with_scaling(): # because dask just adds a median to its task list/compute graph thingy # and then tries to evaluate it itself? + med_ccd = median_ccd.data + med_inp_data = ccd_data.data # Try doing a compute on the data first, and if that fails it is no big deal try: - med_ccd = median_ccd.data.compute() - med_inp_data = ccd_data.data.compute() + med_ccd = med_ccd.compute() + med_inp_data = med_inp_data.compute() except AttributeError: pass @@ -480,7 +482,7 @@ def test_combiner_result_dtype(): np_testing.assert_allclose(res.data, ref) res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int) # The result dtype should be integer: - assert xp.isdtype(res.data, "integral") + assert xp.isdtype(res.data.dtype, "integral") ref = xp.ones((3, 3)) * 2 np_testing.assert_allclose(res.data, ref) @@ -823,9 +825,11 @@ def test_3d_combiner_with_scaling(): median_ccd = combiner.median_combine() # Does median also scale to the correct value? # Once again, use numpy to find the median + med_ccd = median_ccd.data + med_inp_data = ccd_data.data try: - med_ccd = median_ccd.data.compute() - med_inp_data = ccd_data.data.compute() + med_ccd = med_ccd.compute() + med_inp_data = med_inp_data.compute() except AttributeError: pass From 73cc0b08bc957589a3723618551a3e5ac20d5c25 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 24 Jun 2025 12:05:30 -0700 Subject: [PATCH 046/111] Allow cupy for testing --- ccdproc/conftest.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py index 2361270a..0a9022d9 100644 --- a/ccdproc/conftest.py +++ b/ccdproc/conftest.py @@ -55,6 +55,11 @@ def pytest_configure(config): PYTEST_HEADER_MODULES["dask"] = "dask" + case "cupy": + import array_api_compat.cupy as testing_array_library # noqa: F401 + + PYTEST_HEADER_MODULES["cupy"] = "cupy" + case _: raise ValueError( f"Unsupported array library: {array_library}. " From 53fb79ab0fc1e081e370bd488925831a290dac2d Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 26 Jun 2025 15:51:35 -0700 Subject: [PATCH 047/111] Suppress square root warning generated in some array libraries A negative value in the square root will not be infrequent, so suppress this. One test was modified to avoid taking a square root since the point of the test was not to test the square root. 
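
For reference, a minimal self-contained sketch of the suppression pattern
this change applies — plain NumPy stands in for the array namespace here,
purely as an illustration; the actual change lives in create_deviation in
the diff below:

    import warnings

    import numpy as np

    data = np.array([4.0, -1.0, 9.0])
    with warnings.catch_warnings():
        # Silence only the sqrt-of-a-negative warning; any other warning
        # still propagates normally.
        warnings.filterwarnings(
            "ignore",
            message="invalid value encountered in sqrt",
            category=RuntimeWarning,
        )
        # The negative entry quietly becomes NaN: array([ 2., nan,  3.])
        result = np.sqrt(data)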
---
 ccdproc/core.py                | 8 +++++++-
 ccdproc/tests/test_ccdproc.py  | 5 ++++-
 ccdproc/tests/test_combiner.py | 2 +-
 pyproject.toml                 | 3 +++
 4 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index b80d27b1..d74a850e 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -441,7 +441,13 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False):
         logging.warning("Negative values in array will be replaced with nan")

     # calculate the deviation
-    var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message="invalid value encountered in sqrt",
+            category=RuntimeWarning,
+        )
+        var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5

     # ensure uncertainty and image data have same unit
     ccd = ccd_data.copy()
diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index 5f1629b2..31e42c56 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -944,7 +944,10 @@ def test_trim_image_does_not_change_input():
 def test_transform_image_does_not_change_input():
     ccd_data = ccd_data_func()
     original = ccd_data.copy()
-    _ = transform_image(ccd_data, xp.sqrt)
+    # Using a function here that is really unlikely to produce
+    # an invalid value (like square root does) to avoid
+    # warning messages.
+    _ = transform_image(ccd_data, xp.positive)
     np_testing.assert_allclose(original.data, ccd_data)
     assert original.unit == ccd_data.unit

diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 25168973..eadde7b8 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -872,7 +872,7 @@ def test_writeable_after_combine(tmpdir, comb_func):
         ccd2.write(tmp_file.strpath)


-def test_clip_extrema_moo():
+def test_clip_extrema_alone():
     ccdlist = [
         CCDData(xp.ones((3, 5)) * 90.0, unit="adu"),
         CCDData(xp.ones((3, 5)) * 20.0, unit="adu"),
diff --git a/pyproject.toml b/pyproject.toml
index 2d205637..ced4109f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -148,6 +148,9 @@ filterwarnings= [
     "ignore:numpy\\.ufunc size changed:RuntimeWarning",
     "ignore:numpy.ndarray size changed:RuntimeWarning",
     "ignore:`np.bool` is a deprecated alias for the builtin `bool`:DeprecationWarning",
+    # This is here because dask can end up generating this warning much
+    # later than the spot in create_deviation where the square root is
+    # calculated.
"ignore:invalid value encountered in sqrt:RuntimeWarning", ] markers = [ From 4bef390707a70abc14f26d347ebe00c90eafe223 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 26 Jun 2025 18:54:06 -0500 Subject: [PATCH 048/111] Apply suggestions from code review Co-authored-by: Nathaniel Starkman Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- ccdproc/core.py | 2 +- ccdproc/tests/test_cosmicray.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index d74a850e..67e5b45f 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -447,7 +447,7 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): message="invalid value encountered in sqrt", category=RuntimeWarning, ) - var = (xp.sqrt(data) ** 2 + readnoise_value**2) ** 0.5 + var = xp.sqrt(xp.abs(data) + readnoise_value**2) # ensure uncertainty and image data have same unit ccd = ccd_data.copy() diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index ab506d03..7e0ea6c2 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -35,8 +35,8 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): # this is not working right now crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale) - # Some array libraris (Dask) do not support setting individual elements, - # so use nuympy. + # Some array libraries (Dask) do not support setting individual elements, + # so use NumPy. data_as_np = np_array(data.data) for i in range(ncrays): y, x = crrays[i] From efeac769aa4a29c197d1dd9e1c295ed2fe90f09e Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 16:38:47 -0700 Subject: [PATCH 049/111] Undo suggested edit --- ccdproc/core.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 67e5b45f..324f4148 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -447,7 +447,11 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): message="invalid value encountered in sqrt", category=RuntimeWarning, ) - var = xp.sqrt(xp.abs(data) + readnoise_value**2) + # The term below in which the square root of the data is calulated + # and then squared MUST stay the way it is so that negative values + # in the data end up as NaN. Do not replace it with an absolute + # value. 
+ var = xp.sqrt(xp.sqrt(data) ** 2 + readnoise_value**2) # ensure uncertainty and image data have same unit ccd = ccd_data.copy() From 69a79d520fb10741070da18b159abf00ce47dd31 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 16:46:38 -0700 Subject: [PATCH 050/111] Shorten up a loop with a comprehension --- ccdproc/combiner.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index db74a7c8..fe75cd42 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -180,13 +180,10 @@ def __init__(self, ccd_iter, dtype=None): self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) # populate self.data_arr - mask_list = [] - for ccd in ccd_list: - if ccd.mask is not None: - mask_list.append(ccd.mask) - else: - mask_list.append(xp.zeros(default_shape)) - + mask_list = [ + ccd.mask if ccd.mask is not None else xp.zeros(default_shape) + for ccd in ccd_list + ] self.data_arr_mask = xp.array(mask_list, dtype=bool) # Must be after self.data_arr is defined because it checks the From f7ea0a9c6c4da3c10cc3600f67c146b7be107e1a Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 16:48:44 -0700 Subject: [PATCH 051/111] Add minimum pin for dependency --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ced4109f..02faefaf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ authors = [ { name = "and Michael Seifert" }, ] dependencies = [ - "array_api_compat", + "array_api_compat>=1.12.0", "array_api_extra>=0.7.0", "astropy>=6.0.1", "astroscrappy>=1.1.0", From d3a732b558eec3dc24e258a758777a088d73d163 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 16:50:00 -0700 Subject: [PATCH 052/111] Update black target versions --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 02faefaf..bc0203f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ include = [ [tool.black] line-length = 88 -target-version = ['py310', 'py311', 'py312'] +target-version = ['py311', 'py312', 'py313'] include = '\.pyi?$|\.ipynb$' # 'extend-exclude' excludes files or directories in addition to the defaults extend-exclude = ''' From 309bbf42b53a254e7ed226952d60fd20b0490f1c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 21:34:24 -0500 Subject: [PATCH 053/111] Store array namespace when Combiner is created --- ccdproc/combiner.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index fe75cd42..2143d582 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -109,6 +109,12 @@ class Combiner: description. If ``None`` it uses ``np.float64``. Default is ``None``. + xp : array namespace, optional + The array namespace to use for the data. If not provided, it will + be inferred from the first `~astropy.nddata.CCDData` object in + ``ccd_iter``. + Default is ``None``. + Raises ------ TypeError @@ -136,7 +142,7 @@ class Combiner: [ 0.66666667, 0.66666667, 0.66666667, 0.66666667]]...) """ - def __init__(self, ccd_iter, dtype=None): + def __init__(self, ccd_iter, dtype=None, xp=None): if ccd_iter is None: raise TypeError( "ccd_iter should be a list or a generator of CCDData objects." 
@@ -167,7 +173,8 @@ def __init__(self, ccd_iter, dtype=None): raise TypeError("CCDData objects don't have the same unit.") # Set array namespace - xp = array_api_compat.array_namespace(ccd_list[0].data) + xp = xp or array_api_compat.array_namespace(ccd_list[0].data) + self._xp = xp if dtype is None: dtype = xp.float64 self.ccd_list = ccd_list @@ -247,7 +254,7 @@ def scaling(self): @scaling.setter def scaling(self, value): - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp if value is None: self._scaling = value else: @@ -316,7 +323,7 @@ def clip_extrema(self, nlow=0, nhigh=0): .. [0] image.imcombine help text. http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?imcombine """ - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp if nlow is None: nlow = 0 if nhigh is None: @@ -447,7 +454,7 @@ def _get_scaled_data(self, scale_arg): return self.data_arr def _get_nan_substituted_data(self, data): - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp # Get the data as an unmasked array with masked values filled as NaN if self.data_arr_mask.any(): @@ -462,7 +469,7 @@ def _combination_setup(self, user_func, default_func, scale_to): Handle the common pieces of image combination data/mask setup. """ data = self._get_scaled_data(scale_to) - xp = array_api_compat.array_namespace(data) + xp = self._xp # Play it safe for now and only do the nan thing if the user is using # the default combination function. if user_func is None: @@ -515,7 +522,7 @@ def median_combine( The uncertainty currently calculated using the median absolute deviation does not account for rejected pixels. """ - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp _default_median_func = _default_median(xp=xp) @@ -565,12 +572,12 @@ def median_combine( # return the combined image return combined_image - def _weighted_sum(self, data, sum_func): + def _weighted_sum(self, data, sum_func, xp=None): """ Perform weighted sum, used by both ``sum_combine`` and in some cases by ``average_combine``. """ - xp = array_api_compat.array_namespace(data) + xp = xp or array_api_compat.array_namespace(data) if self.weights.shape != data.shape: # Add extra axes to the weights for broadcasting weights = xp.reshape(self.weights, [len(self.weights), 1, 1]) @@ -624,7 +631,7 @@ def average_combine( combined_image: `~astropy.nddata.CCDData` CCDData object based on the combined input of CCDData objects. """ - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp _default_average_func = _default_average(xp=xp) @@ -641,7 +648,7 @@ def average_combine( # Do NOT modify data after this -- we need it to be intact when we # we get to the uncertainty calculation. if self.weights is not None: - weighted_sum, weights = self._weighted_sum(data, sum_func) + weighted_sum, weights = self._weighted_sum(data, sum_func, xp=xp) mean = weighted_sum / sum_func(weights, axis=0) else: mean = scale_func(data, axis=0) @@ -707,7 +714,7 @@ def sum_combine( CCDData object based on the combined input of CCDData objects. 
""" - xp = array_api_compat.array_namespace(self.data_arr) + xp = self._xp _default_sum_func = _default_sum(xp=xp) @@ -719,7 +726,7 @@ def sum_combine( ) if self.weights is not None: - summed, weights = self._weighted_sum(data, sum_func) + summed, weights = self._weighted_sum(data, sum_func, xp=xp) else: summed = sum_func(data, axis=0) From bd23ac441077e985d9ba51e2beca6ecf482f7e7c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 1 Jul 2025 21:34:54 -0500 Subject: [PATCH 054/111] Add optional namespace argument to several functions --- ccdproc/core.py | 82 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 21 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 324f4148..84cbb5a4 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -93,7 +93,7 @@ def _is_array(arr): # Ideally this would eventually be covered by tests. Looks like Sparse # could be used to test this, since it has no percentile... -def _percentile_fallback(array, percentiles): # pragma: no cover +def _percentile_fallback(array, percentiles, xp=None): # pragma: no cover """ Try calculating percentile using namespace, otherwise fall back to an implmentation that uses sort. As of the 2023 version of the array API @@ -107,12 +107,16 @@ def _percentile_fallback(array, percentiles): # pragma: no cover percentiles : float or list-like Percentile to calculate. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. + Returns ------- percentile : float or list-like Calculated percentile. """ - xp = array_api_compat.array_namespace(array) + xp = xp or array_api_compat.array_namespace(array) try: return xp.percentile(array, percentiles) except AttributeError: @@ -285,11 +289,11 @@ def ccd_process( # apply the overscan correction if isinstance(oscan, CCDData): nccd = subtract_overscan( - nccd, overscan=oscan, median=oscan_median, model=oscan_model + nccd, overscan=oscan, median=oscan_median, model=oscan_model, xp=xp ) elif isinstance(oscan, str): nccd = subtract_overscan( - nccd, fits_section=oscan, median=oscan_median, model=oscan_model + nccd, fits_section=oscan, median=oscan_median, model=oscan_model, xp=xp ) elif oscan is None: pass @@ -298,6 +302,7 @@ def ccd_process( # apply the trim correction if isinstance(trim, str): + # No xp=... 
here because slicing can be done without knowing the array namespace nccd = trim_image(nccd, fits_section=trim) elif trim is None: pass @@ -306,7 +311,7 @@ def ccd_process( # create the error frame if error and gain is not None and readnoise is not None: - nccd = create_deviation(nccd, gain=gain, readnoise=readnoise) + nccd = create_deviation(nccd, gain=gain, readnoise=readnoise, xp=xp) elif error and (gain is None or readnoise is None): raise ValueError("gain and readnoise must be specified to create error frame.") @@ -324,10 +329,12 @@ def ccd_process( raise TypeError("gain is not None or astropy.units.Quantity.") if gain is not None and gain_corrected: + # No need for xp here because gain_correct does not need the namespace nccd = gain_correct(nccd, gain) # subtracting the master bias if isinstance(master_bias, CCDData): + # No need for xp here because subtract_bias does not need the namespace nccd = subtract_bias(nccd, master_bias) elif master_bias is None: pass @@ -336,6 +343,7 @@ def ccd_process( # subtract the dark frame if isinstance(dark_frame, CCDData): + # No need for xp here because subtract_dark does not need the namespace nccd = subtract_dark( nccd, dark_frame, @@ -352,7 +360,7 @@ def ccd_process( # test dividing the master flat if isinstance(master_flat, CCDData): - nccd = flat_correct(nccd, master_flat, min_value=min_value) + nccd = flat_correct(nccd, master_flat, min_value=min_value, xp=xp) elif master_flat is None: pass else: @@ -360,13 +368,14 @@ def ccd_process( # apply the gain correction only at the end if gain_corrected is False if gain is not None and not gain_corrected: + # No need for xp here because gain_correct does not need the namespace nccd = gain_correct(nccd, gain) return nccd @log_to_metadata -def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): +def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False, xp=None): """ Create a uncertainty frame. The function will update the uncertainty plane which gives the standard deviation for the data. Gain is used in @@ -393,6 +402,10 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): If ``True``, any value of nan in the output array will be replaced by the readnoise. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. + {log} Raises @@ -409,7 +422,7 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): """ # Get array namespace - xp = array_api_compat.array_namespace(ccd_data.data) + xp = xp or array_api_compat.array_namespace(ccd_data.data) if gain is not None and not isinstance(gain, Quantity): raise TypeError("gain must be a astropy.units.Quantity.") @@ -462,7 +475,13 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False): @log_to_metadata def subtract_overscan( - ccd, overscan=None, overscan_axis=1, fits_section=None, median=False, model=None + ccd, + overscan=None, + overscan_axis=1, + fits_section=None, + median=False, + model=None, + xp=None, ): """ Subtract the overscan region from an image. @@ -502,6 +521,10 @@ def subtract_overscan( by the median or the mean. Default is ``None``. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. 
+ {log} Raises @@ -555,7 +578,7 @@ def subtract_overscan( raise TypeError("ccddata is not a CCDData object.") # Set array namespace - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) if (overscan is not None and fits_section is not None) or ( overscan is None and fits_section is None @@ -857,7 +880,7 @@ def gain_correct(ccd, gain, gain_unit=None): @log_to_metadata -def flat_correct(ccd, flat, min_value=None, norm_value=None): +def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): """Correct the image for flat fielding. The flat field image is normalized by its mean or a user-supplied value @@ -883,6 +906,10 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None): have the same scale. If this value is negative or 0, a ``ValueError`` is raised. Default is ``None``. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. + {log} Returns @@ -891,7 +918,7 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None): CCDData object with flat corrected. """ # Get the array namespace - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) # Use the min_value to replace any values in the flat use_flat = flat if min_value is not None: @@ -1008,7 +1035,7 @@ def transform_image(ccd, transform_func, **kwargs): @log_to_metadata -def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): +def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear", xp=None): """ Given a CCDData image with WCS, project it onto a target WCS and return the reprojected data as a new CCDData image. @@ -1039,6 +1066,10 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): Default is ``'bilinear'``. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. 
+ {log} Returns @@ -1050,7 +1081,7 @@ def wcs_project(ccd, target_wcs, target_shape=None, order="bilinear"): from reproject import reproject_interp # Set array namespace - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) if not (ccd.wcs.is_celestial and target_wcs.is_celestial): raise ValueError("one or both WCS is not celestial.") @@ -1354,10 +1385,10 @@ def rebin(ccd, newshape): return result -def block_reduce(ccd, block_size, func=None): +def block_reduce(ccd, block_size, func=None, xp=None): """Thin wrapper around `astropy.nddata.block_reduce`.""" if func is None: - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) func = xp.sum data = nddata.block_reduce(ccd, block_size, func) if isinstance(ccd, CCDData): @@ -1367,10 +1398,10 @@ def block_reduce(ccd, block_size, func=None): return data -def block_average(ccd, block_size): +def block_average(ccd, block_size, xp=None): """Like `block_reduce` but with predefined ``func=np.mean``.""" - xp = array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(ccd.data) data = nddata.block_reduce(ccd, block_size, xp.mean) # Like in block_reduce: @@ -1822,7 +1853,7 @@ def _astroscrappy_gain_apply_helper(cleaned_data, gain, gain_apply, old_interfac return cleaned_data -def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): +def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0, xp=None): """ Identify cosmic rays through median technique. The median technique identifies cosmic rays by identifying pixels by subtracting a median image @@ -1856,6 +1887,10 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): be replaced. Default is ``0``. + xp : array namespace, optional + The array namespace to use for the calculations. If not provided, the + array namespace of the input data will be used. + Notes ----- Similar implementation to crmedian in iraf.imred.crutil.crmedian. @@ -1896,7 +1931,7 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0): updated with the detected cosmic rays. """ if _is_array(ccd): - xp = array_api_compat.array_namespace(ccd) + xp = xp or array_api_compat.array_namespace(ccd) # Masked data is not part of the array API so remove mask if present. # Only look at the data array, guessing that if there is a .mask then @@ -1979,6 +2014,7 @@ def ccdmask( lsigma=9, hsigma=9, ngood=5, + xp=None, ): """ Uses method based on the IRAF ccdmask task to generate a mask based on the @@ -2035,6 +2071,10 @@ def ccdmask( pixels masked in that column. Default is ``5``. + xp : array namespace, optional + The array namespace to use for the calculations. If not provided, the + array namespace of the input data will be used. 
+ Returns ------- mask : `numpy.ndarray` @@ -2092,7 +2132,7 @@ def ccdmask( raise ValueError('"ratio" should be a "CCDData".') from err # Get array namespace - xp = array_api_compat.array_namespace(ratio.data) + xp = xp or array_api_compat.array_namespace(ratio.data) def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): """Helper function to mask values outside of the specified sigma range.""" From fcac48f8f32391ac54391506683b4f9d1791fa1c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 3 Jul 2025 13:22:16 -0500 Subject: [PATCH 055/111] Use array API in all cosmic ray tests --- ccdproc/combiner.py | 12 +++++------- ccdproc/core.py | 31 ++++++++++++++++++++----------- ccdproc/tests/test_cosmicray.py | 21 ++++++--------------- 3 files changed, 31 insertions(+), 33 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 2143d582..986b75e9 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -500,8 +500,7 @@ def median_combine( Parameters ---------- median_func : function, optional - Function that calculates median of a `numpy.ma.MaskedArray`. - Default is `numpy.ma.median`. + Function that calculates median of an array. scale_to : float or None, optional Scaling factor used in the average combined image. If given, @@ -612,19 +611,18 @@ def average_combine( Parameters ---------- scale_func : function, optional - Function to calculate the average. Defaults to - `numpy.nanmean`. + Function to calculate the average. scale_to : float or None, optional Scaling factor used in the average combined image. If given, it overrides `scaling`. Defaults to ``None``. uncertainty_func : function, optional - Function to calculate uncertainty. Defaults to `numpy.ma.std`. + Function to calculate uncertainty. sum_func : function, optional Function used to calculate sums, including the one done to - find the weighted average. Defaults to `numpy.nansum`. + find the weighted average. Returns ------- @@ -706,7 +704,7 @@ def sum_combine( it overrides `scaling`. Defaults to ``None``. uncertainty_func : function, optional - Function to calculate uncertainty. Defaults to `numpy.ma.std`. + Function to calculate uncertainty. Returns ------- diff --git a/ccdproc/core.py b/ccdproc/core.py index 84cbb5a4..69a5334b 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -8,6 +8,7 @@ import warnings import array_api_compat +import array_api_extra as xpx from astropy import nddata, stats from astropy import units as u from astropy.modeling import fitting @@ -101,7 +102,7 @@ def _percentile_fallback(array, percentiles, xp=None): # pragma: no cover Parameters ---------- - array : array-like + array : array_like Array from which to calculate the percentile. percentiles : float or list-like @@ -1205,7 +1206,7 @@ def setbox(x, y, mbox, xmax, ymax): return x1, x2, y1, y2 -def background_deviation_box(data, bbox): +def background_deviation_box(data, bbox, xp=None): """ Determine the background deviation with a box size of bbox. The algorithm steps through the image and calculates the deviation within each box. @@ -1214,12 +1215,16 @@ def background_deviation_box(data, bbox): Parameters ---------- - data : `numpy.ndarray` or `numpy.ma.MaskedArray` + data : `numpy.ndarray` or other array_like Data to measure background deviation. bbox : int Box size for calculating background deviation. + xp : array namespace, optional + Array namespace to use for calculations. If not provided, the + namespace will be determined from the array. 
+ Raises ------ ValueError @@ -1227,7 +1232,7 @@ def background_deviation_box(data, bbox): Returns ------- - background : `numpy.ndarray` or `numpy.ma.MaskedArray` + background : array_like An array with the measured background deviation in each pixel. """ # Check to make sure the background box is an appropriate size @@ -1235,13 +1240,18 @@ def background_deviation_box(data, bbox): if bbox < 1: raise ValueError("bbox must be greater than 1.") + if xp is None: + # Get the array namespace + xp = array_api_compat.array_namespace(data) # make the background image - barr = data * 0.0 + data.std() + barr = data * 0.0 + xp.std(data) ylen, xlen = data.shape for i in range(int(0.5 * bbox), xlen, bbox): for j in range(int(0.5 * bbox), ylen, bbox): x1, x2, y1, y2 = setbox(i, j, bbox, xlen, ylen) - barr[y1:y2, x1:x2] = sigma_func(data[y1:y2, x1:x2]) + xpx.at(barr)[y1:y2, x1:x2].set( + xp.astype(sigma_func(data[y1:y2, x1:x2]), data.dtype) + ) return barr @@ -1266,7 +1276,7 @@ def background_deviation_filter(data, bbox): Returns ------- - background : `numpy.ndarray` or `numpy.ma.MaskedArray` + background : `numpy.ndarray` An array with the measured background deviation in each pixel. """ # Check to make sure the background box is an appropriate size @@ -1861,7 +1871,7 @@ def cosmicray_median(ccd, error_image=None, thresh=5, mbox=11, gbox=0, rbox=0, x Parameters ---------- - ccd : `~astropy.nddata.CCDData`, `numpy.ndarray` or `numpy.ma.MaskedArray` + ccd : `~astropy.nddata.CCDData`, `numpy.ndarray` or other array_like Data to have cosmic ray cleaned. thresh : float, optional @@ -2079,7 +2089,7 @@ def ccdmask( ------- mask : `numpy.ndarray` A boolean ndarray where the bad pixels have a value of 1 (True) and - valid pixels 0 (False), following the numpy.ma conventions. + valid pixels 0 (False), following the numpy convention for masking. Notes ----- @@ -2235,8 +2245,7 @@ def bitfield_to_boolean_mask(bitfield, ignore_bits=0, flip_bits=None): Returns ------- mask : `numpy.ndarray` of boolean dtype - The bitfield converted to a boolean mask that can be used for - `numpy.ma.MaskedArray` or `~astropy.nddata.CCDData`. + The bitfield converted to a boolean mask. 
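`array_api_extra` supplies the `at` helper used in `background_deviation_box` above; it mirrors JAX's `x.at[idx].set(value)` so that indexed assignment works on backends whose arrays are immutable. One subtlety worth flagging: `set` returns the updated array, so on an immutable backend the return value must be kept, which makes the reassigning form below the portable one (a sketch, not the ccdproc code):

import array_api_extra as xpx
import numpy as np

barr = np.zeros((4, 4))
# Portable equivalent of the in-place update `barr[1:3, 1:3] = 5.0`.
barr = xpx.at(barr)[1:3, 1:3].set(5.0)
print(barr.sum())  # 20.0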
Examples -------- diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index 7e0ea6c2..b3aaaf95 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -1,6 +1,5 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -import array_api_compat import pytest from astropy import units as u from astropy.utils.exceptions import AstropyDeprecationWarning @@ -9,6 +8,8 @@ from numpy.random import default_rng from numpy.testing import assert_allclose +# Set up the array library to be used in tests +from ccdproc.conftest import testing_array_library as xp from ccdproc.core import ( background_deviation_box, background_deviation_filter, @@ -26,7 +27,6 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): size = data.shape[0] rng = default_rng(99) - xp = array_api_compat.array_namespace(data.data) crrays = xp.asarray(rng.integers(0, size, size=(ncrays, 2))) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns @@ -58,7 +58,6 @@ def test_cosmicray_lacosmic(): def test_cosmicray_lacosmic_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -75,7 +74,6 @@ def test_cosmicray_lacosmic_ccddata(): def test_cosmicray_lacosmic_check_data(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) with pytest.raises(TypeError): noise = DATA_SCALE * xp.ones_like(ccd_data.data) cosmicray_lacosmic(10, noise) @@ -90,7 +88,6 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): # data and returns that gain corrected data. That is not the # intent... ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -123,7 +120,6 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): def test_cosmicray_lacosmic_accepts_quantity_gain(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -138,7 +134,6 @@ def test_cosmicray_lacosmic_accepts_quantity_gain(): def test_cosmicray_lacosmic_accepts_quantity_readnoise(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -156,7 +151,6 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): # of adu, a readnoise in electrons and a gain in adu / electron. # That is not internally inconsistent. ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) ccd_data.unit = "adu" threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) @@ -176,7 +170,6 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(): # Check that an input ccd in electrons raises a warning. 
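The tests in this patch stop deriving the namespace per test and instead import a single `xp` from `ccdproc.conftest`. The conftest itself is not shown in this series, but the mechanism is presumably something along these lines, a hypothetical sketch in which an environment variable selects the backend:

# Hypothetical conftest.py fragment -- not the actual ccdproc conftest.
import importlib
import os

_backend_name = os.environ.get("CCDPROC_TEST_ARRAY_LIBRARY", "numpy")
# e.g. "numpy", "cupy", or "jax.numpy"; the module must be importable.
testing_array_library = importlib.import_module(_backend_name)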
ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) # The unit below is important for the test; this unit on # input is supposed to raise an error. ccd_data.unit = u.electron @@ -206,7 +199,6 @@ def test_cosmicray_lacosmic_invar_inbkg(new_args): # that calling with the new keyword arguments to astroscrappy # 1.1.0 raises no error. ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) @@ -304,28 +296,28 @@ def test_cosmicray_median_background_deviation(): def test_background_deviation_box(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) bd = background_deviation_box(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_box_fail(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) with pytest.raises(ValueError): background_deviation_box(cd, 0.5) def test_background_deviation_filter(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) bd = background_deviation_filter(cd, 25) assert abs(bd.mean() - scale) < 0.10 def test_background_deviation_filter_fail(): scale = 5.3 - cd = default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale) + cd = xp.asarray(default_rng(seed=123).normal(loc=0, size=(100, 100), scale=scale)) with pytest.raises(ValueError): background_deviation_filter(cd, 0.5) @@ -356,7 +348,6 @@ def test_cosmicray_lacosmic_pssl_does_not_fail(): # to make sure that passing in pssl does not lead to an error # since the new interface does not include pssl. 
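A detail worth noticing in the `background_deviation_*` tests above: random inputs are still drawn with NumPy's `default_rng` and only then converted with `xp.asarray`, since the array API standard does not cover random number generation. The idiom, sketched with the compat NumPy namespace standing in for the conftest-selected backend:

import array_api_compat.numpy as xp  # stand-in for the test backend
import numpy as np

rng = np.random.default_rng(seed=123)
# Generate with NumPy, then move the values into the backend under test.
data = xp.asarray(rng.normal(loc=0.0, scale=5.3, size=(100, 100)))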
ccd_data = ccd_data_func(data_scale=DATA_SCALE) - xp = array_api_compat.array_namespace(ccd_data.data) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) From d0bcdad5dcfc5c4b3efce005a67a8200715194ba Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 3 Jul 2025 13:51:08 -0500 Subject: [PATCH 056/111] cast number to float to avoid multiple namespaces --- ccdproc/core.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 69a5334b..02b82696 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -1249,9 +1249,7 @@ def background_deviation_box(data, bbox, xp=None): for i in range(int(0.5 * bbox), xlen, bbox): for j in range(int(0.5 * bbox), ylen, bbox): x1, x2, y1, y2 = setbox(i, j, bbox, xlen, ylen) - xpx.at(barr)[y1:y2, x1:x2].set( - xp.astype(sigma_func(data[y1:y2, x1:x2]), data.dtype) - ) + xpx.at(barr)[y1:y2, x1:x2].set(float(sigma_func(data[y1:y2, x1:x2]))) return barr From baae84fbf4da3044a814ae7d27f13e87fea7bf2f Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 5 Jul 2025 10:04:58 -0500 Subject: [PATCH 057/111] Change internal data and mask to private properties --- ccdproc/combiner.py | 64 +++++++++++++++++----------------- ccdproc/tests/test_combiner.py | 58 +++++++++++++++--------------- 2 files changed, 61 insertions(+), 61 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 986b75e9..e1ecfe88 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -184,14 +184,14 @@ def __init__(self, ccd_iter, dtype=None, xp=None): # set up the data array # new_shape = (len(ccd_list),) + default_shape - self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) + self._data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) # populate self.data_arr mask_list = [ ccd.mask if ccd.mask is not None else xp.zeros(default_shape) for ccd in ccd_list ] - self.data_arr_mask = xp.array(mask_list, dtype=bool) + self._data_arr_mask = xp.array(mask_list, dtype=bool) # Must be after self.data_arr is defined because it checks the # length of the data array. @@ -222,13 +222,13 @@ def weights(self, value): except TypeError as err: raise TypeError("weights must be an array.") from err - if value.shape != self.data_arr.shape: + if value.shape != self._data_arr.shape: if value.ndim != 1: raise ValueError( "1D weights expected when shapes of the " "data and weights differ." ) - if value.shape[0] != self.data_arr.shape[0]: + if value.shape[0] != self._data_arr.shape[0]: raise ValueError( "Length of weights not compatible with specified axis." 
) @@ -258,9 +258,9 @@ def scaling(self, value): if value is None: self._scaling = value else: - n_images = self.data_arr.shape[0] + n_images = self._data_arr.shape[0] if callable(value): - self._scaling = [value(self.data_arr[i]) for i in range(n_images)] + self._scaling = [value(self._data_arr[i]) for i in range(n_images)] self._scaling = xp.array(self._scaling) else: try: @@ -277,7 +277,7 @@ def scaling(self, value): ) self._scaling = xp.array(value) # reshape so that broadcasting occurs properly - for _ in range(len(self.data_arr.shape) - 1): + for _ in range(len(self._data_arr.shape) - 1): self._scaling = self.scaling[:, xp.newaxis] # set up IRAF-like minmax clipping @@ -329,12 +329,12 @@ def clip_extrema(self, nlow=0, nhigh=0): if nhigh is None: nhigh = 0 - argsorted = xp.argsort(self.data_arr, axis=0) + argsorted = xp.argsort(self._data_arr, axis=0) # Not every array package has mgrid, so make it in numpy and convert it to the # array package used for the data. mg = xp.asarray( np_mgrid[ - [slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0] + [slice(ndim) for i, ndim in enumerate(self._data_arr.shape) if i > 0] ] ) for i in range(-1 * nhigh, nlow): @@ -344,10 +344,10 @@ def clip_extrema(self, nlow=0, nhigh=0): # dimensions, so we need to flatten the mask array, set the mask # values for a flattened array, and then reshape it back to the # original shape. - flat_index = np_ravel_multi_index(where, self.data_arr.shape) - self.data_arr_mask = xp.reshape( - xpx.at(xp.reshape(self.data_arr_mask, (-1,)))[flat_index].set(True), - self.data_arr.shape, + flat_index = np_ravel_multi_index(where, self._data_arr.shape) + self._data_arr_mask = xp.reshape( + xpx.at(xp.reshape(self._data_arr_mask, (-1,)))[flat_index].set(True), + self._data_arr.shape, ) # set up min/max clipping algorithms @@ -365,13 +365,13 @@ def minmax_clipping(self, min_clip=None, max_clip=None): Default is ``None``. 
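The flatten/set/reshape dance in `clip_extrema` above works around the lack of multi-axis fancy assignment in the array API: the per-axis indices are collapsed to flat offsets with NumPy's `ravel_multi_index`, the flattened mask is updated, and the result is reshaped back. A minimal NumPy-backed sketch of the same trick:

import array_api_extra as xpx
import numpy as np

mask = np.zeros((3, 4, 4), dtype=bool)
where = ([1, 2], [0, 3], [2, 1])  # one (image, row, column) index per axis
flat_index = np.ravel_multi_index(where, mask.shape)
mask = np.reshape(
    xpx.at(np.reshape(mask, (-1,)))[flat_index].set(True), mask.shape
)
print(mask.sum())  # 2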
""" if min_clip is not None: - mask = self.data_arr < min_clip + mask = self._data_arr < min_clip # Written to avoid in-place modification of array - self.data_arr_mask = self.data_arr_mask | mask + self._data_arr_mask = self._data_arr_mask | mask if max_clip is not None: - mask = self.data_arr > max_clip + mask = self._data_arr > max_clip # Written to avoid in-place modification of array - self.data_arr_mask = self.data_arr_mask | mask + self._data_arr_mask = self._data_arr_mask | mask # set up sigma clipping algorithms @deprecated_renamed_argument( @@ -430,10 +430,10 @@ def sigma_clipping( # Remove in 3.0 _ = kwd.pop("use_astropy", True) - self.data_arr_mask = ( - self.data_arr_mask + self._data_arr_mask = ( + self._data_arr_mask | sigma_clip( - self.data_arr, + self._data_arr, sigma_lower=low_thresh, sigma_upper=high_thresh, axis=kwd.get("axis", 0), @@ -448,18 +448,18 @@ def sigma_clipping( def _get_scaled_data(self, scale_arg): if scale_arg is not None: - return self.data_arr * scale_arg + return self._data_arr * scale_arg if self.scaling is not None: - return self.data_arr * self.scaling - return self.data_arr + return self._data_arr * self.scaling + return self._data_arr def _get_nan_substituted_data(self, data): xp = self._xp # Get the data as an unmasked array with masked values filled as NaN - if self.data_arr_mask.any(): + if self._data_arr_mask.any(): # Use array_api_extra so that we can use at with all array libraries - data = xpx.at(data)[self.data_arr_mask].set(xp.nan) + data = xpx.at(data)[self._data_arr_mask].set(xp.nan) else: data = data return data @@ -478,7 +478,7 @@ def _combination_setup(self, user_func, default_func, scale_to): data = self._get_nan_substituted_data(data) masked_values = xp.isnan(data).sum(axis=0) else: - masked_values = self.data_arr_mask.sum(axis=0) + masked_values = self._data_arr_mask.sum(axis=0) combo_func = user_func return data, masked_values, combo_func @@ -532,7 +532,7 @@ def median_combine( medianed = median_func(data, axis=0) # set the mask - mask = masked_values == len(self.data_arr) + mask = masked_values == len(self._data_arr) # set the uncertainty @@ -549,7 +549,7 @@ def median_combine( # be an array of the same class as the data, so make sure it is uncertainty = xp.asarray(uncertainty) # Divide uncertainty by the number of pixel (#309) - uncertainty /= xp.sqrt(len(self.data_arr) - masked_values) + uncertainty /= xp.sqrt(len(self._data_arr) - masked_values) # Convert uncertainty to plain numpy array (#351) # There is no need to care about potential masks because the # uncertainty was calculated based on the data so potential masked @@ -566,7 +566,7 @@ def median_combine( ) # update the meta data - combined_image.meta["NCOMBINE"] = len(self.data_arr) + combined_image.meta["NCOMBINE"] = len(self._data_arr) # return the combined image return combined_image @@ -653,7 +653,7 @@ def average_combine( # calculate the mask - mask = masked_values == len(self.data_arr) + mask = masked_values == len(self._data_arr) # set up the deviation uncertainty = uncertainty_func(data, axis=0) @@ -729,7 +729,7 @@ def sum_combine( summed = sum_func(data, axis=0) # set up the mask - mask = masked_values == len(self.data_arr) + mask = masked_values == len(self._data_arr) # set up the deviation uncertainty = uncertainty_func(data, axis=0) @@ -749,7 +749,7 @@ def sum_combine( ) # update the meta data - combined_image.meta["NCOMBINE"] = len(self.data_arr) + combined_image.meta["NCOMBINE"] = len(self._data_arr) # return the combined image return combined_image diff 
--git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index eadde7b8..ddd67412 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -85,8 +85,8 @@ def test_combiner_create(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - assert c.data_arr.shape == (3, 100, 100) - assert c.data_arr_mask.shape == (3, 100, 100) + assert c._data_arr.shape == (3, 100, 100) + assert c._data_arr_mask.shape == (3, 100, 100) # test if dtype matches the value that is passed @@ -94,7 +94,7 @@ def test_combiner_dtype(): ccd_data = ccd_data_func() ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list, dtype=xp.float32) - assert c.data_arr.dtype == xp.float32 + assert c._data_arr.dtype == xp.float32 avg = c.average_combine() # dtype of average should match input dtype assert avg.dtype == c.dtype @@ -114,9 +114,9 @@ def test_combiner_mask(): ccd = CCDData(data, unit=u.adu, mask=mask) ccd_list = [ccd, ccd, ccd] c = Combiner(ccd_list) - assert c.data_arr.shape == (3, 10, 10) - assert c.data_arr_mask.shape == (3, 10, 10) - assert not c.data_arr_mask[0, 5, 5] + assert c._data_arr.shape == (3, 10, 10) + assert c._data_arr_mask.shape == (3, 10, 10) + assert not c._data_arr_mask[0, 5, 5] def test_weights(): @@ -158,7 +158,7 @@ def test_pixelwise_weights(): CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu), ] combo = Combiner(ccd_list) - combo.weights = xp.ones_like(combo.data_arr) + combo.weights = xp.ones_like(combo._data_arr) combo.weights = xpx.at(combo.weights)[:, 5, 5].set(xp.array([1, 5, 10])) ccd = combo.average_combine() np_testing.assert_allclose(ccd.data[5, 5], 312.5) @@ -188,7 +188,7 @@ def test_combiner_minmax_max(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=None, max_clip=500) - assert c.data_arr_mask[2].all() + assert c._data_arr_mask[2].all() def test_combiner_minmax_min(): @@ -200,7 +200,7 @@ def test_combiner_minmax_min(): c = Combiner(ccd_list) c.minmax_clipping(min_clip=-500, max_clip=None) - assert c.data_arr_mask[1].all() + assert c._data_arr_mask[1].all() def test_combiner_sigmaclip_high(): @@ -216,7 +216,7 @@ def test_combiner_sigmaclip_high(): c = Combiner(ccd_list) # using mad for more robust statistics vs. std c.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) - assert c.data_arr_mask[5].all() + assert c._data_arr_mask[5].all() def test_combiner_sigmaclip_single_pix(): @@ -231,13 +231,13 @@ def test_combiner_sigmaclip_single_pix(): combo = Combiner(ccd_list) # add a single pixel in another array to check that # that one gets rejected - combo.data_arr = xpx.at(combo.data_arr)[0, 5, 5].set(0) - combo.data_arr = xpx.at(combo.data_arr)[1, 5, 5].set(-5) - combo.data_arr = xpx.at(combo.data_arr)[2, 5, 5].set(5) - combo.data_arr = xpx.at(combo.data_arr)[3, 5, 5].set(-5) - combo.data_arr = xpx.at(combo.data_arr)[4, 5, 5].set(25) + combo._data_arr = xpx.at(combo._data_arr)[0, 5, 5].set(0) + combo._data_arr = xpx.at(combo._data_arr)[1, 5, 5].set(-5) + combo._data_arr = xpx.at(combo._data_arr)[2, 5, 5].set(5) + combo._data_arr = xpx.at(combo._data_arr)[3, 5, 5].set(-5) + combo._data_arr = xpx.at(combo._data_arr)[4, 5, 5].set(25) combo.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad) - assert combo.data_arr_mask[4, 5, 5] + assert combo._data_arr_mask[4, 5, 5] def test_combiner_sigmaclip_low(): @@ -253,7 +253,7 @@ def test_combiner_sigmaclip_low(): c = Combiner(ccd_list) # using mad for more robust statistics vs. 
std c.sigma_clipping(high_thresh=None, low_thresh=3, func="median", dev_func=mad) - assert c.data_arr_mask[5].all() + assert c._data_arr_mask[5].all() # test that the median combination works and returns a ccddata object @@ -377,7 +377,7 @@ def test_combiner_with_scaling(): np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) # Set the scaling manually... - combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] + combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape @@ -586,7 +586,7 @@ def test_average_combine_uncertainty(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) ccd = c.average_combine(uncertainty_func=xp.sum) - uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3) np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call @@ -601,7 +601,7 @@ def test_median_combine_uncertainty(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) ccd = c.median_combine(uncertainty_func=xp.sum) - uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3) + uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3) np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call @@ -616,7 +616,7 @@ def test_sum_combine_uncertainty(): ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) ccd = c.sum_combine(uncertainty_func=xp.sum) - uncert_ref = xp.sum(c.data_arr, 0) * xp.sqrt(3) + uncert_ref = xp.sum(c._data_arr, 0) * xp.sqrt(3) np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) # Compare this also to the "combine" call @@ -798,8 +798,8 @@ def test_combiner_3d(): ccd_list = [data1, data2, data3] c = Combiner(ccd_list) - assert c.data_arr.shape == (3, 5, 5, 5) - assert c.data_arr_mask.shape == (3, 5, 5, 5) + assert c._data_arr.shape == (3, 5, 5, 5) + assert c._data_arr_mask.shape == (3, 5, 5, 5) ccd = c.average_combine() assert ccd.shape == (5, 5, 5) @@ -836,7 +836,7 @@ def test_3d_combiner_with_scaling(): np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) # Set the scaling manually... 
- combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)] + combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) assert avg_ccd.shape == ccd_data.shape @@ -935,9 +935,9 @@ def test_clip_extrema_with_other_rejection(): ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1) c = Combiner(ccdlist) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask = xpx.at(c.data_arr_mask)[1, 1, 2].set(True) + c._data_arr_mask = xpx.at(c._data_arr_mask)[1, 1, 2].set(True) # Reject ccdlist[1].data[1,2] by other means - c.data_arr_mask = xpx.at(c.data_arr_mask)[3, 0, 0].set(True) + c._data_arr_mask = xpx.at(c._data_arr_mask)[3, 0, 0].set(True) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() @@ -979,8 +979,8 @@ def create_gen(): yield ccd_data c = Combiner(create_gen()) - assert c.data_arr.shape == (3, 100, 100) - assert c.data_arr_mask.shape == (3, 100, 100) + assert c._data_arr.shape == (3, 100, 100) + assert c._data_arr_mask.shape == (3, 100, 100) @pytest.mark.parametrize( @@ -1057,7 +1057,7 @@ def sum_func(_, axis=axis): return xp.sum(new_data, axis=axis) expected_result = 3 * data - actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask)) + actual_result = c.sum_combine(sum_func=my_summer(c._data_arr, c._data_arr_mask)) elif comb_func == "average_combine": expected_result = data actual_result = c.average_combine(scale_func=xp.mean) From 655e8443ef97163649c2da088ef38fe615f56564 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 5 Jul 2025 10:23:29 -0500 Subject: [PATCH 058/111] Add properties for accessing the data and mask to be used in combination --- ccdproc/combiner.py | 12 ++++++++++++ docs/image_combination.rst | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index e1ecfe88..ed496453 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -199,8 +199,20 @@ def __init__(self, ccd_iter, dtype=None, xp=None): @property def dtype(self): + """The dtype of the data array to be combined.""" return self._dtype + @property + def data(self): + """The data array to be combined.""" + return self._data_arr + + @property + def mask(self): + """The mask array to be used in image combination. This is *not* the mask + of the combined image, but the mask of the data array to be combined.""" + return self._data_arr_mask + @property def weights(self): """ diff --git a/docs/image_combination.rst b/docs/image_combination.rst index 460a1570..67e79ebb 100644 --- a/docs/image_combination.rst +++ b/docs/image_combination.rst @@ -104,11 +104,11 @@ To clip iteratively, continuing the clipping process until no more pixels are rejected, loop in the code calling the clipping method: >>> old_n_masked = 0 # dummy value to make loop execute at least once - >>> new_n_masked = combiner.data_arr_mask.sum() + >>> new_n_masked = combiner.mask.sum() >>> while (new_n_masked > old_n_masked): ... combiner.sigma_clipping(func=np.ma.median) ... old_n_masked = new_n_masked - ... new_n_masked = combiner.data_arr_mask.sum() + ... new_n_masked = combiner.mask.sum() Note that the default values for the high and low thresholds for rejection are 3 standard deviations. 
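With the `data` and `mask` properties added here, the stacked pixels and the running rejection mask are reachable without touching the private attributes, which is exactly what the revised documentation loop above relies on. A short usage sketch:

import numpy as np
from astropy import units as u
from astropy.nddata import CCDData
from ccdproc import Combiner

ccd_list = [CCDData(np.ones((10, 10)), unit=u.adu) for _ in range(3)]
combiner = Combiner(ccd_list)
print(combiner.data.shape)  # (3, 10, 10): the stacked input images
print(combiner.mask.sum())  # 0: no pixels rejected yet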
From 17c0dcaf4087fdc32a1ad4c8a5be9555aeca1dbc Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 5 Jul 2025 16:09:16 -0500 Subject: [PATCH 059/111] Apply suggestions from code review Co-authored-by: Nathaniel Starkman --- ccdproc/combiner.py | 4 ++-- ccdproc/log_meta.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index ed496453..a0e387fc 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -110,10 +110,10 @@ class Combiner: Default is ``None``. xp : array namespace, optional - The array namespace to use for the data. If not provided, it will + The array namespace to use for the data. If `None` or not provided, it will be inferred from the first `~astropy.nddata.CCDData` object in ``ccd_iter``. - Default is ``None``. + Default is `None`. Raises ------ diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index ebafa320..21dfab2b 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -134,7 +134,7 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, NDData | np.ndarray): + elif isinstance(value, (NDData, np.ndarray)): try: length = len(value) except TypeError: From a4e6d4a996ed4e454c4bffab47aaa42aa24a2f4c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sun, 6 Jul 2025 09:36:48 -0500 Subject: [PATCH 060/111] Choose performance over style --- ccdproc/log_meta.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index 21dfab2b..9fc69177 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -134,7 +134,7 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, (NDData, np.ndarray)): + elif isinstance(value, (NDData, np.ndarray)): # noqa: UP038 try: length = len(value) except TypeError: From 58439553afb389369a2fdb5f691375f051d0fb9b Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 12 Jul 2025 12:32:41 -0700 Subject: [PATCH 061/111] Changes to testing for cupy One more fix --- ccdproc/tests/test_ccdproc.py | 181 +++++++++++++++++++--------------- 1 file changed, 100 insertions(+), 81 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 31e42c56..fc5a4ce9 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -2,7 +2,7 @@ import warnings -# import array_api_compat +import array_api_extra as xpx import astropy import astropy.units as u import pytest @@ -16,7 +16,6 @@ from numpy import array as np_array from numpy import mgrid as np_mgrid from numpy import random as np_random -from numpy import testing as np_testing from ccdproc.conftest import testing_array_library as xp from ccdproc.core import ( @@ -84,7 +83,7 @@ def test_create_deviation(u_image, u_gain, u_readnoise, expect_success): expected_var = xp.sqrt(2 * ccd_data.data + 5**2) / 2 else: expected_var = xp.sqrt(ccd_data.data + 5**2) - np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + assert xp.all(xpx.isclose(ccd_var.uncertainty.array, expected_var)) assert ccd_var.unit == ccd_data.unit # Uncertainty should *not* have any units -- does it? 
with pytest.raises(AttributeError): @@ -101,9 +100,7 @@ def test_create_deviation_from_negative(): ccd_var = create_deviation( ccd_data, gain=None, readnoise=readnoise, disregard_nan=False ) - np_testing.assert_array_equal( - ccd_data.data < 0, xp.isnan(ccd_var.uncertainty.array) - ) + assert xp.all(xpx.isclose(ccd_data.data < 0, xp.isnan(ccd_var.uncertainty.array))) def test_create_deviation_from_negative_2(): @@ -119,7 +116,7 @@ def test_create_deviation_from_negative_2(): # libraries. ccd_data.data = ccd_data.data * ~mask expected_var = xp.sqrt(ccd_data.data + readnoise.value**2) - np_testing.assert_allclose(ccd_var.uncertainty.array, expected_var) + assert xp.all(xpx.isclose(ccd_var.uncertainty.array, expected_var)) def test_create_deviation_keywords_must_have_unit(): @@ -188,8 +185,12 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np_testing.assert_allclose( - ccd_data_overscan.data[science_region].mean(), sky + original_mean, rtol=1e-6 + assert xp.all( + xpx.isclose( + ccd_data_overscan.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, + ) ) # Is the overscan region zero? assert (ccd_data_overscan.data[oscan_region] == 0).all() @@ -205,17 +206,21 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) # Is the mean of the "science" region the sum of sky and the mean the # "science" section had before backgrounds were added? - np_testing.assert_allclose( - ccd_data_fits_section.data[science_region].mean(), - sky + original_mean, - rtol=1e-6, + assert xp.all( + xpx.isclose( + ccd_data_fits_section.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, + ) ) # Is the overscan region zero? assert (ccd_data_fits_section.data[oscan_region] == 0).all() # Do both ways of subtracting overscan give exactly the same result? - np_testing.assert_allclose( - ccd_data_overscan[science_region], ccd_data_fits_section[science_region] + assert xp.all( + xpx.isclose( + ccd_data_overscan[science_region], ccd_data_fits_section[science_region] + ) ) # Set overscan_axis to None, and let the routine figure out the axis. 
@@ -227,10 +232,12 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np_testing.assert_allclose( - ccd_data_overscan_auto.data[science_region].mean(), - sky + original_mean, - rtol=1e-6, + assert xp.all( + xpx.isclose( + ccd_data_overscan_auto.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, + ) ) # Use overscan_axis=None with a FITS section ccd_data_fits_section_overscan_auto = subtract_overscan( @@ -240,10 +247,12 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np_testing.assert_allclose( - ccd_data_fits_section_overscan_auto.data[science_region].mean(), - sky + original_mean, - rtol=1e-6, + assert xp.all( + xpx.isclose( + ccd_data_fits_section_overscan_auto.data[science_region].mean(), + sky + original_mean, + rtol=1e-6, + ) ) # Overscan_axis should be 1 for a square overscan region # This test only works for a non-square data region, but the @@ -266,7 +275,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - np_testing.assert_allclose(ccd_data_square_overscan_auto, ccd_data_square) + assert xp.all(xpx.isclose(ccd_data_square_overscan_auto, ccd_data_square)) # A more substantial test of overscan modeling @@ -306,8 +315,8 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_allclose( - ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + assert xp.all( + xpx.isclose(ccd_data.data[science_region].mean(), original_mean, atol=1e-5) ) # Set the overscan_axis explicitly to None, and let the routine # figure it out. @@ -318,8 +327,8 @@ def test_subtract_overscan_model(transpose): median=False, model=models.Polynomial1D(2), ) - np_testing.assert_allclose( - ccd_data.data[science_region].mean(), original_mean, atol=1e-5 + assert xp.all( + xpx.isclose(ccd_data.data[science_region].mean(), original_mean, atol=1e-5) ) @@ -363,7 +372,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): trimmed = trim_image(ccd_data, fits_section="[20:40,:]") # FITS reverse order, bounds are inclusive and starting index is 1-based assert trimmed.shape == (50, 21) - np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + assert xp.all(xpx.isclose(trimmed.data, ccd_data[:, 19:40])) if mask_data: assert trimmed.shape == trimmed.mask.shape if uncertainty: @@ -374,7 +383,7 @@ def test_trim_image_no_section(): ccd_data = ccd_data_func(data_size=50) trimmed = trim_image(ccd_data[:, 19:40]) assert trimmed.shape == (50, 21) - np_testing.assert_allclose(trimmed.data, ccd_data[:, 19:40]) + assert xp.all(xpx.isclose(trimmed.data, ccd_data[:, 19:40])) def test_trim_with_wcs_alters_wcs(): @@ -404,7 +413,7 @@ def test_subtract_bias(): master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? 
- np_testing.assert_allclose(no_bias.data.mean(), data_avg) + assert xp.all(xpx.isclose(no_bias.data.mean(), data_avg)) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header del no_bias.header["key"] @@ -471,8 +480,8 @@ def test_subtract_dark(explicit_times, scale, exposure_keyword): (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit) ) - np_testing.assert_allclose( - ccd_data.data - dark_scale * dark_level, dark_sub.data, rtol=1e-6 + assert xp.all( + xpx.isclose(ccd_data.data - dark_scale * dark_level, dark_sub.data, rtol=1e-6) ) # Headers should have the same content...do they? assert dark_sub.header == ccd_data.header @@ -574,15 +583,19 @@ def test_flat_correct(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat.data.mean # if the normalization was done correctly. - np_testing.assert_allclose( - (flat_data.data * flat.data).mean(), - ccd_data.data.mean() * flat.data.mean(), - rtol=1e-6, + assert xp.all( + xpx.isclose( + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat.data.mean(), + rtol=1e-6, + ) ) - np_testing.assert_allclose( - ccd_data.data / flat_data.data, - flat.data / flat.data.mean(), - rtol=1e-6, + assert xp.all( + xpx.isclose( + ccd_data.data / flat_data.data, + flat.data / flat.data.mean(), + rtol=1e-6, + ) ) # Check that metadata is unchanged (since logging is turned off) @@ -606,15 +619,19 @@ def test_flat_correct_min_value(): # Check that the flat was normalized. The asserts below, which look a # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) - np_testing.assert_allclose( - (flat_corrected_data.data * flat_with_min.data).mean(), - (ccd_data.data * flat_with_min.data.mean()).mean(), - rtol=1e-6, + assert xp.all( + xpx.isclose( + (flat_corrected_data.data * flat_with_min.data).mean(), + (ccd_data.data * flat_with_min.data.mean()).mean(), + rtol=1e-6, + ) ) - np_testing.assert_allclose( - ccd_data.data / flat_corrected_data.data, - flat_with_min.data / flat_with_min.data.mean(), - rtol=1e-6, + assert xp.all( + xpx.isclose( + ccd_data.data / flat_corrected_data.data, + flat_with_min.data / flat_with_min.data.mean(), + rtol=1e-6, + ) ) # Test that flat is not modified. @@ -638,13 +655,15 @@ def test_flat_correct_norm_value(): # Check that the flat was normalized # Should be the case that flat * flat_data = ccd_data * flat_mean # if the normalization was done correctly. 
- np_testing.assert_allclose( - (flat_data.data * flat.data).mean(), - ccd_data.data.mean() * flat_mean, - rtol=1e-6, + assert xp.all( + xpx.isclose( + (flat_data.data * flat.data).mean(), + ccd_data.data.mean() * flat_mean, + rtol=1e-6, + ) ) - np_testing.assert_allclose( - ccd_data.data / flat_data.data, flat.data / flat_mean, rtol=1e-6 + assert xp.all( + xpx.isclose(ccd_data.data / flat_data.data, flat.data / flat_mean, rtol=1e-6) ) @@ -694,7 +713,7 @@ def test_gain_correct(): ccd_data = ccd_data_func() init_data = ccd_data.data gain_data = gain_correct(ccd_data, gain=3, add_keyword=None) - np_testing.assert_allclose(gain_data.data, 3 * init_data) + assert xp.all(xpx.isclose(gain_data.data, 3 * init_data)) assert ccd_data.meta == gain_data.meta @@ -704,7 +723,7 @@ def test_gain_correct_quantity(): g = Quantity(3, u.electron / u.adu) ccd_data = gain_correct(ccd_data, gain=g) - np_testing.assert_allclose(ccd_data.data, 3 * init_data) + assert xp.all(xpx.isclose(ccd_data.data, 3 * init_data)) assert ccd_data.unit == u.electron @@ -752,14 +771,14 @@ def tran(arr): tran = transform_image(ccd_data, tran) - np_testing.assert_allclose(10 * ccd_data.data, tran.data) + assert xp.all(xpx.isclose(10 * ccd_data.data, tran.data)) if mask_data: assert tran.shape == tran.mask.shape - np_testing.assert_allclose(ccd_data.mask, tran.mask) + assert xp.all(xpx.isclose(ccd_data.mask, tran.mask)) if uncertainty: assert tran.shape == tran.uncertainty.array.shape - np_testing.assert_allclose( - 10 * ccd_data.uncertainty.array, tran.uncertainty.array + assert xp.all( + xpx.isclose(10 * ccd_data.uncertainty.array, tran.uncertainty.array) ) @@ -875,7 +894,7 @@ def test__overscan_schange(): old_data = ccd_data.copy() new_data = subtract_overscan(ccd_data, overscan=ccd_data[:, 1], overscan_axis=0) assert not xp.allclose(old_data.data, new_data.data) - np_testing.assert_allclose(old_data.data, ccd_data.data) + assert xp.all(xpx.isclose(old_data.data, ccd_data.data)) def test_create_deviation_does_not_change_input(): @@ -884,7 +903,7 @@ def test_create_deviation_does_not_change_input(): _ = create_deviation( ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron ) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -893,7 +912,7 @@ def test_cosmicray_median_does_not_change_input(): original = ccd_data.copy() error = xp.ones_like(ccd_data) _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -901,7 +920,7 @@ def test_cosmicray_lacosmic_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = cosmicray_lacosmic(ccd_data) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -912,7 +931,7 @@ def test_flat_correct_does_not_change_input(): # Ignore the divide by zero warning that is raised when the flat is zero. 
with warnings.catch_warnings(action="ignore", category=RuntimeWarning): _ = flat_correct(ccd_data, flat=flat) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -920,7 +939,7 @@ def test_gain_correct_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = gain_correct(ccd_data, gain=1, gain_unit=ccd_data.unit) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -929,7 +948,7 @@ def test_subtract_bias_does_not_change_input(): original = ccd_data.copy() master_frame = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit) _ = subtract_bias(ccd_data, master=master_frame) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -937,7 +956,7 @@ def test_trim_image_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = trim_image(ccd_data, fits_section=None) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -948,7 +967,7 @@ def test_transform_image_does_not_change_input(): # an invalid value (like square root does) to avoid # warning messages. _ = transform_image(ccd_data, xp.positive) - np_testing.assert_allclose(original.data, ccd_data) + assert xp.all(xpx.isclose(original.data, ccd_data)) assert original.unit == ccd_data.unit @@ -985,7 +1004,7 @@ def test_wcs_project_onto_same_wcs(): assert new_ccd.wcs.wcs.compare(target_wcs.wcs) # Make sure data matches within some reasonable tolerance. - np_testing.assert_allclose(ccd_data.data, new_ccd.data, rtol=1e-5) + assert xp.all(xpx.isclose(ccd_data.data, new_ccd.data, rtol=1e-5)) def test_wcs_project_onto_same_wcs_remove_headers(): @@ -1023,10 +1042,10 @@ def test_wcs_project_onto_shifted_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels should all be shifted. - np_testing.assert_allclose(ccd_data.data[:-1, :-1], new_ccd[1:, 1:], rtol=1e-5) + assert xp.all(xpx.isclose(ccd_data.data[:-1, :-1], new_ccd[1:, 1:], rtol=1e-5)) # The masks should all be shifted too. - np_testing.assert_array_equal(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:]) + assert xp.all(xpx.isclose(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:])) # We should have more values that are masked in the output array # than on input because some on output were not in the footprint @@ -1084,7 +1103,7 @@ def test_wcs_project_onto_scale_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels have been scaled. - np_testing.assert_allclose(ccd_data.data / 4, data_cutout, rtol=1e-5) + assert xp.all(xpx.isclose(ccd_data.data / 4, data_cutout, rtol=1e-5)) # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. 
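The two commits above replace `numpy.testing.assert_allclose` with backend-neutral assertions built from `xp.all` and `array_api_extra`'s `isclose`. Since the pattern recurs in nearly every test, it could be collected into a small helper; a hypothetical sketch, not part of the patch:

import array_api_extra as xpx

def assert_allclose_xp(actual, desired, xp, rtol=1e-5, atol=1e-8):
    # Backend-neutral stand-in for numpy.testing.assert_allclose.
    assert xp.all(xpx.isclose(actual, desired, rtol=rtol, atol=atol))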
@@ -1105,7 +1124,7 @@ def test_ccd_process_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() _ = ccd_process(ccd_data, gain=5 * u.electron / u.adu, readnoise=10 * u.electron) - np_testing.assert_allclose(original.data, ccd_data.data) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -1180,9 +1199,9 @@ def test_ccd_process(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np_testing.assert_allclose(2.0 * xp.ones((100, 90)), occd.data) - np_testing.assert_allclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array) - np_testing.assert_array_equal(mask, occd.mask) + assert xp.all(xpx.isclose(2.0 * xp.ones((100, 90)), occd.data)) + assert xp.all(xpx.isclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array)) + assert xp.all(xpx.isclose(mask, occd.mask)) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 assert occd.meta["testkw"] == 100 @@ -1228,9 +1247,9 @@ def test_ccd_process_gain_corrected(): # Final results should be (10 - 2) / 2.0 - 2 = 2 # Error should be (4 + 5)**0.5 / 0.5 = 3.0 - np_testing.assert_allclose(2.0 * xp.ones((100, 90)), occd.data) - np_testing.assert_allclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array) - np_testing.assert_array_equal(mask, occd.mask) + assert xp.all(xpx.isclose(2.0 * xp.ones((100, 90)), occd.data)) + assert xp.all(xpx.isclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array)) + assert xp.all(xpx.isclose(mask, occd.mask)) assert occd.unit == u.electron # Make sure the original keyword is still present. Regression test for #401 assert occd.meta["testkw"] == 100 From bbc1249c6f39e5d587dd8acc3908499da4536be5 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 12 Jul 2025 12:47:54 -0700 Subject: [PATCH 062/111] Make sure to use CCDData.data instead of CCDData in comparisons This is necessary because CCDData does not implement the array API currently. --- ccdproc/tests/test_ccdproc.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index fc5a4ce9..85ebe2e7 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -219,7 +219,8 @@ def test_subtract_overscan(median, transpose, data_rectangle): # Do both ways of subtracting overscan give exactly the same result? 
assert xp.all( xpx.isclose( ccd_data_overscan.data[science_region], ccd_data_fits_section.data[science_region], ) ) @@ -275,7 +276,9 @@ def test_subtract_overscan(median, transpose, data_rectangle): median=median, model=None, ) - assert xp.all(xpx.isclose(ccd_data_square_overscan_auto, ccd_data_square)) + assert xp.all( + xpx.isclose(ccd_data_square_overscan_auto.data, ccd_data_square.data) + ) # A more substantial test of overscan modeling @@ -372,7 +375,7 @@ def test_trim_image_fits_section(mask_data, uncertainty): trimmed = trim_image(ccd_data, fits_section="[20:40,:]") # FITS reverse order, bounds are inclusive and starting index is 1-based assert trimmed.shape == (50, 21) - assert xp.all(xpx.isclose(trimmed.data, ccd_data[:, 19:40])) + assert xp.all(xpx.isclose(trimmed.data, ccd_data.data[:, 19:40])) if mask_data: assert trimmed.shape == trimmed.mask.shape if uncertainty: @@ -383,7 +386,7 @@ def test_trim_image_no_section(): ccd_data = ccd_data_func(data_size=50) trimmed = trim_image(ccd_data[:, 19:40]) assert trimmed.shape == (50, 21) - assert xp.all(xpx.isclose(trimmed.data, ccd_data[:, 19:40])) + assert xp.all(xpx.isclose(trimmed.data, ccd_data.data[:, 19:40])) def test_trim_with_wcs_alters_wcs(): @@ -967,7 +970,7 @@ def test_transform_image_does_not_change_input(): # an invalid value (like square root does) to avoid # warning messages. _ = transform_image(ccd_data, xp.positive) - assert xp.all(xpx.isclose(original.data, ccd_data)) + assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit @@ -1042,7 +1045,7 @@ def test_wcs_project_onto_shifted_wcs(): # Make sure data matches within some reasonable tolerance, keeping in mind # that the pixels should all be shifted. - assert xp.all(xpx.isclose(ccd_data.data[:-1, :-1], new_ccd[1:, 1:], rtol=1e-5)) + assert xp.all(xpx.isclose(ccd_data.data[:-1, :-1], new_ccd.data[1:, 1:], rtol=1e-5)) # The masks should all be shifted too. assert xp.all(xpx.isclose(ccd_data.mask[:-1, :-1], new_ccd.mask[1:, 1:])) From 6af1e1eb85997c9d4b2fbbc99bee3eaf5cdbc18e Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 12 Jul 2025 13:21:00 -0700 Subject: [PATCH 063/111] Add more robust handling of open file test One more fix Fix the fix --- ccdproc/tests/test_combine_open_files.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ccdproc/tests/test_combine_open_files.py b/ccdproc/tests/test_combine_open_files.py index b1bfcb1f..29b13902 100644 --- a/ccdproc/tests/test_combine_open_files.py +++ b/ccdproc/tests/test_combine_open_files.py @@ -1,4 +1,5 @@ import os +import re import subprocess import sys from pathlib import Path @@ -36,7 +37,9 @@ def test_open_files_combine_no_chunks(): # Make a copy args = list(common_args) args.extend(["--open-by", "combine-nochunk", NUM_FILE_LIMIT]) - p = subprocess.run(args=args, cwd=str(subprocess_dir)) + p = subprocess.run(args=args, cwd=str(subprocess_dir), capture_output=True) + if re.search(r".*No module named .*psutil.*", str(p.stderr)): + pytest.skip("psutil is not installed, skipping test") # If we have succeeded the test passes. We are only checking that # we don't have too many files open.
assert p.returncode == 0 @@ -55,7 +58,9 @@ def test_open_files_combine_chunks(): # Make a copy args = list(common_args) args.extend(["--open-by", "combine-chunk", NUM_FILE_LIMIT]) - p = subprocess.run(args=args, cwd=str(subprocess_dir)) + p = subprocess.run(args=args, cwd=str(subprocess_dir), capture_output=True) + if re.search(r".*No module named .*psutil.*", str(p.stderr)): + pytest.skip("psutil is not installed, skipping test") # If we have succeeded the test passes. We are only checking that # we don't have too many files open. assert p.returncode == 0 From 5ec57595db160e776cc6c4f0a9a0a76dc523c513 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sat, 12 Jul 2025 16:27:48 -0700 Subject: [PATCH 064/111] Add several workarounds for non-compliance of CCDData with Array API --- ccdproc/core.py | 32 +++++++++++++++++++++++++++++--- ccdproc/tests/test_ccdproc.py | 28 ++++++++++++++-------------- 2 files changed, 43 insertions(+), 17 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 02b82696..34f7d840 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -321,7 +321,10 @@ def ccd_process( # Handle this simple case first.... pass elif _is_array(bad_pixel_mask): - nccd.mask = xp.asarray(bad_pixel_mask, dtype=bool) + # TODO: the private _mask attribute is set here to avoid the + # mask.setter that sets the mask to a numpy array. This can be + # removed when CCDData supports array namespaces. + nccd._mask = xp.asarray(bad_pixel_mask, dtype=bool) else: raise TypeError("bad_pixel_mask is not None or an array.") @@ -331,7 +334,7 @@ def ccd_process( if gain is not None and gain_corrected: # No need for xp here because gain_correct does not need the namespace - nccd = gain_correct(nccd, gain) + nccd = gain_correct(nccd, gain, xp=xp) # subtracting the master bias if isinstance(master_bias, CCDData): @@ -735,6 +738,7 @@ def subtract_dark( exposure_time=None, exposure_unit=None, scale=False, + xp=None, ): """ Subtract dark current from an image. @@ -787,6 +791,9 @@ def subtract_dark( if not (isinstance(ccd, CCDData) and isinstance(master, CCDData)): raise TypeError("ccd and master must both be CCDData objects.") + # Do this after the type check above + xp = xp or array_api_compat.array_namespace(ccd.data) + if ( data_exposure is not None and dark_exposure is not None @@ -840,12 +847,17 @@ def subtract_dark( "image" ) from err + # TODO: Workaround for the fact that CCDData currently uses numpy + # for arithmetic operations. + result.data = xp.asarray(result.data) + if result.mask is not None: + result._mask = xp.asarray(result.mask) result.meta = ccd.meta.copy() return result @log_to_metadata -def gain_correct(ccd, gain, gain_unit=None): +def gain_correct(ccd, gain, gain_unit=None, xp=None): """Correct the gain in the image. Parameters ---------- @@ -868,6 +880,7 @@ def gain_correct(ccd, gain, gain_unit=None): result : `~astropy.nddata.CCDData` CCDData object with gain corrected. """ + xp = xp or array_api_compat.array_namespace(ccd.data) if isinstance(gain, Keyword): gain_value = gain.value_from(ccd.header) elif isinstance(gain, numbers.Number) and gain_unit is not None: @@ -876,6 +889,14 @@ def gain_correct(ccd, gain, gain_unit=None): gain_value = gain result = ccd.multiply(gain_value) + # TODO: Workaround for the fact that CCDData currently uses numpy + # for arithmetic operations. + result.data = xp.asarray(result.data) + if result.mask is not None: + # The private _mask attribute is set here to avoid the mask.setter + # that sets the mask to a numpy array.
This can be removed when CCDData + # supports array namespaces. + result._mask = xp.asarray(result.mask) result.meta = ccd.meta.copy() return result @@ -951,6 +972,11 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): # divide through the flat flat_corrected = ccd.divide(flat_normed) + # TODO: Workaround for the fact that CCDData currently uses numpy + # for arithmetic operations. + flat_corrected.data = xp.asarray(flat_corrected.data) + if flat_corrected.mask is not None: + flat_corrected._mask = xp.asarray(flat_corrected.mask) flat_corrected.meta = ccd.meta.copy() return flat_corrected diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 85ebe2e7..7303cace 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -408,7 +408,7 @@ def test_trim_with_wcs_alters_wcs(): def test_subtract_bias(): ccd_data = ccd_data_func() - data_avg = ccd_data.data.mean() + data_avg = xp.mean(ccd_data.data) bias_level = 5.0 ccd_data.data = ccd_data.data + bias_level ccd_data.header["key"] = "value" @@ -416,7 +416,7 @@ def test_subtract_bias(): master_bias = CCDData(master_bias_array, unit=ccd_data.unit) no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None) # Does the data we are left with have the correct average? - assert xp.all(xpx.isclose(no_bias.data.mean(), data_avg)) + assert xp.all(xpx.isclose(xp.mean(no_bias.data), data_avg)) # With logging turned off, metadata should not change assert no_bias.header == ccd_data.header del no_bias.header["key"] @@ -580,7 +580,7 @@ def test_flat_correct(): size = ccd_data.shape[0] # create the flat, with some scatter data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) - flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) + flat = CCDData(xp.asarray(data), meta=fits.header.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None) # Check that the flat was normalized @@ -588,11 +588,12 @@ def test_flat_correct(): # if the normalization was done correctly. assert xp.all( xpx.isclose( - (flat_data.data * flat.data).mean(), - ccd_data.data.mean() * flat.data.mean(), + xp.mean(flat_data.data * flat.data), + xp.mean(ccd_data.data) * xp.mean(flat.data), rtol=1e-6, ) ) + assert xp.all( xpx.isclose( ccd_data.data / flat_data.data, @@ -612,7 +613,7 @@ def test_flat_correct_min_value(): # Create the flat data = 2 * RNG().normal(loc=1.0, scale=0.05, size=(size, size)) - flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit) + flat = CCDData(xp.asarray(data), meta=fits.header.Header(), unit=ccd_data.unit) flat_orig_data = flat.data.copy() min_value = 2.1 # Should replace some, but not all, values flat_corrected_data = flat_correct(ccd_data, flat, min_value=min_value) @@ -622,13 +623,12 @@ def test_flat_correct_min_value(): # Check that the flat was normalized. 
The asserts below, which look a # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) - assert xp.all( - xpx.isclose( - (flat_corrected_data.data * flat_with_min.data).mean(), - (ccd_data.data * flat_with_min.data.mean()).mean(), - rtol=1e-6, - ) + # This first one is just comparing two numbers, so use pytest.approx + assert (flat_corrected_data.data * flat_with_min.data).mean() == pytest.approx( + ccd_data.data.mean() * flat_with_min.data.mean(), + rel=1e-6, ) + assert xp.all( xpx.isclose( ccd_data.data / flat_corrected_data.data, @@ -652,7 +652,7 @@ def test_flat_correct_norm_value(): # the mean of the flat data. flat_mean = 5.0 data = RNG().normal(loc=1.0, scale=0.05, size=ccd_data.shape) - flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit) + flat = CCDData(xp.asarray(data), meta=fits.Header(), unit=ccd_data.unit) flat_data = flat_correct(ccd_data, flat, add_keyword=None, norm_value=flat_mean) # Check that the flat was normalized @@ -660,7 +660,7 @@ def test_flat_correct_norm_value(): # if the normalization was done correctly. assert xp.all( xpx.isclose( - (flat_data.data * flat.data).mean(), + float((flat_data.data * flat.data).mean()), ccd_data.data.mean() * flat_mean, rtol=1e-6, ) From 41e17e6f7c3e824a43185e9bf48c834d3f30204d Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 16 Jul 2025 15:43:49 -0600 Subject: [PATCH 065/111] Minor changes --- ccdproc/combiner.py | 2 +- ccdproc/core.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index a0e387fc..9c2302eb 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -184,7 +184,7 @@ def __init__(self, ccd_iter, dtype=None, xp=None): # set up the data array # new_shape = (len(ccd_list),) + default_shape - self._data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype) + self._data_arr = xp.asarray([ccd.data for ccd in ccd_list], dtype=dtype) # populate self.data_arr mask_list = [ diff --git a/ccdproc/core.py b/ccdproc/core.py index 34f7d840..e421ca3b 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -473,7 +473,9 @@ def create_deviation(ccd_data, gain=None, readnoise=None, disregard_nan=False, x # ensure uncertainty and image data have same unit ccd = ccd_data.copy() var /= gain_value + ccd.uncertainty = StdDevUncertainty(var) + return ccd From 7664bc02d1fefb3d7e3154e905e6e55059cf5dfa Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 16 Jul 2025 16:08:24 -0600 Subject: [PATCH 066/111] Add and use wrapper classes to ensure array API use Turns out there were several places where the functions in the array's namespace were not actually being used. This fixes that. 
Clean up uncertainty wrapper
---
 ccdproc/_ccddata_wrapper_for_array_api.py | 253 ++++++++++++++++++++++
 ccdproc/core.py                           |  96 ++++----
 2 files changed, 309 insertions(+), 40 deletions(-)
 create mode 100644 ccdproc/_ccddata_wrapper_for_array_api.py

diff --git a/ccdproc/_ccddata_wrapper_for_array_api.py b/ccdproc/_ccddata_wrapper_for_array_api.py
new file mode 100644
index 00000000..ea460743
--- /dev/null
+++ b/ccdproc/_ccddata_wrapper_for_array_api.py
@@ -0,0 +1,253 @@
+import array_api_compat
+import numpy as np
+from astropy import units as u
+from astropy.nddata import (
+    CCDData,
+    StdDevUncertainty,
+)
+from astropy.nddata.compat import NDDataArray
+from astropy.units import UnitsError
+
+
+class _NDDataArray(NDDataArray):
+    @NDDataArray.mask.setter
+    def mask(self, value):
+        xp = array_api_compat.array_namespace(self.data)
+        # Check that value is not either type of null mask.
+        if (value is not None) and (value is not np.ma.nomask):
+            mask = xp.asarray(value, dtype=bool)
+            if mask.shape != self.data.shape:
+                raise ValueError(
+                    f"dimensions of mask {mask.shape} and data "
+                    f"{self.data.shape} do not match"
+                )
+            else:
+                self._mask = mask
+        else:
+            # internal representation should be one numpy understands
+            self._mask = np.ma.nomask
+
+
+class _CCDDataWrapperForArrayAPI(CCDData):
+    """
+    Thin wrapper around CCDData to allow arithmetic operations with
+    arbitrary array API backends.
+    """
+
+    def _arithmetic_wrapper(self, operation, operand, result_unit, **kwargs):
+        """
+        Use NDDataArray for arithmetic because that does not force conversion
+        to Quantity (and hence numpy array). If there are units on the operands
+        then NDArithmeticMixin will convert to Quantity.
+        """
+        # Take the units off to make sure the arithmetic operation
+        # does not try to convert to Quantity.
+        if hasattr(self, "unit"):
+            self_unit = self.unit
+            self._unit = None
+        else:
+            self_unit = None
+
+        if hasattr(operand, "unit"):
+            operand_unit = operand.unit
+            operand._unit = None
+        else:
+            operand_unit = None
+
+        # Also take the units off of the uncertainty
+        if self_unit is not None and hasattr(self.uncertainty, "unit"):
+            self.uncertainty._unit = None
+
+        if (
+            operand_unit is not None
+            and hasattr(operand, "uncertainty")
+            and hasattr(operand.uncertainty, "unit")
+        ):
+            operand.uncertainty._unit = None
+
+        _result = _NDDataArray._prepare_then_do_arithmetic(
+            operation, self, operand, **kwargs
+        )
+        if self_unit:
+            self._unit = self_unit
+        if operand_unit:
+            operand._unit = operand_unit
+        # Also put the units back on the uncertainty
+        if hasattr(self, "uncertainty") and self.uncertainty is not None:
+            self.uncertainty._unit = self_unit
+
+        if hasattr(operand, "uncertainty") and operand.uncertainty is not None:
+            operand.uncertainty._unit = operand_unit
+
+        # We need to handle the mask separately because we want to return a
+        # genuine CCDData object and CCDData does not understand the
+        # array API.
+        result_mask = None
+        if _result.mask is not None:
+            result_mask = _result._mask
+            _result._mask = None
+        result = CCDData(_result, unit=result_unit)
+        result._mask = result_mask
+        return result
+
+    def subtract(self, operand, xp=None, **kwargs):
+        """
+        Determine the right operation to use and figure out
+        the units of the result.
+ """ + xp = xp or array_api_compat.array_namespace(self.data) + if not self.unit.is_equivalent(operand.unit): + raise UnitsError("Units must be equivalent for subtraction.") + result_unit = self.unit + handle_mask = kwargs.pop("handle_mask", xp.logical_or) + return self._arithmetic_wrapper( + xp.subtract, operand, result_unit, handle_mask=handle_mask, **kwargs + ) + + def add(self, operand, xp=None, **kwargs): + """ + Determine the right operation to use and figure out + the units of the result. + """ + xp = xp or array_api_compat.array_namespace(self.data) + if not self.unit.is_equivalent(operand.unit): + raise UnitsError("Units must be equivalent for addition.") + result_unit = self.unit + handle_mask = kwargs.pop("handle_mask", xp.logical_or) + return self._arithmetic_wrapper( + xp.add, operand, result_unit, handle_mask=handle_mask, **kwargs + ) + + def multiply(self, operand, xp=None, **kwargs): + """ + Determine the right operation to use and figure out + the units of the result. + """ + xp = xp or array_api_compat.array_namespace(self.data) + # The "1 *" below is because quantities do arithmetic properly + # but units do not necessarily. + if not hasattr(operand, "unit"): + operand_unit = 1 * u.dimensionless_unscaled + else: + operand_unit = operand.unit + result_unit = (1 * self.unit) * (1 * operand_unit) + handle_mask = kwargs.pop("handle_mask", xp.logical_or) + return self._arithmetic_wrapper( + xp.multiply, operand, result_unit, handle_mask=handle_mask, **kwargs + ) + + def divide(self, operand, xp=None, **kwargs): + """ + Determine the right operation to use and figure out + the units of the result. + """ + xp = xp or array_api_compat.array_namespace(self.data) + if not hasattr(operand, "unit"): + operand_unit = 1 * u.dimensionless_unscaled + else: + operand_unit = operand.unit + result_unit = (1 * self.unit) / (1 * operand_unit) + handle_mask = kwargs.pop("handle_mask", xp.logical_or) + return self._arithmetic_wrapper( + xp.divide, operand, result_unit, handle_mask=handle_mask, **kwargs + ) + + @NDDataArray.mask.setter + def mask(self, value): + xp = array_api_compat.array_namespace(self.data) + # Check that value is not either type of null mask. + if (value is not None) and (value is not np.ma.nomask): + mask = xp.asarray(value, dtype=bool) + if mask.shape != self.data.shape: + raise ValueError( + f"dimensions of mask {mask.shape} and data " + f"{self.data.shape} do not match" + ) + else: + self._mask = mask + else: + # internal representation should be one numpy understands + self._mask = np.ma.nomask + + +class _StdDevUncertaintyWrapper(StdDevUncertainty): + """ + Override propagate methods to make sure they use the array API. 
+        """
+
+    def _propagate_add(self, other_uncert, result_data, correlation):
+        xp = array_api_compat.array_namespace(self.array, other_uncert.array)
+        return super()._propagate_add_sub(
+            other_uncert,
+            result_data,
+            correlation,
+            subtract=False,
+            to_variance=xp.square,
+            from_variance=xp.sqrt,
+        )
+
+    def _propagate_subtract(self, other_uncert, result_data, correlation):
+        xp = array_api_compat.array_namespace(self.array, other_uncert.array)
+        return super()._propagate_add_sub(
+            other_uncert,
+            result_data,
+            correlation,
+            subtract=True,
+            to_variance=xp.square,
+            from_variance=xp.sqrt,
+        )
+
+    def _propagate_multiply(self, other_uncert, result_data, correlation):
+        xp = array_api_compat.array_namespace(self.array, other_uncert.array)
+        return super()._propagate_multiply_divide(
+            other_uncert,
+            result_data,
+            correlation,
+            divide=False,
+            to_variance=xp.square,
+            from_variance=xp.sqrt,
+        )
+
+    def _propagate_divide(self, other_uncert, result_data, correlation):
+        xp = array_api_compat.array_namespace(self.array, other_uncert.array)
+        return super()._propagate_multiply_divide(
+            other_uncert,
+            result_data,
+            correlation,
+            divide=True,
+            to_variance=xp.square,
+            from_variance=xp.sqrt,
+        )
+
+
+def _wrap_ccddata_for_array_api(ccd):
+    """
+    Wrap a CCDData object for use with array API backends.
+    """
+    if isinstance(ccd, _CCDDataWrapperForArrayAPI):
+        return ccd
+
+    _ccd = _CCDDataWrapperForArrayAPI(ccd)
+    if isinstance(_ccd.uncertainty, StdDevUncertainty):
+        _ccd.uncertainty = _StdDevUncertaintyWrapper(_ccd.uncertainty)
+    return _ccd
+
+
+def _unwrap_ccddata_for_array_api(ccd):
+    """
+    Unwrap a CCDData object from array API backends to the original CCDData.
+    """
+
+    if isinstance(ccd.uncertainty, _StdDevUncertaintyWrapper):
+        ccd.uncertainty = StdDevUncertainty(ccd.uncertainty.array)
+
+    # Check for the wrapper first: it is a subclass of CCDData, so the
+    # plain CCDData check below would otherwise match it too.
+    if isinstance(ccd, _CCDDataWrapperForArrayAPI):
+        # Convert back to CCDData
+        return CCDData(ccd)
+
+    if isinstance(ccd, CCDData):
+        return ccd
+
+    raise TypeError(
+        "Input must be a CCDData or _CCDDataWrapperForArrayAPI instance."
+    )

diff --git a/ccdproc/core.py b/ccdproc/core.py
index e421ca3b..32e9af2c 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -23,6 +23,10 @@
 from packaging import version as pkgversion
 from scipy import ndimage

+from ._ccddata_wrapper_for_array_api import (
+    _unwrap_ccddata_for_array_api,
+    _wrap_ccddata_for_array_api,
+)
 from .log_meta import log_to_metadata
 from .utils.slices import slice_from_string

@@ -695,7 +699,7 @@ def trim_image(ccd, fits_section=None):

 @log_to_metadata
-def subtract_bias(ccd, master):
+def subtract_bias(ccd, master, xp=None):
     """
     Subtract master bias from image.

@@ -714,9 +718,11 @@
     result : `~astropy.nddata.CCDData`
         CCDData object with bias subtracted.
""" - + xp = xp or array_api_compat.array_namespace(ccd.data) + _ccd = _wrap_ccddata_for_array_api(ccd) + _master = _wrap_ccddata_for_array_api(master) try: - result = ccd.subtract(master) + _result = _ccd.subtract(_master, handle_mask=xp.logical_or) except ValueError as err: if "operand units" in str(err): raise u.UnitsError( @@ -726,7 +732,7 @@ def subtract_bias(ccd, master): ) from err else: raise err - + result = _unwrap_ccddata_for_array_api(_result) result.meta = ccd.meta.copy() return result @@ -796,6 +802,8 @@ def subtract_dark( # Do this after the type check above xp = xp or array_api_compat.array_namespace(ccd.data) + _ccd = _wrap_ccddata_for_array_api(ccd) + _master = _wrap_ccddata_for_array_api(master) if ( data_exposure is not None and dark_exposure is not None @@ -833,13 +841,18 @@ def subtract_dark( try: if scale: - master_scaled = master.copy() + _master_scaled = _master.copy() # data_exposure and dark_exposure are both quantities, # so we can just have subtract do the scaling - master_scaled = master_scaled.multiply(data_exposure / dark_exposure) - result = ccd.subtract(master_scaled) + # Make sure we have just a number for the scaling, or we will + # be forced to use numpy by astropy Quantity. + # The cast to float is needed because the result will be a np.float, + # which will mess up the array namespace. + scale_factor = float((data_exposure / dark_exposure).decompose().value) + _master_scaled = _master_scaled.multiply(scale_factor) + _result = _ccd.subtract(_master_scaled, handle_mask=xp.logical_or) else: - result = ccd.subtract(master) + _result = _ccd.subtract(_master, handle_mask=xp.logical_or) except (u.UnitsError, u.UnitConversionError, ValueError) as err: # Make the error message a little more explicit than what is returned # by default. @@ -849,11 +862,7 @@ def subtract_dark( "image" ) from err - # TODO: Workaround for the fact that CCDData currently uses numpy - # for arithmetic operations. - result.data = xp.asarray(result.data) - if result.mask is not None: - result._mask = xp.asarray(result.mask) + result = _unwrap_ccddata_for_array_api(_result) result.meta = ccd.meta.copy() return result @@ -882,24 +891,19 @@ def gain_correct(ccd, gain, gain_unit=None, xp=None): result : `~astropy.nddata.CCDData` CCDData object with gain corrected. """ - xp = xp or array_api_compat.array_namespace(ccd.data) + _ccd = _wrap_ccddata_for_array_api(ccd) + + xp = xp or array_api_compat.array_namespace(_ccd.data) if isinstance(gain, Keyword): - gain_value = gain.value_from(ccd.header) + gain_value = gain.value_from(_ccd.header) elif isinstance(gain, numbers.Number) and gain_unit is not None: gain_value = gain * u.Unit(gain_unit) else: gain_value = gain - result = ccd.multiply(gain_value) - # TODO: Workaround for the fact that CCDData currently uses numpy - # for arithmetic operations. - result.data = xp.asarray(result.data) - if result.mask is not None: - # The private _mask attribute is set here to avoid the mask.setter - # that sets the mask to a numpy array. This can be removed when CCDData - # supports array namespaces. - result._mask = xp.asarray(result.mask) - result.meta = ccd.meta.copy() + _result = _ccd.multiply(gain_value, xp=xp, handle_mask=xp.logical_or) + result = _unwrap_ccddata_for_array_api(_result) + result.meta = _ccd.meta.copy() return result @@ -941,45 +945,57 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): ccd : `~astropy.nddata.CCDData` CCDData object with flat corrected. 
""" + + _ccd = _wrap_ccddata_for_array_api(ccd) + _flat = _wrap_ccddata_for_array_api(flat) # Get the array namespace - xp = xp or array_api_compat.array_namespace(ccd.data) + xp = xp or array_api_compat.array_namespace(_ccd.data) + # Use the min_value to replace any values in the flat - use_flat = flat + _use_flat = _flat if min_value is not None: - flat_min = flat.copy() - flat_min.data[flat_min.data < min_value] = min_value - use_flat = flat_min + _flat_min = _flat.copy() + xpx.at(_flat_min.data)[_flat_min.data < min_value].set(min_value) + _use_flat = _flat_min # If a norm_value was input and is positive, use it to scale the flat if norm_value is not None and norm_value > 0: flat_mean = ( - norm_value if hasattr(norm_value, "unit") else norm_value * use_flat.unit + norm_value if hasattr(norm_value, "unit") else norm_value * _use_flat.unit ) elif norm_value is not None: # norm_value was set to a bad value raise ValueError("norm_value must be greater than zero.") else: # norm_value was not set, use mean of the image. - flat_mean = xp.mean(use_flat.data) * use_flat.unit + flat_mean = xp.mean(_use_flat.data) * _use_flat.unit # Normalize the flat. - flat_normed = use_flat.divide(flat_mean) + # Make sure flat_mean is a plain python float so that we + # can use it with the array namespace. + flat_mean_unit = flat_mean.unit + flat_mean = float(flat_mean.decompose().value) + _flat_normed = _use_flat.divide(flat_mean, xp=xp) + + # We need to fix up the unit now wince we stripped the unit from + # the flat_mean above. + result_unit = ((1 * _use_flat.unit) / (1 * flat_mean_unit)).decompose().unit + _flat_normed.unit = result_unit # Set masked values to unity; the array element remains masked, but the data # value is set to unity to avoid runtime divide-by-zero errors that are due # to a masked value being set to 0. - if flat_normed.mask is not None and flat_normed.mask.any(): - flat_normed.data[flat_normed.mask] = 1.0 + if _flat_normed.mask is not None and _flat_normed.mask.any(): + _flat_normed.data[_flat_normed.mask] = 1.0 # divide through the flat - flat_corrected = ccd.divide(flat_normed) + _flat_corrected = _ccd.divide(_flat_normed, xp=xp, handle_mask=xp.logical_or) + flat_corrected = _unwrap_ccddata_for_array_api(_flat_corrected) # TODO: Workaround for the fact that CCDData currently uses numpy # for arithmetic operations. - flat_corrected.data = xp.asarray(flat_corrected.data) - if flat_corrected.mask is not None: - flat_corrected._mask = xp.asarray(flat_corrected.mask) - flat_corrected.meta = ccd.meta.copy() + + flat_corrected.meta = _ccd.meta.copy() return flat_corrected From fb6e7d6943b768934d74d3935ed85469e4a53ded Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 16 Jul 2025 15:55:27 -0600 Subject: [PATCH 067/111] Fix for immutable array types --- ccdproc/tests/test_ccdproc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 7303cace..e2dd54c0 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -618,7 +618,7 @@ def test_flat_correct_min_value(): min_value = 2.1 # Should replace some, but not all, values flat_corrected_data = flat_correct(ccd_data, flat, min_value=min_value) flat_with_min = flat.copy() - flat_with_min.data[flat_with_min.data < min_value] = min_value + xpx.at(flat_with_min.data)[flat_with_min.data < min_value].set(min_value) # Check that the flat was normalized. 
The asserts below, which look a # little odd, are correctly testing that From c4c1277d4ea1a9dce2e9bf6a422fdbc5d1afa403 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 16 Jul 2025 15:59:06 -0600 Subject: [PATCH 068/111] Ensure array namespace is used throughout core tests --- ccdproc/tests/test_ccdproc.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index e2dd54c0..eee980e1 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -2,6 +2,7 @@ import warnings +import array_api_compat import array_api_extra as xpx import astropy import astropy.units as u @@ -144,6 +145,7 @@ def test_create_deviation_keywords_must_have_unit(): ) def test_subtract_overscan(median, transpose, data_rectangle): ccd_data = ccd_data_func() + xp = array_api_compat.array_namespace(ccd_data.data) # Make data non-square if desired if data_rectangle: ccd_data.data = ccd_data.data[:, :-30] @@ -170,7 +172,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): # Add a fake sky background so the "science" part of the image has a # different average than the "overscan" part. sky = 10.0 - original_mean = science_data.mean() + original_mean = xp.mean(science_data) science_data = science_data + oscan + sky # Reconstruct the full image @@ -187,7 +189,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): # "science" section had before backgrounds were added? assert xp.all( xpx.isclose( - ccd_data_overscan.data[science_region].mean(), + xp.mean(ccd_data_overscan.data[science_region]), sky + original_mean, rtol=1e-6, ) @@ -208,7 +210,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): # "science" section had before backgrounds were added? assert xp.all( xpx.isclose( - ccd_data_fits_section.data[science_region].mean(), + xp.mean(ccd_data_fits_section.data[science_region]), sky + original_mean, rtol=1e-6, ) @@ -235,7 +237,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) assert xp.all( xpx.isclose( - ccd_data_overscan_auto.data[science_region].mean(), + xp.mean(ccd_data_overscan_auto.data[science_region]), sky + original_mean, rtol=1e-6, ) @@ -250,7 +252,7 @@ def test_subtract_overscan(median, transpose, data_rectangle): ) assert xp.all( xpx.isclose( - ccd_data_fits_section_overscan_auto.data[science_region].mean(), + xp.mean(ccd_data_fits_section_overscan_auto.data[science_region]), sky + original_mean, rtol=1e-6, ) @@ -302,7 +304,7 @@ def test_subtract_overscan_model(transpose): overscan_axis = 1 scan = yscan - original_mean = ccd_data.data[science_region].mean() + original_mean = xp.mean(ccd_data.data[science_region]) science_data = ccd_data.data[science_region].copy() # Set any existing overscan to zero. Overscan is stored for the entire @@ -319,7 +321,7 @@ def test_subtract_overscan_model(transpose): model=models.Polynomial1D(2), ) assert xp.all( - xpx.isclose(ccd_data.data[science_region].mean(), original_mean, atol=1e-5) + xpx.isclose(xp.mean(ccd_data.data[science_region]), original_mean, atol=1e-5) ) # Set the overscan_axis explicitly to None, and let the routine # figure it out. 
@@ -331,7 +333,7 @@ def test_subtract_overscan_model(transpose): model=models.Polynomial1D(2), ) assert xp.all( - xpx.isclose(ccd_data.data[science_region].mean(), original_mean, atol=1e-5) + xpx.isclose(xp.mean(ccd_data.data[science_region]), original_mean, atol=1e-5) ) @@ -597,7 +599,7 @@ def test_flat_correct(): assert xp.all( xpx.isclose( ccd_data.data / flat_data.data, - flat.data / flat.data.mean(), + flat.data / xp.mean(flat.data), rtol=1e-6, ) ) @@ -624,15 +626,15 @@ def test_flat_correct_min_value(): # little odd, are correctly testing that # flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min)) # This first one is just comparing two numbers, so use pytest.approx - assert (flat_corrected_data.data * flat_with_min.data).mean() == pytest.approx( - ccd_data.data.mean() * flat_with_min.data.mean(), + assert xp.mean(flat_corrected_data.data * flat_with_min.data) == pytest.approx( + xp.mean(ccd_data.data) * xp.mean(flat_with_min.data), rel=1e-6, ) assert xp.all( xpx.isclose( ccd_data.data / flat_corrected_data.data, - flat_with_min.data / flat_with_min.data.mean(), + flat_with_min.data / xp.mean(flat_with_min.data), rtol=1e-6, ) ) @@ -660,8 +662,8 @@ def test_flat_correct_norm_value(): # if the normalization was done correctly. assert xp.all( xpx.isclose( - float((flat_data.data * flat.data).mean()), - ccd_data.data.mean() * flat_mean, + xp.mean(flat_data.data * flat.data), + xp.mean(ccd_data.data) * flat_mean, rtol=1e-6, ) ) From 048f079770ed72e712cad32947f731f6e3a6e2da Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 16 Jul 2025 17:28:52 -0600 Subject: [PATCH 069/111] Include more uncertainty types in tests --- ccdproc/tests/test_ccdproc.py | 60 ++++++++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 8 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index eee980e1..a2ea1abb 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -10,7 +10,12 @@ import skimage from astropy.io import fits from astropy.modeling import models -from astropy.nddata import CCDData, StdDevUncertainty +from astropy.nddata import ( + CCDData, + InverseVariance, + StdDevUncertainty, + VarianceUncertainty, +) from astropy.units.quantity import Quantity from astropy.utils.exceptions import AstropyUserWarning from astropy.wcs import WCS @@ -1165,10 +1170,15 @@ def test_ccd_process_parameters_are_appropriate(): ccd_process(ccd_data, master_flat=3) -def test_ccd_process(): +@pytest.mark.parametrize( + "uncertainty_type", + [StdDevUncertainty, VarianceUncertainty, InverseVariance], +) +def test_ccd_process(uncertainty_type): # Test the through ccd_process + # Data has value 10 ccd_data = CCDData(10.0 * xp.ones((100, 100)), unit=u.adu) - # Rewrite to not change data in-place. 
+    # Sets overscan to 2 without changing data in-place.
     ccd_data.data = xp.concat([ccd_data.data[:, :-10], 2 * xp.ones((100, 10))], axis=1)
     ccd_data.meta["testkw"] = 100

@@ -1176,19 +1186,51 @@
     mask = xp.zeros((100, 90))

     masterbias = CCDData(2.0 * xp.ones((100, 90)), unit=u.electron)
-    masterbias.uncertainty = StdDevUncertainty(xp.zeros((100, 90)))
+    masterbias.uncertainty = uncertainty_type(xp.zeros((100, 90)))

     dark_frame = CCDData(0.0 * xp.ones((100, 90)), unit=u.electron)
-    dark_frame.uncertainty = StdDevUncertainty(xp.zeros((100, 90)))
+    dark_frame.uncertainty = uncertainty_type(xp.zeros((100, 90)))

     masterflat = CCDData(10.0 * xp.ones((100, 90)), unit=u.electron)
-    masterflat.uncertainty = StdDevUncertainty(xp.zeros((100, 90)))
+    masterflat.uncertainty = uncertainty_type(xp.zeros((100, 90)))
+
+    match uncertainty_type.__name__:
+        case "StdDevUncertainty":
+            # Let create_deviation make a StdDevUncertainty
+            error = True
+            expected_error_factor = 3.0
+
+        case "VarianceUncertainty":
+            # We supply the uncertainty as a variance, equivalent to StdDevUncertainty
+            # with value 6.0.
+            error = False
+            # For the values of the data above (8 adu after subtracting overscan), the
+            # readnoise of 5**0.5 * u.electron and gain of 0.5 * u.electron / u.adu
+            # below, the standard deviation prior to gain correction should be
+            # sqrt(8 * 0.5 + 5) / 0.5 = 6.0, so the variance is its square.
+            ccd_data.uncertainty = VarianceUncertainty(xp.ones((100, 90)) * 6.0**2)
+            # Value below is just the square of the factor for standard deviation.
+            expected_error_factor = 3.0**2
+        case "InverseVariance":
+            # We supply the uncertainty as an inverse variance, equivalent to
+            # StdDevUncertainty with value 6.0.
+            error = False
+            ccd_data.uncertainty = InverseVariance(xp.ones((100, 90)) / (6.0**2))
+            # The value below is the inverse of the square of the factor for standard
+            # deviation.
+            expected_error_factor = 3.0**-2
+            # We cannot use zero for the InverseVariance uncertainties in this case in
+            # the master images since 1 / 0 is undefined, so we need to set them all
+            # to None.
+            masterbias.uncertainty = None
+            dark_frame.uncertainty = None
+            masterflat.uncertainty = None

     occd = ccd_process(
         ccd_data,
         oscan=ccd_data[:, -10:],
         trim="[1:90,1:100]",
-        error=True,
+        error=error,
         master_bias=masterbias,
         master_flat=masterflat,
         dark_frame=dark_frame,
@@ -1205,7 +1247,9 @@
     # Error should be (4 + 5)**0.5 / 0.5 = 3.0
     assert xp.all(xpx.isclose(2.0 * xp.ones((100, 90)), occd.data))
-    assert xp.all(xpx.isclose(3.0 * xp.ones((100, 90)), occd.uncertainty.array))
+    assert xp.all(
+        xpx.isclose(expected_error_factor * xp.ones((100, 90)), occd.uncertainty.array)
+    )
     assert xp.all(xpx.isclose(mask, occd.mask))
     assert occd.unit == u.electron
     # Make sure the original keyword is still present.
Regression test for #401 From 359126ae7d11665590062515dba1821e9530fbb8 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 16 Jul 2025 21:10:56 -0600 Subject: [PATCH 070/111] Use array API correctly in combiner tests --- ccdproc/tests/test_combiner.py | 176 +++++++++++++++++++-------------- 1 file changed, 100 insertions(+), 76 deletions(-) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index ddd67412..99527b37 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -8,7 +8,6 @@ from astropy.stats import median_absolute_deviation as mad from astropy.utils.data import get_pkg_data_filename from numpy import median as np_median -from numpy import testing as np_testing from ccdproc.combiner import ( Combiner, @@ -143,12 +142,12 @@ def test_1Dweights(): ] combo = Combiner(ccd_list) - combo.weights = xp.array([1, 5, 10]) + combo.weights = xp.asarray([1, 5, 10]) ccd = combo.average_combine() - np_testing.assert_allclose(ccd.data, 312.5) + assert xp.all(xpx.isclose(ccd.data, 312.5)) with pytest.raises(ValueError): - combo.weights = xp.array([1, 5, 10, 20]) + combo.weights = xp.asarray([1, 5, 10, 20]) def test_pixelwise_weights(): @@ -159,10 +158,10 @@ def test_pixelwise_weights(): ] combo = Combiner(ccd_list) combo.weights = xp.ones_like(combo._data_arr) - combo.weights = xpx.at(combo.weights)[:, 5, 5].set(xp.array([1, 5, 10])) + combo.weights = xpx.at(combo.weights)[:, 5, 5].set(xp.asarray([1, 5, 10])) ccd = combo.average_combine() - np_testing.assert_allclose(ccd.data[5, 5], 312.5) - np_testing.assert_allclose(ccd.data[0, 0], 0) + assert xp.all(xpx.isclose(ccd.data[5, 5], 312.5)) + assert xp.all(xpx.isclose(ccd.data[0, 0], 0)) # test the min-max rejection @@ -294,26 +293,26 @@ def test_combiner_sum(): # test weighted sum def test_combiner_sum_weighted(): - ccd_data = CCDData(data=[[0, 1], [2, 3]], unit="adu") + ccd_data = CCDData(data=xp.asarray([[0, 1], [2, 3]]), unit="adu") ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) - c.weights = xp.array([1, 2, 3]) + c.weights = xp.asarray([1, 2, 3]) ccd = c.sum_combine() expected_result = sum(w * d.data for w, d in zip(c.weights, ccd_list, strict=True)) - np_testing.assert_allclose(ccd, expected_result) + assert xp.all(xpx.isclose(ccd.data, expected_result)) # test weighted sum def test_combiner_sum_weighted_by_pixel(): - ccd_data = CCDData(data=[[1, 2], [4, 8]], unit="adu") + ccd_data = CCDData(data=xp.asarray([[1, 2], [4, 8]]), unit="adu") ccd_list = [ccd_data, ccd_data, ccd_data] c = Combiner(ccd_list) # Weights below are chosen so that every entry in weights_pixel = [[8, 4], [2, 1]] - c.weights = xp.array([weights_pixel] * 3) + c.weights = xp.asarray([weights_pixel] * 3) ccd = c.sum_combine() - expected_result = [[24, 24], [24, 24]] - np_testing.assert_allclose(ccd, expected_result) + expected_result = xp.asarray([[24, 24], [24, 24]]) + assert xp.all(xpx.isclose(ccd.data, expected_result)) # This warning is generated by numpy and is expected when @@ -354,7 +353,7 @@ def test_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) + assert xp.all(xpx.isclose(avg_ccd.data.mean(), ccd_data.data.mean())) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? 
@@ -374,12 +373,12 @@ def test_combiner_with_scaling(): except AttributeError: pass - np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) + assert xp.all(xpx.isclose(np_median(med_ccd), np_median(med_inp_data))) # Set the scaling manually... combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) + assert xp.all(xpx.isclose(avg_ccd.data.mean(), ccd_data.data.mean())) assert avg_ccd.shape == ccd_data.shape @@ -448,7 +447,7 @@ def test_combine_average_fitsimages(): fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + assert xp.all(xpx.isclose(avgccd.data, ccd_by_combiner.data)) def test_combine_numpyndarray(): @@ -466,7 +465,7 @@ def test_combine_numpyndarray(): fitsfilename_list = [fitsfile] * 3 avgccd = combine(fitsfilename_list, output_file=None, method="average", unit=u.adu) # averaging same fits images should give back same fits image - np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + assert xp.all(xpx.isclose(avgccd.data, ccd_by_combiner.data)) def test_combiner_result_dtype(): @@ -479,12 +478,12 @@ def test_combiner_result_dtype(): # The default dtype of Combiner is float64 assert res.data.dtype == xp.float64 ref = xp.ones((3, 3)) * 1.5 - np_testing.assert_allclose(res.data, ref) + assert xp.all(xpx.isclose(res.data, ref)) res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int) # The result dtype should be integer: assert xp.isdtype(res.data.dtype, "integral") ref = xp.ones((3, 3)) * 2 - np_testing.assert_allclose(res.data, ref) + assert xp.all(xpx.isclose(res.data, ref)) def test_combiner_image_file_collection_input(tmp_path): @@ -494,14 +493,29 @@ def test_combiner_image_file_collection_input(tmp_path): ccd.write(tmp_path / f"ccd-{i}.fits") ifc = ImageFileCollection(tmp_path) - comb = Combiner(ifc.ccds()) - np_testing.assert_allclose(ccd.data, comb.average_combine().data) + ccds = list(ifc.ccds()) + + # Need to convert these to the array namespace. 
+ for a_ccd in ccds: + a_ccd.data = xp.asarray(a_ccd.data, dtype=xp.float64) + if a_ccd.mask is not None: + a_ccd.mask = xp.asarray(a_ccd.mask, dtype=bool) + if a_ccd.uncertainty is not None: + a_ccd.uncertainty.array = xp.asarray( + a_ccd.uncertainty.array, dtype=xp.float64 + ) + comb = Combiner(ccds) + + # Do this on a separate line from the assert to make debugging easier + result = comb.average_combine() + assert xp.all(xpx.isclose(ccd.data, result.data)) def test_combine_image_file_collection_input(tmp_path): # Another regression check for #754 but this time with the # combine function instead of Combiner ccd = ccd_data_func() + xp = array_api_compat.array_namespace(ccd.data) for i in range(3): ccd.write(tmp_path / f"ccd-{i}.fits") @@ -515,9 +529,9 @@ def test_combine_image_file_collection_input(tmp_path): ",".join(ifc.files_filtered(include_path=True)), method="average" ) - np_testing.assert_allclose(ccd.data, comb_files.data) - np_testing.assert_allclose(ccd.data, comb_ccds.data) - np_testing.assert_allclose(ccd.data, comb_string.data) + assert xp.all(xpx.isclose(ccd.data, comb_files.data)) + assert xp.all(xpx.isclose(ccd.data, comb_ccds.data)) + assert xp.all(xpx.isclose(ccd.data, comb_string.data)) with pytest.raises(FileNotFoundError): # This should fail because the test is not running in the @@ -535,7 +549,7 @@ def test_combine_average_ccddata(): avgccd = combine(ccd_list, output_file=None, method="average", unit=u.adu) # averaging same ccdData should give back same images - np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + assert xp.all(xpx.isclose(avgccd.data, ccd_by_combiner.data)) # test combiner convenience function reads fits file and @@ -552,7 +566,7 @@ def test_combine_limitedmem_fitsimages(): fitsfilename_list, output_file=None, method="average", mem_limit=1e6, unit=u.adu ) # averaging same ccdData should give back same images - np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + assert xp.all(xpx.isclose(avgccd.data, ccd_by_combiner.data)) # test combiner convenience function reads fits file and @@ -577,7 +591,7 @@ def test_combine_limitedmem_scale_fitsimages(): unit=u.adu, ) - np_testing.assert_allclose(avgccd.data, ccd_by_combiner.data) + assert xp.all(xpx.isclose(avgccd.data, ccd_by_combiner.data)) # test the optional uncertainty function in average_combine @@ -587,12 +601,12 @@ def test_average_combine_uncertainty(): c = Combiner(ccd_list) ccd = c.average_combine(uncertainty_func=xp.sum) uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3) - np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + assert xp.all(xpx.isclose(ccd.uncertainty.array, uncert_ref)) # Compare this also to the "combine" call ccd2 = combine(ccd_list, method="average", combine_uncertainty_function=xp.sum) - np_testing.assert_allclose(ccd.data, ccd2.data) - np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + assert xp.all(xpx.isclose(ccd.data, ccd2.data)) + assert xp.all(xpx.isclose(ccd.uncertainty.array, ccd2.uncertainty.array)) # test the optional uncertainty function in median_combine @@ -602,12 +616,12 @@ def test_median_combine_uncertainty(): c = Combiner(ccd_list) ccd = c.median_combine(uncertainty_func=xp.sum) uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3) - np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + assert xp.all(xpx.isclose(ccd.uncertainty.array, uncert_ref)) # Compare this also to the "combine" call ccd2 = combine(ccd_list, method="median", combine_uncertainty_function=xp.sum) - 
np_testing.assert_allclose(ccd.data, ccd2.data) - np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + assert xp.all(xpx.isclose(ccd.data, ccd2.data)) + assert xp.all(xpx.isclose(ccd.uncertainty.array, ccd2.uncertainty.array)) # test the optional uncertainty function in sum_combine @@ -617,12 +631,12 @@ def test_sum_combine_uncertainty(): c = Combiner(ccd_list) ccd = c.sum_combine(uncertainty_func=xp.sum) uncert_ref = xp.sum(c._data_arr, 0) * xp.sqrt(3) - np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref) + assert xp.all(xpx.isclose(ccd.uncertainty.array, uncert_ref)) # Compare this also to the "combine" call ccd2 = combine(ccd_list, method="sum", combine_uncertainty_function=xp.sum) - np_testing.assert_allclose(ccd.data, ccd2.data) - np_testing.assert_allclose(ccd.uncertainty.array, ccd2.uncertainty.array) + assert xp.all(xpx.isclose(ccd.data, ccd2.data)) + assert xp.all(xpx.isclose(ccd.uncertainty.array, ccd2.uncertainty.array)) # Ignore warnings generated because most values are masked and we divide @@ -667,8 +681,12 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): ccd_list, method=combine_method_name, minmax_clip=True, minmax_clip_min=-100 ) - np_testing.assert_allclose( - ccd_comb.uncertainty.array, expected_result.uncertainty.array + assert xp.all( + xpx.isclose( + ccd_comb.uncertainty.array, + expected_result.uncertainty.array, + equal_nan=True, + ) ) # Check that the right point is masked, and only one point is @@ -703,7 +721,7 @@ def test_combine_overwrite_output(tmp_path): res_from_disk = CCDData.read(output_file) # Data should be the same - np_testing.assert_allclose(res.data, res_from_disk.data) + assert xp.all(xpx.isclose(res.data, res_from_disk.data)) # test resulting uncertainty is corrected for the number of images @@ -718,7 +736,7 @@ def test_combiner_uncertainty_average(): ref_uncertainty = xp.ones((10, 10)) / 2 # Correction because we combined two images. ref_uncertainty /= xp.sqrt(2) - np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + assert xp.all(xpx.isclose(ccd.uncertainty.array, ref_uncertainty)) # test resulting uncertainty is corrected for the number of images (with mask) @@ -734,13 +752,13 @@ def test_combiner_uncertainty_average_mask(): c = Combiner(ccd_list) ccd = c.average_combine() # Just the standard deviation of ccd data. - ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.array([1, 2, 3])) + ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.asarray([1, 2, 3])) # Correction because we combined two images. ref_uncertainty /= xp.sqrt(3) ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( - xp.std(xp.array([2, 3])) / xp.sqrt(2) + xp.std(xp.asarray([2, 3])) / xp.sqrt(2) ) - np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + assert xp.all(xpx.isclose(ccd.uncertainty.array, ref_uncertainty)) # test resulting uncertainty is corrected for the number of images (with mask) @@ -766,7 +784,7 @@ def test_combiner_uncertainty_median_mask(): # it to a regular float fixes that. med_value = float(mad_to_sigma * mad([2, 3]) / xp.sqrt(2)) ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set(med_value) # 0.524179041254 - np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + assert xp.all(xpx.isclose(ccd.uncertainty.array, ref_uncertainty)) # test resulting uncertainty is corrected for the number of images (with mask) @@ -782,12 +800,12 @@ def test_combiner_uncertainty_sum_mask(): c = Combiner(ccd_list) ccd = c.sum_combine() # Just the standard deviation of ccd data. 
- ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.array([1, 2, 3])) + ref_uncertainty = xp.ones((10, 10)) * xp.std(xp.asarray([1, 2, 3])) ref_uncertainty *= xp.sqrt(3) ref_uncertainty = xpx.at(ref_uncertainty)[5, 5].set( - xp.std(xp.array([2, 3])) * xp.sqrt(2) + xp.std(xp.asarray([2, 3])) * xp.sqrt(2) ) - np_testing.assert_allclose(ccd.uncertainty.array, ref_uncertainty) + assert xp.all(xpx.isclose(ccd.uncertainty.array, ref_uncertainty)) def test_combiner_3d(): @@ -803,7 +821,7 @@ def test_combiner_3d(): ccd = c.average_combine() assert ccd.shape == (5, 5, 5) - np_testing.assert_allclose(ccd.data, data1) + assert xp.all(xpx.isclose(ccd.data, data1.data)) def test_3d_combiner_with_scaling(): @@ -820,7 +838,7 @@ def test_3d_combiner_with_scaling(): avg_ccd = combiner.average_combine() # Does the mean of the scaled arrays match the value to which it was # scaled? - np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) + assert xp.all(xpx.isclose(avg_ccd.data.mean(), ccd_data.data.mean())) assert avg_ccd.shape == ccd_data.shape median_ccd = combiner.median_combine() # Does median also scale to the correct value? @@ -833,12 +851,12 @@ def test_3d_combiner_with_scaling(): except AttributeError: pass - np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data)) + assert xp.all(xpx.isclose(np_median(med_ccd), np_median(med_inp_data))) # Set the scaling manually... combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)] avg_ccd = combiner.average_combine() - np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean()) + assert xp.all(xpx.isclose(avg_ccd.data.mean(), ccd_data.data.mean())) assert avg_ccd.shape == ccd_data.shape @@ -855,7 +873,7 @@ def test_clip_extrema_3d(): c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() expected = CCDData(xp.ones((3, 3, 3)) * 30, unit="adu") - np_testing.assert_allclose(result, expected) + assert xp.all(xpx.isclose(result.data, expected.data)) @pytest.mark.parametrize( @@ -887,12 +905,14 @@ def test_clip_extrema_alone(): c = Combiner(ccdlist) c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() - expected = [ - [30.0, 22.5, 30.0, 30.0, 30.0], - [30.0, 30.0, 47.5, 30.0, 30.0], - [47.5, 30.0, 30.0, 30.0, 30.0], - ] - np_testing.assert_allclose(result, expected) + expected = xp.asarray( + [ + [30.0, 22.5, 30.0, 30.0, 30.0], + [30.0, 30.0, 47.5, 30.0, 30.0], + [47.5, 30.0, 30.0, 30.0, 30.0], + ] + ) + assert xp.all(xpx.isclose(result.data, expected)) def test_clip_extrema_via_combine(): @@ -913,12 +933,14 @@ def test_clip_extrema_via_combine(): nlow=1, nhigh=1, ) - expected = [ - [30.0, 22.5, 30.0, 30.0, 30.0], - [30.0, 30.0, 47.5, 30.0, 30.0], - [47.5, 30.0, 30.0, 30.0, 30.0], - ] - np_testing.assert_allclose(result, expected) + expected = xp.asarray( + [ + [30.0, 22.5, 30.0, 30.0, 30.0], + [30.0, 30.0, 47.5, 30.0, 30.0], + [47.5, 30.0, 30.0, 30.0, 30.0], + ] + ) + assert xp.all(xpx.isclose(result.data, expected)) def test_clip_extrema_with_other_rejection(): @@ -941,12 +963,14 @@ def test_clip_extrema_with_other_rejection(): c.clip_extrema(nlow=1, nhigh=1) result = c.average_combine() - expected = [ - [80.0 / 3.0, 22.5, 30.0, 30.0, 30.0], - [30.0, 30.0, 47.5, 30.0, 30.0], - [47.5, 30.0, 30.0, 30.0, 30.0], - ] - np_testing.assert_allclose(result, expected) + expected = xp.asarray( + [ + [80.0 / 3.0, 22.5, 30.0, 30.0, 30.0], + [30.0, 30.0, 47.5, 30.0, 30.0], + [47.5, 30.0, 30.0, 30.0, 30.0], + ] + ) + assert xp.all(xpx.isclose(result.data, expected)) # The expected values below assume an 
image that is 2000x2000 @@ -1002,7 +1026,7 @@ def test_combiner_with_scaling_uncertainty(comb_func): scale_by_mean = _make_mean_scaler(ccd_data) combiner.scaling = scale_by_mean - scaled_ccds = xp.array( + scaled_ccds = xp.asarray( [ ccd_data.data * scale_by_mean(ccd_data.data), ccd_data_lower.data * scale_by_mean(ccd_data_lower.data), @@ -1019,7 +1043,7 @@ def test_combiner_with_scaling_uncertainty(comb_func): expected_unc = uncertainty_func(scaled_ccds, axis=0) - np_testing.assert_allclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10) + assert xp.all(xpx.isclose(avg_ccd.uncertainty.array, expected_unc, atol=1e-10)) @pytest.mark.parametrize( @@ -1051,7 +1075,7 @@ def my_summer(data, mask, axis=None): else: new_data.append(xp.zeros_like(data[i])) - new_data = xp.array(new_data) + new_data = xp.asarray(new_data) def sum_func(_, axis=axis): return xp.sum(new_data, axis=axis) @@ -1073,4 +1097,4 @@ def sum_func(_, axis=axis): # method is the result in this pixel should be 2. expected_result = xpx.at(expected_result)[5, 5].set(2) - np_testing.assert_allclose(expected_result, actual_result) + assert xp.all(xpx.isclose(expected_result, actual_result.data)) From bb42d5bf718d738347709d2015a44a5e01d57950 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 16 Jul 2025 21:11:30 -0600 Subject: [PATCH 071/111] Add array namespace input to combine function --- ccdproc/combiner.py | 52 ++++++++++++++++++++++++++++++++-- ccdproc/tests/test_combiner.py | 10 +++++-- 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 9c2302eb..16e5a0d8 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -2,6 +2,8 @@ """This module implements the combiner class.""" +from copy import deepcopy + try: import bottleneck as bn except ImportError: @@ -842,6 +844,7 @@ def combine( dtype=None, combine_uncertainty_function=None, overwrite_output=False, + array_package=None, **ccdkwargs, ): """ @@ -945,6 +948,18 @@ def combine( has no effect otherwise. Default is ``False``. + array_package : an array namespace, optional + The array package to use for the data if the data needs to be + read in from files. This argument is ignored if the input `ccd_list` + is already a list of `~astropy.nddata.CCDData` objects. + + If not specified, the array package used will + be numpy. The array package can be specified either by passing in + an array namespace (e.g. output from ``array_api_compat.array_namespace``), + or an imported array package that follows the array API standard + (e.g. ``numpy`` or ``jax.numpy``), or an array whose namespace can be + determined (e.g. a `numpy.ndarray` or `jax.numpy.ndarray`). + ccdkwargs : Other keyword arguments for `astropy.nddata.fits_ccddata_reader`. Returns @@ -993,10 +1008,25 @@ def combine( else: # User has provided fits filenames to read from ccd = CCDData.read(img_list[0], **ccdkwargs) + # The ccd object will always read as numpy, so convert it to the + # requested namespace if there is one. 
+            if array_package is not None:
+                try:
+                    xp = array_api_compat.array_namespace(array_package)
+                except TypeError:
+                    xp = array_package

-    # Get the array namespace
-    xp = array_api_compat.array_namespace(ccd.data)
+            ccd.data = xp.asarray(ccd.data, dtype=ccd.data.dtype.type)
+            if ccd.uncertainty is not None:
+                ccd.uncertainty.array = xp.asarray(
+                    ccd.uncertainty.array, dtype=ccd.uncertainty.array.dtype.type
+                )
+            if ccd.mask is not None:
+                ccd.mask = xp.asarray(ccd.mask, dtype=bool)
+
+    # Get the array namespace; if array_package was not None and files were read in,
+    # then xp, the namespace of ccd.data, will be the same as array_package.
+    xp = array_api_compat.array_namespace(ccd.data)
     if dtype is None:
         dtype = xp.float64
@@ -1065,6 +1095,14 @@
                 imgccd = image
             else:
                 imgccd = CCDData.read(image, **ccdkwargs)
+                if array_package is not None:
+                    imgccd.data = xp.asarray(imgccd.data, dtype=dtype)
+                    if imgccd.uncertainty is not None:
+                        imgccd.uncertainty.array = xp.asarray(
+                            imgccd.uncertainty.array, dtype=dtype
+                        )
+                    if imgccd.mask is not None:
+                        imgccd.mask = xp.asarray(imgccd.mask, dtype=bool)

             scalevalues.append(scale(imgccd.data))

@@ -1100,13 +1138,21 @@
                 imgccd = image
             else:
                 imgccd = CCDData.read(image, **ccdkwargs)
+                if array_package is not None:
+                    imgccd.data = xp.asarray(imgccd.data, dtype=dtype)
+                    if imgccd.uncertainty is not None:
+                        imgccd.uncertainty.array = xp.asarray(
+                            imgccd.uncertainty.array, dtype=dtype
+                        )
+                    if imgccd.mask is not None:
+                        imgccd.mask = xp.asarray(imgccd.mask, dtype=bool)

             # Trim image and copy
             # The copy is *essential* to avoid having a bunch
             # of unused file references around if the files
             # are memory-mapped. See this PR for details
             # https://github.com/astropy/ccdproc/pull/630
-            ccd_list.append(imgccd[x:xend, y:yend].copy())
+            ccd_list.append(deepcopy(imgccd[x:xend, y:yend]))

             # Create Combiner for tile
             tile_combiner = Combiner(ccd_list, dtype=dtype)

diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 99527b37..bab3abbc 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -521,12 +521,16 @@ def test_combine_image_file_collection_input(tmp_path):

     ifc = ImageFileCollection(tmp_path)

-    comb_files = combine(ifc.files_filtered(include_path=True), method="average")
+    comb_files = combine(
+        ifc.files_filtered(include_path=True), method="average", array_package=xp
+    )

-    comb_ccds = combine(ifc.ccds(), method="average")
+    comb_ccds = combine(ifc.ccds(), method="average", array_package=xp)

     comb_string = combine(
-        ",".join(ifc.files_filtered(include_path=True)), method="average"
+        ",".join(ifc.files_filtered(include_path=True)),
+        method="average",
+        array_package=xp,
     )

     assert xp.all(xpx.isclose(ccd.data, comb_files.data))

From ba83726ccbb02df57e977d1eb592b02a7f8fed7b Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Thu, 17 Jul 2025 17:06:34 -0600
Subject: [PATCH 072/111] Add an array namespace conversion in one more place

---
 ccdproc/core.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index 32e9af2c..e47216df 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -1198,7 +1198,12 @@ def sigma_func(arr, axis=None, ignore_nan=False):
     uncertainty : float
         uncertainty of array estimated from median absolute deviation.
""" - return ( + if isinstance(arr, CCDData): + xp = array_api_compat.array_namespace(arr.data) + else: + xp = array_api_compat.array_namespace(arr) + + return xp.asarray( stats.median_absolute_deviation(arr, axis=axis, ignore_nan=ignore_nan) * 1.482602218505602 ) From 698c6facb36d94433628df035ae624235003265d Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 17 Jul 2025 17:07:23 -0600 Subject: [PATCH 073/111] Add argument for desired array namespace --- ccdproc/image_collection.py | 56 +++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py index 6a01066b..85720c8c 100644 --- a/ccdproc/image_collection.py +++ b/ccdproc/image_collection.py @@ -7,6 +7,7 @@ from collections import OrderedDict from os import listdir, path +import array_api_compat import astropy.io.fits as fits import numpy as np # see numpy comment below import numpy.ma as ma @@ -79,6 +80,19 @@ class ImageFileCollection: The extension from which the header and data will be read in all files.Default is ``0``. + array_package : an array namespace, optional + The array package to use for the data if the data is returned. + This argument is ignored if the returned item is ``header``. The + default is to use ``numpy`` for arrays. + + If not specified, the array package used will + be numpy. The array package can be specified either by passing in + an array namespace (e.g. output from ``array_api_compat.array_namespace``), + or an imported array package that follows the array API standard + (e.g. ``numpy`` or ``jax.numpy``), or an array whose namespace can be + determined (e.g. a `numpy.ndarray` or `jax.numpy.ndarray`). + + Raises ------ ValueError @@ -95,6 +109,7 @@ def __init__( glob_include=None, glob_exclude=None, ext=0, + array_package=None, ): # Include or exclude files from the collection based on glob pattern # matching - has to go above call to _get_files() @@ -134,6 +149,17 @@ def __init__( self._ext = ext + xp = None + if array_package is not None: + try: + # Maybe we got passed an array... + xp = array_api_compat.array_namespace(array_package) + except TypeError: + # Nope, got a type error, so assume we got passed + # an array namespace. + xp = array_package + self._xp = xp + if keywords: self.keywords = keywords @@ -893,6 +919,9 @@ def _generator( if not self.summary: return + # Make sure xp is defined. + xp = self._xp + current_mask = {} for col in self.summary.columns: current_mask[col] = self.summary[col].mask @@ -914,16 +943,43 @@ def _generator( return_thing = fits.getheader(full_path, self.ext) elif return_type == "data": return_thing = fits.getdata(full_path, self.ext, **add_kwargs) + # By default we will get a numpy array, but if the user + # specified an array package, we will convert it to that type. 
+ if xp is not None: + return_thing = xp.asarray( + # Turns out only numpy understands dtypes like ">f8" so use + # .type + return_thing, + dtype=return_thing.dtype.type, + ) elif return_type == "ccd": return_thing = fits_ccddata_reader( full_path, hdu=self.ext, **ccd_kwargs ) + if xp is not None: + return_thing.data = xp.asarray( + return_thing.data, dtype=return_thing.data.dtype.type + ) + if return_thing.uncertainty is not None: + return_thing.uncertainty.array = xp.asarray( + return_thing.uncertainty.array, + dtype=return_thing.uncertainty.array.dtype.type, + ) + if return_thing.mask is not None: + return_thing.mask = xp.asarray(return_thing.mask, dtype=bool) + elif return_type == "hdu": with fits.open(full_path, **add_kwargs) as hdulist: ext_index = hdulist.index_of(self.ext) # Need to copy the HDU to prevent lazy loading problems # and "IO operations on closed file" errors return_thing = hdulist[ext_index].copy() + if xp is not None: + # Convert the data to the specified array package + return_thing.data = xp.asarray( + return_thing.data, dtype=return_thing.data.dtype.type + ) + else: raise ValueError(f"no generator for {return_type}") From 7c7ce31d642a6c04be6e244ed0f43608659f6c4c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 17 Jul 2025 17:07:52 -0600 Subject: [PATCH 074/111] Specify namespace in a couple of tests where data is read from disk --- ccdproc/tests/test_combiner.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index bab3abbc..c03e4a21 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -519,7 +519,7 @@ def test_combine_image_file_collection_input(tmp_path): for i in range(3): ccd.write(tmp_path / f"ccd-{i}.fits") - ifc = ImageFileCollection(tmp_path) + ifc = ImageFileCollection(tmp_path, array_package=xp) comb_files = combine( ifc.files_filtered(include_path=True), method="average", array_package=xp @@ -703,7 +703,7 @@ def test_combine_result_uncertainty_and_mask(comb_func, mask_point): def test_combine_overwrite_output(tmp_path): """ - The combiune function should *not* overwrite the result file + The combine function should *not* overwrite the result file unless the overwrite_output argument is True """ output_file = tmp_path / "fake.fits" @@ -722,7 +722,11 @@ def test_combine_overwrite_output(tmp_path): [ccd, ccd.multiply(2)], output_file=output_file, overwrite_output=True ) + # Need to convert this to the array namespace. res_from_disk = CCDData.read(output_file) + res_from_disk.data = xp.asarray( + res_from_disk.data, dtype=res_from_disk.data.dtype.type + ) # Data should be the same assert xp.all(xpx.isclose(res.data, res_from_disk.data)) From 9a5ccd80be8d7733e7794dc6c507ac5d89bc1b1f Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 18 Jul 2025 11:09:58 -0600 Subject: [PATCH 075/111] Fix broken links in docstrings --- ccdproc/combiner.py | 4 ++-- ccdproc/image_collection.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 16e5a0d8..78b1a81e 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -950,7 +950,7 @@ def combine( array_package : an array namespace, optional The array package to use for the data if the data needs to be - read in from files. This argument is ignored if the input `ccd_list` + read in from files. This argument is ignored if the input ``ccd_list`` is already a list of `~astropy.nddata.CCDData` objects. 
If not specified, the array package used will @@ -958,7 +958,7 @@ def combine( an array namespace (e.g. output from ``array_api_compat.array_namespace``), or an imported array package that follows the array API standard (e.g. ``numpy`` or ``jax.numpy``), or an array whose namespace can be - determined (e.g. a `numpy.ndarray` or `jax.numpy.ndarray`). + determined (e.g. a `numpy.ndarray` or ``jax.numpy.ndarray``). ccdkwargs : Other keyword arguments for `astropy.nddata.fits_ccddata_reader`. diff --git a/ccdproc/image_collection.py b/ccdproc/image_collection.py index 85720c8c..f6228a92 100644 --- a/ccdproc/image_collection.py +++ b/ccdproc/image_collection.py @@ -90,7 +90,7 @@ class ImageFileCollection: an array namespace (e.g. output from ``array_api_compat.array_namespace``), or an imported array package that follows the array API standard (e.g. ``numpy`` or ``jax.numpy``), or an array whose namespace can be - determined (e.g. a `numpy.ndarray` or `jax.numpy.ndarray`). + determined (e.g. a `numpy.ndarray` or ``jax.numpy.ndarray``). Raises From 4503ddf42e1eec61381663267fe51423f8d00756 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Fri, 18 Jul 2025 11:21:06 -0600 Subject: [PATCH 076/111] Switch more tests away from np.testing or explain why not switching --- ccdproc/tests/test_gain.py | 20 +++++++++----------- ccdproc/tests/test_image_collection.py | 2 ++ ccdproc/tests/test_rebin.py | 2 ++ ccdproc/tests/test_wrapped_external_funcs.py | 1 + ccdproc/utils/tests/test_slices.py | 2 ++ 5 files changed, 16 insertions(+), 11 deletions(-) diff --git a/ccdproc/tests/test_gain.py b/ccdproc/tests/test_gain.py index a52d529e..a099c346 100644 --- a/ccdproc/tests/test_gain.py +++ b/ccdproc/tests/test_gain.py @@ -1,9 +1,11 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst +import array_api_extra as xpx import astropy.units as u -import numpy as np import pytest +# Set up the array library to be used in tests +from ccdproc.conftest import testing_array_library as xp from ccdproc.core import Keyword, create_deviation, gain_correct from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func @@ -22,7 +24,7 @@ def test_linear_gain_correct(gain): ccd_data = ccd_data_func() # The data values should be positive, so the poisson noise calculation # works without throwing warnings - ccd_data.data = np.absolute(ccd_data.data) + ccd_data.data = xp.abs(ccd_data.data) ccd_data = create_deviation(ccd_data, readnoise=1.0 * u.adu) ccd_data.meta["gainval"] = 3.0 orig_data = ccd_data.data @@ -34,10 +36,8 @@ def test_linear_gain_correct(gain): except AttributeError: gain_value = gain - np.testing.assert_array_almost_equal_nulp(ccd.data, gain_value * orig_data) - np.testing.assert_array_almost_equal_nulp( - ccd.uncertainty.array, gain_value * ccd_data.uncertainty.array - ) + xp.all(xpx.isclose(ccd.data, gain_value * orig_data)) + xp.all(xpx.isclose(ccd.uncertainty.array, gain_value * ccd_data.uncertainty.array)) if isinstance(gain, u.Quantity): assert ccd.unit == ccd_data.unit * gain.unit @@ -50,15 +50,13 @@ def test_linear_gain_unit_keyword(): ccd_data = ccd_data_func() # The data values should be positive, so the poisson noise calculation # works without throwing warnings - ccd_data.data = np.absolute(ccd_data.data) + ccd_data.data = xp.abs(ccd_data.data) ccd_data = create_deviation(ccd_data, readnoise=1.0 * u.adu) orig_data = ccd_data.data gain = 3.0 gain_unit = u.electron / u.adu ccd = gain_correct(ccd_data, gain, gain_unit=gain_unit) - np.testing.assert_array_almost_equal_nulp(ccd.data, gain * 
orig_data)
-    np.testing.assert_array_almost_equal_nulp(
-        ccd.uncertainty.array, gain * ccd_data.uncertainty.array
-    )
+    assert xp.all(xpx.isclose(ccd.data, gain * orig_data))
+    assert xp.all(xpx.isclose(ccd.uncertainty.array, gain * ccd_data.uncertainty.array))
 
     assert ccd.unit == ccd_data.unit * gain_unit
diff --git a/ccdproc/tests/test_image_collection.py b/ccdproc/tests/test_image_collection.py
index 4b79afc0..b08d7049 100644
--- a/ccdproc/tests/test_image_collection.py
+++ b/ccdproc/tests/test_image_collection.py
@@ -8,6 +8,8 @@
 from tempfile import NamedTemporaryFile, TemporaryDirectory, mkdtemp
 
 import astropy.io.fits as fits
+
+# Will continue to use np.testing here since we are not testing the array API
 import numpy as np
 import pytest
 from astropy.io.fits.verify import VerifyWarning
diff --git a/ccdproc/tests/test_rebin.py b/ccdproc/tests/test_rebin.py
index d240f169..1a15b0f0 100644
--- a/ccdproc/tests/test_rebin.py
+++ b/ccdproc/tests/test_rebin.py
@@ -1,5 +1,7 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
 
+# Not updating to array API in here because rebin will be removed
+# in version 3.0 of ccdproc.
 import numpy as np
 import pytest
 from astropy.nddata import StdDevUncertainty
diff --git a/ccdproc/tests/test_wrapped_external_funcs.py b/ccdproc/tests/test_wrapped_external_funcs.py
index 744798e1..d5064c97 100644
--- a/ccdproc/tests/test_wrapped_external_funcs.py
+++ b/ccdproc/tests/test_wrapped_external_funcs.py
@@ -1,5 +1,6 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
 
+# Not updating to array API here until the scipy package adopts the array API.
 import numpy as np
 from astropy.nddata import CCDData, StdDevUncertainty
 from scipy import ndimage
diff --git a/ccdproc/utils/tests/test_slices.py b/ccdproc/utils/tests/test_slices.py
index 262230b8..f94f4971 100644
--- a/ccdproc/utils/tests/test_slices.py
+++ b/ccdproc/utils/tests/test_slices.py
@@ -1,5 +1,7 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
 
+# Using numpy for testing here is fine since this does not test anything
+# that uses the array API.
 import numpy as np
 import pytest
 
From 9afb5a61c65ddf8173c9206b19f3a98b2e32fa53 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Mon, 21 Jul 2025 12:12:39 -0600
Subject: [PATCH 077/111] Minimal array API docs

---
 ccdproc/_ccddata_wrapper_for_array_api.py |   4 +
 docs/array_api.rst                        | 113 ++++++++++++++++++++++
 docs/index.rst                            |   2 +
 3 files changed, 119 insertions(+)
 create mode 100644 docs/array_api.rst

diff --git a/ccdproc/_ccddata_wrapper_for_array_api.py b/ccdproc/_ccddata_wrapper_for_array_api.py
index ea460743..ed4a92b8 100644
--- a/ccdproc/_ccddata_wrapper_for_array_api.py
+++ b/ccdproc/_ccddata_wrapper_for_array_api.py
@@ -1,3 +1,7 @@
+# This file is a rough draft of the changes that will be needed
+# in astropy.nddata to adopt the array API. This does not cover all
+# of the changes that will be needed, but it is a start.
+
 import array_api_compat
 import numpy as np
 from astropy import units as u
diff --git a/docs/array_api.rst b/docs/array_api.rst
new file mode 100644
index 00000000..f69940b4
--- /dev/null
+++ b/docs/array_api.rst
@@ -0,0 +1,113 @@
+Array library options in ccdproc
+================================
+
+.. note::
+
+    Who needs this? If you are currently using numpy for your image processing
+    there is no need to change anything about what you currently do. The changes
+    made in `ccdproc` to adopt the `array API`_ were made with the intent of
+    requiring no change to existing code that continues to use `numpy`_.
+
+What is the "Array API"?
+------------------------
+
+The Python `array API`_ specifies an interface that has been adopted by many
+different array libraries (e.g. jax, dask, CuPy). The API is very similar to the
+familiar `numpy`_ interface. The `array API`_ was constructed to allow users
+with specialized needs to use any of the large variety of array options
+available in Python.
+
+What array libraries are supported?
+-----------------------------------
+
+The best list of array libraries that implement the array API is at `array-api-compat`_.
+`ccdproc`_ is currently regularly tested against `numpy`_, `dask`_, and `jax`_. It
+is occasionally tested against `CuPy`_; any errors you encounter running `ccdproc`_
+on a GPU using `CuPy`_ should be
+`reported as an issue `_.
+
+Though the
+`sparse`_ array library supports the array API, `ccdproc`_ does not currently work
+with `sparse`_. A `pull request `_ to FIX
+that would be a welcome contribution to the project.
+
+What limitations shuold I be aware of?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
++ The ``median`` function is not part of the array API, but most array libraries
+  do provide a ``median``. If the array library you choose does not have a ``median``
+  function then `ccdproc`_ will automatically fall back to using a ``median`` function from
+  `bottleneck`_, if that is installed, or to `numpy`_.
+
+Which array library should I use?
+---------------------------------
+
+If you have access to a GPU then using `cupy`_ will be noticeably faster than
+using `numpy`_. If you routinely use very large datasets, consider using `dask`_.
+The array library that the maintainers of `ccdproc` most often use is `numpy`_.
+
+How do I use the array API?
+---------------------------
+
+There are two ways to use the array API in `ccdproc`_:
+
+1. Use the `ccdproc`_ functions as you normally would, but pass in an array from
+   the array library of your choice. For example, if you want to use `dask`_ arrays,
+   you can do this:
+
+   .. code-block:: python
+
+       import dask.array as da
+       import ccdproc
+       from astropy.nddata import CCDData
+
+       data = da.random.random((1000, 1000))
+       ccd = CCDData(data, unit='adu')
+       ccd = ccdproc.trim_image(ccd[:900, :900])
+
+2. Use `ccdproc`_ functions to read/write data in addition to
+   using `ccdproc`_ functions to process the data. For example, if you want to
+   use `dask`_ arrays to process a set of images, you can do this:
+
+   .. code-block:: python
+
+       import dask.array as da
+       import ccdproc
+       from astropy.nddata import CCDData
+
+       images = ccdproc.ImageFileCollection('path/to/images/*.fits',
+                                            array_package=da)
+       for ccd in images.ccds():
+           ccd = ccdproc.trim_image(ccd[:900, :900])
+           # Do more processing with ccdproc functions
+           # ...
+
+   If you do this, image combination will also be done using the array library
+   you specified.
+
+   To do image combination with the array library of your choice without doing
+   any other processing, you can either create a `ccdproc.Combiner` object with a
+   list of file names and the ``array_package`` argument set to the array library
+   you want to use, or use the `ccdproc.combine` function with a list of file names and
+   the ``array_package`` argument set to the array library you want to use. For
+   example, to combine images using `dask`_ arrays, you can do this:
+
+   .. 
code-block:: python
+
+       import dask.array as da
+       import ccdproc
+       from astropy.nddata import CCDData
+
+       images = ccdproc.ImageFileCollection('path/to/images/*.fits',
+                                            array_package=da)
+       combined = ccdproc.combine(images.ccds(), method='median')
+
+.. _array API: https://data-apis.org/array-api/latest/index.html
+.. _array-api-compat: https://data-apis.org/array-api-compat
+.. _bottleneck: https://bottleneck.readthedocs.io/en/latest/
+.. _ccdproc: https://ccdproc.readthedocs.io/en/latest/
+.. _cupy: https://docs.cupy.dev/en/stable/
+.. _dask: https://docs.dask.org/en/stable/
+.. _jax: https://docs.jax.dev/en/latest/index.html
+.. _numpy: https://numpy.org/doc/stable/reference/array_api.html
+.. _sparse: https://sparse.pydata.org/en/stable/
diff --git a/docs/index.rst b/docs/index.rst
index 8383d229..ae2dc9f8 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -34,6 +34,7 @@ Detailed, step-by-step guide
 In addition to the documentation here, a detailed guide to the topic of CCD
 data reduction using ``ccdproc`` and other `astropy`_ tools is available here:
 
+(TODO: FIX LINK BELOW)
 https://mwcraig.github.io/ccd-as-book/00-00-Preface
 
 Getting started
@@ -63,6 +64,7 @@ Using `ccdproc`
     reduction_toolbox
     image_management
     reduction_examples
+    array_api
     default_config
 
 .. toctree::
From 5289ddc2bd1f00f36c15c741c0358202b2bcdcb7 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 29 Jul 2025 16:21:24 -0400
Subject: [PATCH 078/111] Improve test coverage

---
 ccdproc/combiner.py            |  6 ++---
 ccdproc/tests/test_ccdproc.py  |  6 +++++
 ccdproc/tests/test_combiner.py | 41 ++++++++++++++++++++++++++++++++++
 pyproject.toml                 |  1 +
 4 files changed, 51 insertions(+), 3 deletions(-)

diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py
index 78b1a81e..bf1a8769 100644
--- a/ccdproc/combiner.py
+++ b/ccdproc/combiner.py
@@ -4,11 +4,11 @@
 
 from copy import deepcopy
 
-try:
+try:  # pragma: no cover
     import bottleneck as bn
 except ImportError:
-    HAS_BOTTLENECK = False  # pragma: no cover
-else:
+    HAS_BOTTLENECK = False
+else:  # pragma: no cover
     HAS_BOTTLENECK = True
 
 import array_api_compat
diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index a2ea1abb..34be6a7b 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -32,6 +32,7 @@
     create_deviation,
     flat_correct,
     gain_correct,
+    sigma_func,
     subtract_bias,
     subtract_dark,
     subtract_overscan,
@@ -1302,3 +1303,8 @@ def test_ccd_process_gain_corrected():
     assert occd.unit == u.electron
     # Make sure the original keyword is still present.
Regression test for #401 assert occd.meta["testkw"] == 100 + + +def test_sigma_func_for_ccddata(): + ccd_data = ccd_data_func() + assert sigma_func(ccd_data) == sigma_func(ccd_data.data) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index c03e4a21..d0015b6a 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -9,6 +9,7 @@ from astropy.utils.data import get_pkg_data_filename from numpy import median as np_median +from ccdproc import create_deviation from ccdproc.combiner import ( Combiner, _calculate_step_sizes, @@ -86,6 +87,9 @@ def test_combiner_create(): c = Combiner(ccd_list) assert c._data_arr.shape == (3, 100, 100) assert c._data_arr_mask.shape == (3, 100, 100) + # Also test the public properties + assert c.data.shape == c._data_arr.shape + assert c.mask.shape == c._data_arr_mask.shape # test if dtype matches the value that is passed @@ -381,6 +385,14 @@ def test_combiner_with_scaling(): assert xp.all(xpx.isclose(avg_ccd.data.mean(), ccd_data.data.mean())) assert avg_ccd.shape == ccd_data.shape + # Scale by a float + avg_ccd = combiner.average_combine(scale_to=2.0) + expected_avg = 2 * xp.mean( + xp.array((ccd_data.data, ccd_data_lower.data, ccd_data_higher.data)) + ) + assert xp.all(xpx.isclose(xp.mean(avg_ccd.data), expected_avg)) + assert avg_ccd.shape == ccd_data.shape + def test_combiner_scaling_fails(): ccd_data = ccd_data_func() @@ -643,6 +655,35 @@ def test_sum_combine_uncertainty(): assert xp.all(xpx.isclose(ccd.uncertainty.array, ccd2.uncertainty.array)) +@pytest.mark.parametrize("scale", ["function", "mean"]) +def test_combine_ccd_with_uncertainty_and_mask_from_fits(scale, tmp_path): + # Test initializing a CCDData object with uncertainty and mask in the + # combine function. + fitsfile = get_pkg_data_filename("data/a8280271.fits", package="ccdproc.tests") + ccd_data = CCDData.read(fitsfile, unit=u.adu) + ccd_data.data = xp.asarray(ccd_data.data, dtype=xp.float64) + # Set ._mask instead of .mask to avoid conversion to numpy array + ccd_data._mask = xp.zeros_like(ccd_data.data, dtype=bool) + if scale == "function": + scale_by_mean = _make_mean_scaler(ccd_data) + else: + scale_by_mean = [1.0, 1.0, 1.0] + + ccd_data = create_deviation( + ccd_data, gain=1.0 * u.electron / u.adu, readnoise=5 * u.electron + ) + fits_with_uncertainty = tmp_path / "test.fits" + ccd_data.write(fits_with_uncertainty) + + ccd2 = combine( + [fits_with_uncertainty] * 3, + method="average", + array_package=xp, + scale=scale_by_mean, + ) + assert xp.all(xpx.isclose(ccd2.data, ccd_data.data)) + + # Ignore warnings generated because most values are masked and we divide # by zero in at least one place @pytest.mark.filterwarnings( diff --git a/pyproject.toml b/pyproject.toml index bc0203f8..7319cba5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,6 +37,7 @@ test = [ "pytest-astropy>=0.10.0", "ruff", "jax", + "dask" ] [project.urls] From 802d9a4a583f36c67528c9771e993664a37b48e4 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 29 Jul 2025 16:24:23 -0400 Subject: [PATCH 079/111] Replace .array with .asarray The former is not part of the array API but the latter is. 
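
As a rough illustration of the difference (a sketch only -- the helper
below is made up for this message and is not part of the change):

    import array_api_compat
    import numpy as np

    def as_backend_array(values, like):
        # asarray() is guaranteed by the array API standard, while
        # array() is a NumPy-specific spelling that other backends
        # are not required to provide.
        xp = array_api_compat.array_namespace(like)
        return xp.asarray(values)

    mask = as_backend_array([True, False, True], like=np.ones(3))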
--- ccdproc/combiner.py | 8 ++++---- ccdproc/core.py | 4 ++-- ccdproc/tests/test_ccdproc.py | 20 ++++++++++---------- ccdproc/tests/test_combiner.py | 2 +- ccdproc/tests/test_cosmicray.py | 4 ++-- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index bf1a8769..3fd8e986 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -193,7 +193,7 @@ def __init__(self, ccd_iter, dtype=None, xp=None): ccd.mask if ccd.mask is not None else xp.zeros(default_shape) for ccd in ccd_list ] - self._data_arr_mask = xp.array(mask_list, dtype=bool) + self._data_arr_mask = xp.asarray(mask_list, dtype=bool) # Must be after self.data_arr is defined because it checks the # length of the data array. @@ -275,7 +275,7 @@ def scaling(self, value): n_images = self._data_arr.shape[0] if callable(value): self._scaling = [value(self._data_arr[i]) for i in range(n_images)] - self._scaling = xp.array(self._scaling) + self._scaling = xp.asarray(self._scaling) else: try: len(value) @@ -289,7 +289,7 @@ def scaling(self, value): "scaling must be a function or an array " "the same length as the number of images." ) - self._scaling = xp.array(value) + self._scaling = xp.asarray(value) # reshape so that broadcasting occurs properly for _ in range(len(self._data_arr.shape) - 1): self._scaling = self.scaling[:, xp.newaxis] @@ -1106,7 +1106,7 @@ def combine( scalevalues.append(scale(imgccd.data)) - to_set_in_combiner["scaling"] = xp.array(scalevalues) + to_set_in_combiner["scaling"] = xp.asarray(scalevalues) else: to_set_in_combiner["scaling"] = scale diff --git a/ccdproc/core.py b/ccdproc/core.py index e47216df..859ed8bd 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -2235,9 +2235,9 @@ def _sigma_mask(baseline, one_sigma_value, lower_sigma, upper_sigma): subset = block[:, k] csum.append(xp.sum(subset[~block_mask[:, k]])) all_masked.append(xp.all(block_mask[:, k])) - csum = xp.array(csum) + csum = xp.asarray(csum) csum[csum <= 0] = 0 - csum_sigma = xp.array(xp.sqrt(c2 - c1 - csum)) + csum_sigma = xp.asarray(xp.sqrt(c2 - c1 - csum)) # The prior code filled the csum array with the value 1, which # only affects those cases where all of the input values to # the csum were masked, so we fill those with 1. 
diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 34be6a7b..3c6a51b7 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -401,8 +401,8 @@ def test_trim_with_wcs_alters_wcs(): ccd_data = ccd_data_func() # WCS construction example pulled form astropy.wcs docs wcs = WCS(naxis=2) - wcs.wcs.crpix = xp.array(ccd_data.shape) / 2 - wcs.wcs.cdelt = xp.array([-0.066667, 0.066667]) + wcs.wcs.crpix = xp.asarray(ccd_data.shape) / 2 + wcs.wcs.cdelt = xp.asarray([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) @@ -435,7 +435,7 @@ def test_subtract_bias(): def test_subtract_bias_fails(): ccd_data = ccd_data_func(data_size=50) # Should fail if shapes don't match - bias = CCDData(xp.array([200, 200]), unit=u.adu) + bias = CCDData(xp.asarray([200, 200]), unit=u.adu) with pytest.raises(ValueError): subtract_bias(ccd_data, bias) # Should fail because units don't match @@ -832,7 +832,7 @@ def test_block_reduce(): reason="Incompatibility between scikit-image " "and numpy 1.16", ) def test_block_average(): - data = xp.array( + data = xp.asarray( [ [2.0, 1.0, 2.0, 1.0], [1.0, 1.0, 1.0, 1.0], @@ -993,7 +993,7 @@ def wcs_for_testing(shape): # Set up an "Airy's zenithal" projection # Vector properties may be set with Python lists, or Numpy arrays w.wcs.crpix = [shape[0] // 2, shape[1] // 2] - w.wcs.cdelt = xp.array([-0.066667, 0.066667]) + w.wcs.cdelt = xp.asarray([-0.066667, 0.066667]) w.wcs.crval = [0, -90] w.wcs.ctype = ["RA---AIR", "DEC--AIR"] w.wcs.set_pv([(2, 1, 45.0)]) @@ -1065,7 +1065,7 @@ def test_wcs_project_onto_shifted_wcs(): # In the case of a shift, one row and one column should be nan, and they # will share one common nan where they intersect, so we know how many nan # there should be. - assert xp.sum(xp.isnan(new_ccd.data)) == xp.sum(xp.array(new_ccd.shape)) - 1 + assert xp.sum(xp.isnan(new_ccd.data)) == xp.sum(xp.asarray(new_ccd.shape)) - 1 # Use an odd number of pixels to make a well-defined center pixel @@ -1091,7 +1091,7 @@ def test_wcs_project_onto_scale_wcs(): target_wcs.wcs.cdelt /= 2 # Choice below ensures we are really at the center pixel of an odd range. - target_shape = 2 * xp.array(ccd_data.shape) + 1 + target_shape = 2 * xp.asarray(ccd_data.shape) + 1 target_wcs.wcs.crpix = 2 * target_wcs.wcs.crpix + 1 + 0.5 # Ugly hack for numpy-specific check in astropy.wcs @@ -1106,8 +1106,8 @@ def test_wcs_project_onto_scale_wcs(): assert new_ccd.wcs.wcs.compare(target_wcs.wcs) # Define a cutout from the new array that should match the old. - new_lower_bound = (xp.array(new_ccd.shape) - xp.array(ccd_data.shape)) // 2 - new_upper_bound = (xp.array(new_ccd.shape) + xp.array(ccd_data.shape)) // 2 + new_lower_bound = (xp.asarray(new_ccd.shape) - xp.asarray(ccd_data.shape)) // 2 + new_upper_bound = (xp.asarray(new_ccd.shape) + xp.asarray(ccd_data.shape)) // 2 data_cutout = new_ccd.data[ new_lower_bound[0] : new_upper_bound[0], new_lower_bound[1] : new_upper_bound[1] ] @@ -1118,7 +1118,7 @@ def test_wcs_project_onto_scale_wcs(): # Mask should be true for four pixels (all nearest neighbors) # of the single pixel we masked initially. 
- new_center = xp.array(new_ccd.wcs.wcs.crpix, dtype=int) + new_center = xp.asarray(new_ccd.wcs.wcs.crpix, dtype=int) assert xp.all( new_ccd.mask[ new_center[0] : new_center[0] + 2, new_center[1] : new_center[1] + 2 diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index d0015b6a..55b0fd8b 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -388,7 +388,7 @@ def test_combiner_with_scaling(): # Scale by a float avg_ccd = combiner.average_combine(scale_to=2.0) expected_avg = 2 * xp.mean( - xp.array((ccd_data.data, ccd_data_lower.data, ccd_data_higher.data)) + xp.asarray((ccd_data.data, ccd_data_lower.data, ccd_data_higher.data)) ) assert xp.all(xpx.isclose(xp.mean(avg_ccd.data), expected_avg)) assert avg_ccd.shape == ccd_data.shape diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index b3aaaf95..660cbb85 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -108,8 +108,8 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): # Fill masked locations with 0 since there is no simple relationship # between the original value and the corrected value. # Masking using numpy is a handy way to check the results here. - orig_data = xp.array(np_ma_array(ccd_data.data, mask=cr_mask).filled(0)) - new_data = xp.array(np_ma_array(new_data.data, mask=cr_mask).filled(0)) + orig_data = xp.asarray(np_ma_array(ccd_data.data, mask=cr_mask).filled(0)) + new_data = xp.asarray(np_ma_array(new_data.data, mask=cr_mask).filled(0)) if gain_correct_data: gain_for_test = gain else: From 126d1741995bcac246b5228d32be23ff86be21a7 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 29 Jul 2025 16:50:56 -0500 Subject: [PATCH 080/111] Apply suggestions from code review Co-authored-by: Nathaniel Starkman --- ccdproc/log_meta.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index 9fc69177..b212c068 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -134,7 +134,7 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, (NDData, np.ndarray)): # noqa: UP038 + elif isinstance(value, NDData) or hasattr(value, "__array_namespace__"): # noqa: UP038 try: length = len(value) except TypeError: diff --git a/pyproject.toml b/pyproject.toml index 7319cba5..de288740 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ test = [ "pre-commit", "pytest-astropy>=0.10.0", "ruff", - "jax", + "jax>=0.6.3", "dask" ] From ad9b9b5867f0689dac287d19848c34d21d51a098 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 29 Jul 2025 16:41:18 -0400 Subject: [PATCH 081/111] Remove reference to list that is not used --- ccdproc/combiner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 3fd8e986..147e106e 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -179,7 +179,7 @@ def __init__(self, ccd_iter, dtype=None, xp=None): self._xp = xp if dtype is None: dtype = xp.float64 - self.ccd_list = ccd_list + self.unit = default_unit self.weights = None self._dtype = dtype @@ -188,7 +188,7 @@ def __init__(self, ccd_iter, dtype=None, xp=None): # new_shape = (len(ccd_list),) + default_shape self._data_arr = xp.asarray([ccd.data for ccd in ccd_list], dtype=dtype) - # populate self.data_arr + # populate self._data_arr_mask mask_list = 
[ ccd.mask if ccd.mask is not None else xp.zeros(default_shape) for ccd in ccd_list From fd9f03e72a6e78cd4ede25a9c19fed98e65b4598 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 29 Jul 2025 16:44:43 -0400 Subject: [PATCH 082/111] Point to documentation for list of supported libraries --- ccdproc/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ccdproc/conftest.py b/ccdproc/conftest.py index 0a9022d9..7806f8d5 100644 --- a/ccdproc/conftest.py +++ b/ccdproc/conftest.py @@ -63,5 +63,5 @@ def pytest_configure(config): case _: raise ValueError( f"Unsupported array library: {array_library}. " - "Supported libraries are 'numpy' and 'jax'." + "Supported libraries are listed at https://ccdproc.readthedocs.io/en/latest/array_api.html." ) From 749761fd40d702be3a3d9f64d89197349d4eb764 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 29 Jul 2025 17:50:30 -0400 Subject: [PATCH 083/111] Avoid more numpy arrays in tests --- ccdproc/tests/test_cosmicray.py | 52 +++++++++++++++------------------ 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py index 660cbb85..5b64b512 100644 --- a/ccdproc/tests/test_cosmicray.py +++ b/ccdproc/tests/test_cosmicray.py @@ -2,8 +2,8 @@ import pytest from astropy import units as u +from astropy.nddata import StdDevUncertainty from astropy.utils.exceptions import AstropyDeprecationWarning -from numpy import array as np_array from numpy.ma import array as np_ma_array from numpy.random import default_rng from numpy.testing import assert_allclose @@ -25,17 +25,19 @@ def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS): + from numpy import array as np_array + size = data.shape[0] rng = default_rng(99) - crrays = xp.asarray(rng.integers(0, size, size=(ncrays, 2))) + crrays = rng.integers(0, size, size=(ncrays, 2)) # use (threshold + 15) below to make sure cosmic ray is well above the # threshold no matter what the random number generator returns # add_cosmicrays is highly sensitive to the seed # ideally threshold should be set so it is not sensitive to seed, but # this is not working right now - crflux = xp.asarray(10 * scale * rng.random(ncrays) + (threshold + 15) * scale) + crflux = np_array(10 * scale * rng.random(ncrays) + (threshold + 15) * scale) - # Some array libraries (Dask) do not support setting individual elements, + # Some array libraries (Jax) do not support setting individual elements, # so use NumPy. data_as_np = np_array(data.data) for i in range(ncrays): @@ -61,9 +63,8 @@ def test_cosmicray_lacosmic_ccddata(): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - # Workaround for the fact that upstream checks for numpy array - # specifically. - ccd_data.uncertainty = np_array(noise) + + ccd_data.uncertainty = StdDevUncertainty(noise) nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9) # check the number of cosmic rays detected @@ -91,9 +92,8 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - # Workaround for the fact that upstream checks for numpy array - # specifically. - ccd_data.uncertainty = np_array(noise) + + ccd_data.uncertainty = StdDevUncertainty(noise) # No units here on purpose. 
gain = 2.0 @@ -123,9 +123,8 @@ def test_cosmicray_lacosmic_accepts_quantity_gain(): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - # Workaround for the fact that upstream checks for numpy array - # specifically. - ccd_data.uncertainty = np_array(noise) + + ccd_data.uncertainty = StdDevUncertainty(noise) # The units below are the point of the test gain = 2.0 * u.electron / u.adu @@ -137,9 +136,8 @@ def test_cosmicray_lacosmic_accepts_quantity_readnoise(): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - # Workaround for the fact that upstream checks for numpy array - # specifically. - ccd_data.uncertainty = np_array(noise) + + ccd_data.uncertainty = StdDevUncertainty(noise) gain = 2.0 * u.electron / u.adu # The units below are the point of this test readnoise = 6.5 * u.electron @@ -155,9 +153,8 @@ def test_cosmicray_lacosmic_detects_inconsistent_units(): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - # Workaround for the fact that upstream checks for numpy array - # specifically. - ccd_data.uncertainty = np_array(noise) + + ccd_data.uncertainty = StdDevUncertainty(noise) readnoise = 6.5 * u.electron # The units below are deliberately incorrect. @@ -176,9 +173,8 @@ def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - # Workaround for the fact that upstream checks for numpy array - # specifically. - ccd_data.uncertainty = np_array(noise) + + ccd_data.uncertainty = StdDevUncertainty(noise) # No units here on purpose. gain = 2.0 # Don't really need to set this (6.5 is the default value) but want to @@ -202,9 +198,8 @@ def test_cosmicray_lacosmic_invar_inbkg(new_args): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - # Workaround for the fact that upstream checks for numpy array - # specifically. - ccd_data.uncertainty = np_array(noise) + + ccd_data.uncertainty = StdDevUncertainty(noise) with pytest.raises(TypeError): cosmicray_lacosmic(ccd_data, sigclip=5.9, **new_args) @@ -231,9 +226,8 @@ def test_cosmicray_median_ccddata(): ccd_data = ccd_data_func(data_scale=DATA_SCALE) threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) - # Workaround for the fact that upstream checks for numpy array - # specifically. 
- ccd_data.uncertainty = np_array(ccd_data.data * 0.0 + DATA_SCALE) + + ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 0.0 + DATA_SCALE) nccd = cosmicray_median(ccd_data, thresh=5, mbox=11, error_image=None) # check the number of cosmic rays detected @@ -351,7 +345,7 @@ def test_cosmicray_lacosmic_pssl_does_not_fail(): threshold = 5 add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS) noise = DATA_SCALE * xp.ones_like(ccd_data.data) - ccd_data.uncertainty = np_array(noise) + ccd_data.uncertainty = StdDevUncertainty(noise) with pytest.warns(AstropyDeprecationWarning): # The deprecation warning is expected and should be captured nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, pssl=0.0001) From f5bea45a85cf289826c33b5f0638839f2f63513e Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 30 Jul 2025 09:33:09 -0500 Subject: [PATCH 084/111] Fix formatting issues --- ccdproc/log_meta.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ccdproc/log_meta.py b/ccdproc/log_meta.py index b212c068..c8877e4c 100644 --- a/ccdproc/log_meta.py +++ b/ccdproc/log_meta.py @@ -4,7 +4,6 @@ from functools import wraps from itertools import chain -import numpy as np from astropy import units as u from astropy.io import fits from astropy.nddata import NDData @@ -134,7 +133,9 @@ def _replace_array_with_placeholder(value): return_type_not_value = False if isinstance(value, u.Quantity): return_type_not_value = not value.isscalar - elif isinstance(value, NDData) or hasattr(value, "__array_namespace__"): # noqa: UP038 + elif isinstance(value, NDData) or hasattr( + value, "__array_namespace__" + ): # noqa: UP038 try: length = len(value) except TypeError: From bb8b81b4e62779f5b1872d6aabc60e0045f68cac Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Wed, 30 Jul 2025 09:54:40 -0500 Subject: [PATCH 085/111] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- ccdproc/tests/test_combiner.py | 2 +- docs/array_api.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py index 55b0fd8b..cf5be461 100644 --- a/ccdproc/tests/test_combiner.py +++ b/ccdproc/tests/test_combiner.py @@ -340,7 +340,7 @@ def test_combiner_mask_average(): # are masked?! # assert ccd.data[0, 0] == 0 assert ccd.data[5, 5] == 1 - # THE LINE BELOW IS CATCHING A REAL ERROR + # Ensure that the mask is correctly applied to pixels that are fully masked assert ccd.mask[0, 0] assert not ccd.mask[5, 5] diff --git a/docs/array_api.rst b/docs/array_api.rst index f69940b4..8e54fd4e 100644 --- a/docs/array_api.rst +++ b/docs/array_api.rst @@ -28,10 +28,10 @@ on a GPU using `CuPy`_ should be Though the `sparse`_ array library supports the array API, `ccdproc`_ does not currently work -with `sparse`_. A `pull request `_ to FIX -that would be a welcome contribution to the project. +with `sparse`_. A `pull request `_ to add +support for `sparse`_ would be a welcome contribution to the project. -What limitations shuold I be aware of? +What limitations should I be aware of? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 + The ``median`` function is not part of the array API, but most array libraries
From 53dc6bb1721c0bc978806c976fa00eddc5dd0728 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Thu, 31 Jul 2025 09:23:51 -0500
Subject: [PATCH 086/111] Apply suggestions from code review

Co-authored-by: Nathaniel Starkman
---
 .github/workflows/ci_tests.yml   |  2 +-
 ccdproc/tests/pytest_fixtures.py |  4 ++--
 ccdproc/tests/test_ccdproc.py    | 12 +++++++-----
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml
index e75b4d0b..3aaa93e0 100644
--- a/.github/workflows/ci_tests.yml
+++ b/.github/workflows/ci_tests.yml
@@ -41,7 +41,7 @@ jobs:
             python: '3.12'
             tox_env: 'py312-test-alldeps-cov'
 
-          # Run at least one non-numpy array library
+          # Test non-numpy array libraries
           - name: 'ubuntu-py313-jax'
             os: ubuntu-latest
             python: '3.13'
diff --git a/ccdproc/tests/pytest_fixtures.py b/ccdproc/tests/pytest_fixtures.py
index 7fa65f44..23a03670 100644
--- a/ccdproc/tests/pytest_fixtures.py
+++ b/ccdproc/tests/pytest_fixtures.py
@@ -50,7 +50,7 @@ def ccd_data(
     test function, where m is the desired mean.
     """
     # Need the import here to avoid circular import issues
-    from ..conftest import testing_array_library as array_library
+    from ..conftest import testing_array_library as xp
 
     size = data_size
     scale = data_scale
@@ -62,7 +62,7 @@ def ccd_data(
     data = rng.normal(loc=mean, size=[size, size], scale=scale)
 
     fake_meta = {"my_key": 42, "your_key": "not 42"}
-    ccd = CCDData(array_library.asarray(data), unit=u.adu)
+    ccd = CCDData(xp.asarray(data), unit=u.adu)
     ccd.header = fake_meta
     return ccd
 
diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index 3c6a51b7..a141252e 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -707,7 +707,7 @@ def test_flat_correct_deviation():
 # Test the uncertainty on the data after flat correction
 def test_flat_correct_data_uncertainty():
     # Regression test for #345
-    # Temporarily work around the fact that NDUncertainty explicitly checks
+    # TODO: remove once NDUncertainty no longer explicitly checks
     # whether the value is a numpy array.
dat = CCDData( xp.ones([100, 100]), unit="adu", uncertainty=np_array(xp.ones([100, 100])) @@ -1007,7 +1007,7 @@ def test_wcs_project_onto_same_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) ccd_data.wcs = wcs_for_testing(ccd_data.shape) - # Ugly hack for numpy-specific check in astropy.wcs + # TODO: remove hack for numpy-specific check in astropy.wcs ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) @@ -1025,7 +1025,7 @@ def test_wcs_project_onto_same_wcs_remove_headers(): ccd_data.wcs = wcs_for_testing(ccd_data.shape) ccd_data.header = ccd_data.wcs.to_header() - # Ugly hack for numpy-specific check in astropy.wcs + # TODO: remove hack for numpy-specific check in astropy.wcs ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) @@ -1044,7 +1044,7 @@ def test_wcs_project_onto_shifted_wcs(): ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape) - # Ugly hack for numpy-specific check in astropy.wcs + # TODO: remove hack for numpy-specific check in astropy.wcs ccd_data.data = np_array(ccd_data.data) new_ccd = wcs_project(ccd_data, target_wcs) @@ -1094,7 +1094,7 @@ def test_wcs_project_onto_scale_wcs(): target_shape = 2 * xp.asarray(ccd_data.shape) + 1 target_wcs.wcs.crpix = 2 * target_wcs.wcs.crpix + 1 + 0.5 - # Ugly hack for numpy-specific check in astropy.wcs + # TODO: rm hack for numpy-specific check in astropy.wcs ccd_data.data = np_array(ccd_data.data) # Explicitly set the interpolation method so we know what to # expect for the mass. @@ -1226,6 +1226,8 @@ def test_ccd_process(uncertainty_type): masterbias.uncertainty = None dark_frame.uncertainty = None masterflat.uncertainty = None + case _: + raise ValueError("something went seriously wrong") occd = ccd_process( ccd_data, From 12f5ab5046a98500466a79550fa0c48f692f43f1 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Thu, 31 Jul 2025 09:32:39 -0500 Subject: [PATCH 087/111] Do masking operation in-place if possible --- ccdproc/combiner.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py index 147e106e..6cc704f9 100644 --- a/ccdproc/combiner.py +++ b/ccdproc/combiner.py @@ -380,12 +380,12 @@ def minmax_clipping(self, min_clip=None, max_clip=None): """ if min_clip is not None: mask = self._data_arr < min_clip - # Written to avoid in-place modification of array - self._data_arr_mask = self._data_arr_mask | mask + # Do "or" in-place if possible... + self._data_arr_mask |= mask if max_clip is not None: mask = self._data_arr > max_clip - # Written to avoid in-place modification of array - self._data_arr_mask = self._data_arr_mask | mask + # Do "or" in-place if possible... + self._data_arr_mask |= mask # set up sigma clipping algorithms @deprecated_renamed_argument( From f657dfa2b9b928be6d5baad3614b20dd7b62c888 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 08:54:15 -0500 Subject: [PATCH 088/111] Avoid converting data to numpy array Using ccd_data as the argument leads to CCDData's __array__ being called, which returns a numpy array. By using ccd_data.data we get the raw data in whatever format the array library keeps it. 
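
A minimal sketch of the failure mode (the class below is a stand-in
for CCDData, not the real implementation):

    import numpy as np

    class FakeCCD:
        def __init__(self, data):
            self.data = data

        def __array__(self, dtype=None):
            # Any numpy-style coercion funnels through here and hands
            # back a plain numpy array, whatever backend .data uses.
            return np.asarray(self.data, dtype=dtype)

    ccd = FakeCCD(np.ones(3))
    # zeros_like(ccd) coerces through __array__ and leaves the backend;
    # zeros_like(ccd.data) stays in the data array's own namespace.
    zeros = np.zeros_like(ccd.data)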
--- ccdproc/tests/test_ccdproc.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index a141252e..98924ab5 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -375,7 +375,7 @@ def test_trim_image_fits_section_requires_string(): def test_trim_image_fits_section(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = xp.zeros_like(ccd_data) + ccd_data.mask = xp.zeros_like(ccd_data.data) if uncertainty: err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) @@ -439,7 +439,7 @@ def test_subtract_bias_fails(): with pytest.raises(ValueError): subtract_bias(ccd_data, bias) # Should fail because units don't match - bias = CCDData(xp.zeros_like(ccd_data), unit=u.meter) + bias = CCDData(xp.zeros_like(ccd_data.data), unit=u.meter) with pytest.raises(u.UnitsError): subtract_bias(ccd_data, bias) @@ -771,7 +771,7 @@ def tran(arr): def test_transform_image(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = xp.zeros_like(ccd_data) + ccd_data.mask = xp.zeros_like(ccd_data.data) ccd_data.mask[10, 10] = 1 if uncertainty: err = RNG().normal(size=ccd_data.shape) @@ -957,7 +957,7 @@ def test_gain_correct_does_not_change_input(): def test_subtract_bias_does_not_change_input(): ccd_data = ccd_data_func() original = ccd_data.copy() - master_frame = CCDData(xp.zeros_like(ccd_data), unit=ccd_data.unit) + master_frame = CCDData(xp.zeros_like(ccd_data.data), unit=ccd_data.unit) _ = subtract_bias(ccd_data, master=master_frame) assert xp.all(xpx.isclose(original.data, ccd_data.data)) assert original.unit == ccd_data.unit From 6d8edb74af3b5334ac1645ab5d97206238e23e42 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 08:59:28 -0500 Subject: [PATCH 089/111] Change mask setting in tests to avoid a numpy conversion --- ccdproc/tests/test_ccdproc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index 98924ab5..649632ac 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -375,7 +375,8 @@ def test_trim_image_fits_section_requires_string(): def test_trim_image_fits_section(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = xp.zeros_like(ccd_data.data) + # TODO: Set .mask instead of ._mask when CCData is array-api compliant + ccd_data._mask = xp.zeros_like(ccd_data.data) if uncertainty: err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) From b17566bb0ab2c834368e0edb9667e57ad7a12d6f Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 09:38:02 -0500 Subject: [PATCH 090/111] Use CCDData compatibility wrapper in trim_image --- ccdproc/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 859ed8bd..30fdc89e 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -690,11 +690,11 @@ def trim_image(ccd, fits_section=None): """ if fits_section is not None and not isinstance(fits_section, str): raise TypeError("fits_section must be a string.") - trimmed = ccd.copy() + trimmed = _wrap_ccddata_for_array_api(ccd).copy() if fits_section: python_slice = slice_from_string(fits_section, fits_convention=True) trimmed = trimmed[python_slice] - + trimmed = _unwrap_ccddata_for_array_api(trimmed) return trimmed From 
fdec811ba43b551e6084495ea8916e1c8bd8b297 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 10:10:03 -0500 Subject: [PATCH 091/111] Cast number to array namespace --- ccdproc/core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 30fdc89e..8817ce7e 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -849,7 +849,10 @@ def subtract_dark( # The cast to float is needed because the result will be a np.float, # which will mess up the array namespace. scale_factor = float((data_exposure / dark_exposure).decompose().value) - _master_scaled = _master_scaled.multiply(scale_factor) + # The xp.asarray ensures that even the scale factor is an instance + # of the array class. Using a plain float leads (because of a conversion + # to numpy float) to numpy being used for the calculation. + _master_scaled = _master_scaled.multiply(xp.asarray(scale_factor)) _result = _ccd.subtract(_master_scaled, handle_mask=xp.logical_or) else: _result = _ccd.subtract(_master, handle_mask=xp.logical_or) From 7531eee8118ad0bb9d7a8208a3aaf5ed039c69af Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 12:18:03 -0500 Subject: [PATCH 092/111] Remove another instance of conversion to a numpy array --- ccdproc/core.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 8817ce7e..82562751 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -970,8 +970,12 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): # norm_value was set to a bad value raise ValueError("norm_value must be greater than zero.") else: - # norm_value was not set, use mean of the image. - flat_mean = xp.mean(_use_flat.data) * _use_flat.unit + # norm_value was not set, use mean of the image. Do NOT multiply this value by a + # unit. This converts the data to a numpy array somewhere in the Quantity + # machinery. + # TODO: Fix this when astropy supports array namespaces + flat_mean = xp.mean(_use_flat.data) + flat_mean_unit = _use_flat.unit # Normalize the flat. # Make sure flat_mean is a plain python float so that we @@ -980,7 +984,7 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): flat_mean = float(flat_mean.decompose().value) _flat_normed = _use_flat.divide(flat_mean, xp=xp) - # We need to fix up the unit now wince we stripped the unit from + # We need to fix up the unit now since we stripped the unit from # the flat_mean above. result_unit = ((1 * _use_flat.unit) / (1 * flat_mean_unit)).decompose().unit _flat_normed.unit = result_unit From c196b936638919d885e7736c07b92a0c4dcebdb6 Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 12:22:21 -0500 Subject: [PATCH 093/111] More fixes for flat_correct --- ccdproc/core.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index 82562751..41ee4118 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -966,6 +966,8 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): flat_mean = ( norm_value if hasattr(norm_value, "unit") else norm_value * _use_flat.unit ) + flat_mean_unit = flat_mean.unit + flat_mean = float(flat_mean.decompose().value) elif norm_value is not None: # norm_value was set to a bad value raise ValueError("norm_value must be greater than zero.") @@ -980,7 +982,7 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None): # Normalize the flat. 
# Make sure flat_mean is a plain python float so that we
     # can use it with the array namespace.
-    flat_mean_unit = flat_mean.unit
+    # flat_mean_unit = flat_mean.unit
     flat_mean = float(flat_mean.decompose().value)
     _flat_normed = _use_flat.divide(flat_mean, xp=xp)
From f9ad7539757c22b39c7b74a158dbe980c5a692ff Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 12:23:05 -0500
Subject: [PATCH 094/111] One more flat fix

---
 ccdproc/core.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index 41ee4118..bfba5760 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -983,7 +983,6 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None):
     # Make sure flat_mean is a plain python float so that we
     # can use it with the array namespace.
     # flat_mean_unit = flat_mean.unit
-    flat_mean = float(flat_mean.decompose().value)
     _flat_normed = _use_flat.divide(flat_mean, xp=xp)
From a0f738a6c5173c9f47922793c1567ddb66f6aea0 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 12:28:31 -0500
Subject: [PATCH 095/111] Do not use pytest.approx because it forces a numpy
 conversion

---
 ccdproc/tests/test_ccdproc.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index 649632ac..d735e271 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -632,10 +632,13 @@ def test_flat_correct_min_value():
     # Check that the flat was normalized. The asserts below, which look a
     # little odd, are correctly testing that
     #    flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min))
-    # This first one is just comparing two numbers, so use pytest.approx
-    assert xp.mean(flat_corrected_data.data * flat_with_min.data) == pytest.approx(
+    # This first one is just comparing two numbers, so use pytest.approx...
+    # ...actually, not. pytest.approx converts its argument to numpy, which
+    # raises an error in cupy.
+    assert xpx.isclose(
+        xp.mean(flat_corrected_data.data * flat_with_min.data),
         xp.mean(ccd_data.data) * xp.mean(flat_with_min.data),
-        rel=1e-6,
+        rtol=1e-6,
     )
 
     assert xp.all(
From 65ef951a95ead3737a91b5b990c6cf0811e858fd Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 12:38:27 -0500
Subject: [PATCH 096/111] Add cast to array namespace

---
 ccdproc/core.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index bfba5760..fe9207cf 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -981,9 +981,9 @@ def flat_correct(ccd, flat, min_value=None, norm_value=None, xp=None):
 
     # Normalize the flat.
     # Make sure flat_mean is a plain python float so that we
-    # can use it with the array namespace.
-    # flat_mean_unit = flat_mean.unit
-    _flat_normed = _use_flat.divide(flat_mean, xp=xp)
+    # can use it with the array namespace. -- actually, we need to cast
+    # flat_mean to the array namespace.
+    _flat_normed = _use_flat.divide(xp.asarray(flat_mean), xp=xp)
 
     # We need to fix up the unit now since we stripped the unit from
     # the flat_mean above.
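
The scalar-casting pattern that the last few patches rely on, shown in
isolation (a sketch only; the function name below is made up):

    import array_api_compat

    def divide_in_namespace(arr, value):
        # Casting the plain Python scalar with xp.asarray keeps the
        # arithmetic in arr's namespace instead of letting downstream
        # machinery fall back to numpy.
        xp = array_api_compat.array_namespace(arr)
        return arr / xp.asarray(float(value))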
From 52d6d456d7f58831495d77c4807f0a04115ae18c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 12:41:52 -0500 Subject: [PATCH 097/111] Add cast to array namespace in gain_correct --- ccdproc/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ccdproc/core.py b/ccdproc/core.py index fe9207cf..58dc8fe2 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -904,7 +904,7 @@ def gain_correct(ccd, gain, gain_unit=None, xp=None): else: gain_value = gain - _result = _ccd.multiply(gain_value, xp=xp, handle_mask=xp.logical_or) + _result = _ccd.multiply(xp.asarray(gain_value), xp=xp, handle_mask=xp.logical_or) result = _unwrap_ccddata_for_array_api(_result) result.meta = _ccd.meta.copy() return result From 9f59fb56c1d501613a5b6bbb7a70af0f1f460d6c Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Sun, 17 Aug 2025 20:28:07 -0500 Subject: [PATCH 098/111] Fix handling of gain value/unit Properly set uncertainty unit in gain_correct --- ccdproc/core.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/ccdproc/core.py b/ccdproc/core.py index 58dc8fe2..ca2989b6 100644 --- a/ccdproc/core.py +++ b/ccdproc/core.py @@ -904,7 +904,20 @@ def gain_correct(ccd, gain, gain_unit=None, xp=None): else: gain_value = gain + # By this point, gain should have a unit, so separate it out + gain_unit = gain_value.unit if isinstance(gain_value, Quantity) else None + gain_value = ( + gain_value.decompose().value if isinstance(gain_value, Quantity) else gain_value + ) _result = _ccd.multiply(xp.asarray(gain_value), xp=xp, handle_mask=xp.logical_or) + if gain_unit: + # Set unit of the data + _result.unit = _ccd.unit * gain_unit + if _result.uncertainty is not None: + # Set unit of the uncertainty + _result.uncertainty.unit = ( + _result.uncertainty._data_unit_to_uncertainty_unit(_result.unit) + ) result = _unwrap_ccddata_for_array_api(_result) result.meta = _ccd.meta.copy() return result From c3ffbdfe329b21852e194fc9d843ab5c7f0ff9eb Mon Sep 17 00:00:00 2001 From: Matt Craig Date: Tue, 5 Aug 2025 12:58:52 -0500 Subject: [PATCH 099/111] Avoid mask setter in more places --- ccdproc/tests/test_ccdproc.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py index d735e271..fd34e65a 100644 --- a/ccdproc/tests/test_ccdproc.py +++ b/ccdproc/tests/test_ccdproc.py @@ -775,8 +775,10 @@ def tran(arr): def test_transform_image(mask_data, uncertainty): ccd_data = ccd_data_func(data_size=50) if mask_data: - ccd_data.mask = xp.zeros_like(ccd_data.data) - ccd_data.mask[10, 10] = 1 + # Set the _mask rather than .mask to avoid issues with array-api + # TODO: Fix this when CCDData is array-api compliant + ccd_data._mask = xp.zeros_like(ccd_data.data) + xpx.at(ccd_data._mask)[10, 10].set_value(1) if uncertainty: err = RNG().normal(size=ccd_data.shape) ccd_data.uncertainty = StdDevUncertainty(err) @@ -1046,7 +1048,8 @@ def test_wcs_project_onto_shifted_wcs(): target_wcs = wcs_for_testing(ccd_data.shape) target_wcs.wcs.crpix += [1, 1] - ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape) + # TODO: change back to .mask when CCDData is array-api complian + ccd_data._mask = RNG().choice([0, 1], size=ccd_data.shape) # TODO: remove hack for numpy-specific check in astropy.wcs ccd_data.data = np_array(ccd_data.data) @@ -1087,7 +1090,8 @@ def test_wcs_project_onto_scale_wcs(): ccd_data.data = xp.ones_like(ccd_data.data) # Make mask zero... 
-        ccd_data.mask = xp.zeros_like(ccd_data.data)
-        ccd_data.mask[10, 10] = 1
+        # Set the _mask rather than .mask to avoid issues with array-api
+        # TODO: Fix this when CCDData is array-api compliant
+        ccd_data._mask = xp.zeros_like(ccd_data.data)
+        xpx.at(ccd_data._mask)[10, 10].set_value(1)
     if uncertainty:
         err = RNG().normal(size=ccd_data.shape)
         ccd_data.uncertainty = StdDevUncertainty(err)
@@ -1046,7 +1048,8 @@ def test_wcs_project_onto_shifted_wcs():
     target_wcs = wcs_for_testing(ccd_data.shape)
     target_wcs.wcs.crpix += [1, 1]
 
-    ccd_data.mask = RNG().choice([0, 1], size=ccd_data.shape)
+    # TODO: change back to .mask when CCDData is array-api compliant
+    ccd_data._mask = RNG().choice([0, 1], size=ccd_data.shape)
 
     # TODO: remove hack for numpy-specific check in astropy.wcs
     ccd_data.data = np_array(ccd_data.data)
@@ -1087,7 +1090,8 @@ def test_wcs_project_onto_scale_wcs():
     ccd_data.data = xp.ones_like(ccd_data.data)
 
     # Make mask zero...
-    ccd_data.mask = xp.zeros_like(ccd_data.data)
+    # TODO: change back to .mask when CCDData is array-api compliant
+    ccd_data._mask = xp.zeros_like(ccd_data.data)
     # ...except the center pixel, which is one.
     ccd_data.mask[int(ccd_data.wcs.wcs.crpix[0]), int(ccd_data.wcs.wcs.crpix[1])] = 1
From 9de6a92514de0e295dada61519b818c754ef28ce Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:00:45 -0500
Subject: [PATCH 100/111] Fix typo

---
 ccdproc/tests/test_ccdproc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index fd34e65a..d2381fad 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -778,7 +778,7 @@ def test_transform_image(mask_data, uncertainty):
         # Set the _mask rather than .mask to avoid issues with array-api
         # TODO: Fix this when CCDData is array-api compliant
         ccd_data._mask = xp.zeros_like(ccd_data.data)
-        xpx.at(ccd_data._mask)[10, 10].set_value(1)
+        xpx.at(ccd_data._mask)[10, 10].set(1)
     if uncertainty:
         err = RNG().normal(size=ccd_data.shape)
         ccd_data.uncertainty = StdDevUncertainty(err)
From 558ee794deef6729fede3872a71c9c32330f1bc6 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:03:30 -0500
Subject: [PATCH 101/111] Wrap the CCDData object inside transform_image

---
 ccdproc/core.py | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index ca2989b6..059f768c 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -1076,27 +1076,33 @@ def transform_image(ccd, transform_func, **kwargs):
 
     # make a copy of the object
     nccd = ccd.copy()
+    # Wrap the CCDData object to ensure it is compatible with array API
+    _nccd = 
_wrap_ccddata_for_array_api(nccd)
+
     # transform the image plane
     try:
-        nccd.data = transform_func(nccd.data, **kwargs)
+        _nccd.data = transform_func(_nccd.data, **kwargs)
     except TypeError as exc:
         if "is not callable" in str(exc):
             raise TypeError("transform_func is not a callable.") from exc
         raise
 
     # transform the uncertainty plane if it exists
-    if nccd.uncertainty is not None:
-        nccd.uncertainty.array = transform_func(nccd.uncertainty.array, **kwargs)
+    if _nccd.uncertainty is not None:
+        _nccd.uncertainty.array = transform_func(_nccd.uncertainty.array, **kwargs)
 
     # transform the mask plane
-    if nccd.mask is not None:
-        mask = transform_func(nccd.mask, **kwargs)
-        nccd.mask = mask > 0
+    if _nccd.mask is not None:
+        mask = transform_func(_nccd.mask, **kwargs)
+        _nccd.mask = mask > 0
 
-    if nccd.wcs is not None:
+    if _nccd.wcs is not None:
         warn = "WCS information may be incorrect as no transformation was applied to it"
         warnings.warn(warn, UserWarning, stacklevel=2)
 
+    # Unwrap the CCDData object to ensure it is compatible with array API
+    nccd = _unwrap_ccddata_for_array_api(_nccd)
+
     return nccd
 
 
From df71b4d89a146e708ac2cdb3461b66be8e97ce9e Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:05:26 -0500
Subject: [PATCH 102/111] Wrap before copy because copy sets mask

---
 ccdproc/core.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index 059f768c..73f22987 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -1073,11 +1073,10 @@ def transform_image(ccd, transform_func, **kwargs):
     if not isinstance(ccd, CCDData):
         raise TypeError("ccd is not a CCDData.")
 
-    # make a copy of the object
-    nccd = ccd.copy()
-
     # Wrap the CCDData object to ensure it is compatible with array API
-    _nccd = _wrap_ccddata_for_array_api(nccd)
+    _ccd = _wrap_ccddata_for_array_api(ccd)
+    # make a copy of the object
+    _nccd = ccd.copy()
 
     # transform the image plane
     try:
From 4845ec2657fc52d12d50d807b027155d95aaa938 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:06:19 -0500
Subject: [PATCH 103/111] Fix typo

---
 ccdproc/core.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ccdproc/core.py b/ccdproc/core.py
index 73f22987..0c8da875 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -1076,7 +1076,7 @@ def transform_image(ccd, transform_func, **kwargs):
     # Wrap the CCDData object to ensure it is compatible with array API
     _ccd = _wrap_ccddata_for_array_api(ccd)
     # make a copy of the object
-    _nccd = ccd.copy()
+    _nccd = _ccd.copy()
 
     # transform the image plane
     try:
From 406ded98b0656e07390305d61bb1649dd2b26e12 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:08:05 -0500
Subject: [PATCH 104/111] Ensure uncertainty in test uses array namespace

---
 ccdproc/tests/test_ccdproc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index d2381fad..05c34ba6 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -781,7 +781,7 @@ def test_transform_image(mask_data, uncertainty):
         xpx.at(ccd_data._mask)[10, 10].set(1)
     if uncertainty:
         err = RNG().normal(size=ccd_data.shape)
-        ccd_data.uncertainty = StdDevUncertainty(err)
+        ccd_data.uncertainty = StdDevUncertainty(xp.asarray(err))
 
     def tran(arr):
         return 10 * arr
From b0624916377122d36366331e5f7afce2ae3d2ec7 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:12:20 -0500
Subject: [PATCH 105/111] Set mask properly in test

---
 ccdproc/tests/test_ccdproc.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index 05c34ba6..527bc828 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -810,9 +810,10 @@ def test_block_reduce():
         xp.ones((4, 4)),
         unit="adu",
         meta={"testkw": 1},
-        mask=xp.zeros((4, 4), dtype=bool),
         uncertainty=StdDevUncertainty(xp.ones((4, 4))),
     )
+    # TODO: Set mask in caller above when CCDData is array-api compliant
+    ccd._mask = xp.zeros((4, 4), dtype=bool)
     with pytest.warns(AstropyUserWarning) as w:
         ccd_summed = block_reduce(ccd, (2, 2))
     assert len(w) == 1
From 42bb7168c110f28a1aaff761f1d4f12a379f90b7 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:15:09 -0500
Subject: [PATCH 106/111] Make error from data, not ccd object

---
 ccdproc/tests/test_ccdproc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index 527bc828..835c82ac 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -928,7 +928,7 @@ def test_create_deviation_does_not_change_input():
 def test_cosmicray_median_does_not_change_input():
     ccd_data = ccd_data_func()
     original = ccd_data.copy()
-    error = xp.ones_like(ccd_data)
+    error = xp.ones_like(ccd_data.data)
     _ = cosmicray_median(ccd_data, error_image=error, thresh=5, mbox=11, gbox=0, rbox=0)
     assert xp.all(xpx.isclose(original.data, ccd_data.data))
     assert original.unit == ccd_data.unit
From c204f4d4463868aa14f9ce2239634eafe1664aaf Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Tue, 5 Aug 2025 13:25:16 -0500
Subject: [PATCH 107/111] Fix several array API issues in lacosmic

---
 ccdproc/core.py | 18 ++++++++++++++----
 1 file changed, 14
diff --git a/ccdproc/core.py b/ccdproc/core.py
index 0c8da875..f4c672e2 100644
--- a/ccdproc/core.py
+++ b/ccdproc/core.py
@@ -1819,7 +1819,11 @@ def cosmicray_lacosmic(
             gain = gain.value * u.one
         # Check unit consistency before taking the time to check for
        # cosmic rays.
-        if not (gain * ccd).unit.is_equivalent(readnoise.unit):
+        # Check this using the units, not the data, to avoid both an unnecessary
+        # array multiplication and a possible change of array namespace.
+        if not ((1.0 * gain.unit) * (1.0 * ccd.unit)).unit.is_equivalent(
+            readnoise.unit
+        ):
             raise ValueError(
                 f"Inconsistent units for gain ({gain.unit}) "
                 + f" ccd ({ccd.unit}) and readnoise ({readnoise.unit})."
@@ -1848,7 +1852,9 @@ def cosmicray_lacosmic(
         )

         # create the new ccd data object
-        nccd = ccd.copy()
+        # Wrap the CCDData object to ensure it is compatible with array API
+        _ccd = _wrap_ccddata_for_array_api(ccd)
+        nccd = _ccd.copy()
         cleanarr = cleanarr - data_offset
         cleanarr = _astroscrappy_gain_apply_helper(
@@ -1856,14 +1862,18 @@ def cosmicray_lacosmic(
         )

         # Fix the units if the gain is being applied.
-        nccd.unit = ccd.unit * gain.unit
+        nccd.unit = _ccd.unit * gain.unit
+
+        xp = array_api_compat.array_namespace(_ccd.data)

-        nccd.data = cleanarr
+        nccd.data = xp.asarray(cleanarr)
         if nccd.mask is None:
             nccd.mask = crmask
         else:
             nccd.mask = nccd.mask + crmask

+        # Unwrap the wrapper to return a plain CCDData object
+        nccd = _unwrap_ccddata_for_array_api(nccd)
         return nccd
     elif _is_array(ccd):
         data = ccd

From ebf91270a146f7c482d0a236b6af7850e0f29cc0 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Sun, 17 Aug 2025 19:38:55 -0500
Subject: [PATCH 108/111] Ensure uncertainty is wrapped even if CCDData is already wrapped

---
 ccdproc/_ccddata_wrapper_for_array_api.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/ccdproc/_ccddata_wrapper_for_array_api.py b/ccdproc/_ccddata_wrapper_for_array_api.py
index ed4a92b8..52069157 100644
--- a/ccdproc/_ccddata_wrapper_for_array_api.py
+++ b/ccdproc/_ccddata_wrapper_for_array_api.py
@@ -229,6 +229,8 @@ def _wrap_ccddata_for_array_api(ccd):
     Wrap a CCDData object for use with array API backends.
     """
     if isinstance(ccd, _CCDDataWrapperForArrayAPI):
+        if isinstance(ccd.uncertainty, StdDevUncertainty):
+            ccd.uncertainty = _StdDevUncertaintyWrapper(ccd.uncertainty)
         return ccd

     _ccd = _CCDDataWrapperForArrayAPI(ccd)

From aedb3b2935269a026656ace039d364ab1921384c Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Sun, 17 Aug 2025 20:09:51 -0500
Subject: [PATCH 109/111] Add numpy conversion for now

---
 ccdproc/tests/test_ccdproc.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/ccdproc/tests/test_ccdproc.py b/ccdproc/tests/test_ccdproc.py
index 835c82ac..2e2cf226 100644
--- a/ccdproc/tests/test_ccdproc.py
+++ b/ccdproc/tests/test_ccdproc.py
@@ -1094,7 +1094,9 @@ def test_wcs_project_onto_scale_wcs():
     # TODO: change back to .mask when CCDData is array-api compliant
     ccd_data._mask = xp.zeros_like(ccd_data.data)
     # ...except the center pixel, which is one.
-    ccd_data.mask[int(ccd_data.wcs.wcs.crpix[0]), int(ccd_data.wcs.wcs.crpix[1])] = 1
+    ccd_data._mask = xpx.at(ccd_data._mask)[
+        int(ccd_data.wcs.wcs.crpix[0]), int(ccd_data.wcs.wcs.crpix[1])
+    ].set(1)

     target_wcs = wcs_for_testing(ccd_data.shape)
     target_wcs.wcs.cdelt /= 2
@@ -1105,6 +1107,7 @@ def test_wcs_project_onto_scale_wcs():
     # TODO: rm hack for numpy-specific check in astropy.wcs
     ccd_data.data = np_array(ccd_data.data)
+    ccd_data._mask = np_array(ccd_data._mask)

     # Explicitly set the interpolation method so we know what to
     # expect for the mask.
     new_ccd = wcs_project(

From 2a877a9a9db2754da5e33befe94aefd8e87e52a9 Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Mon, 18 Aug 2025 09:24:11 -0500
Subject: [PATCH 110/111] Fix error in test: new_data never has a .data
 attribute the way the test is written

---
 ccdproc/tests/test_cosmicray.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py
index 5b64b512..683b3d84 100644
--- a/ccdproc/tests/test_cosmicray.py
+++ b/ccdproc/tests/test_cosmicray.py
@@ -1,5 +1,6 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst

+import array_api_extra as xpx
 import pytest
 from astropy import units as u
 from astropy.nddata import StdDevUncertainty
@@ -107,9 +108,9 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data):
     cr_mask = new_ccd.mask
     # Fill masked locations with 0 since there is no simple relationship
     # between the original value and the corrected value.
-    # Masking using numpy is a handy way to check the results here.
-    orig_data = xp.asarray(np_ma_array(ccd_data.data, mask=cr_mask).filled(0))
-    new_data = xp.asarray(np_ma_array(new_data.data, mask=cr_mask).filled(0))
+    orig_data = xpx.at(ccd_data.data)[cr_mask].set(0.0)
+    new_data = xpx.at(new_data)[cr_mask].set(0.0)
+
     if gain_correct_data:
         gain_for_test = gain
     else:

From 40ffbaef2c726b58bfdd3de930d2e70e1a601edf Mon Sep 17 00:00:00 2001
From: Matt Craig
Date: Mon, 18 Aug 2025 09:42:21 -0500
Subject: [PATCH 111/111] Cast mask to array API object

---
 ccdproc/tests/test_cosmicray.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ccdproc/tests/test_cosmicray.py b/ccdproc/tests/test_cosmicray.py
index 683b3d84..bc77e2ca 100644
--- a/ccdproc/tests/test_cosmicray.py
+++ b/ccdproc/tests/test_cosmicray.py
@@ -106,6 +106,9 @@ def test_cosmicray_gain_correct(array_input, gain_correct_data):
     new_ccd = cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=gain_correct_data)
     new_data = new_ccd.data
     cr_mask = new_ccd.mask
+
+    # Convert the mask to an array-API-compatible array
+    cr_mask = xp.asarray(cr_mask)
     # Fill masked locations with 0 since there is no simple relationship
     # between the original value and the corrected value.
     orig_data = xpx.at(ccd_data.data)[cr_mask].set(0.0)
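
A note on the update idiom that patches 100, 109, 110, and 111 converge on: xpx.at(x)[idx].set(v) from array_api_extra is the out-of-place counterpart of x[idx] = v, needed because some array API backends (notably JAX) do not support in-place item assignment. A minimal self-contained sketch of the idiom, using NumPy as the backend (the array shape and index below are illustrative only, not taken from the test suite):

    import array_api_extra as xpx
    import numpy as np

    mask = np.zeros((5, 5), dtype=bool)

    # Out-of-place counterpart of ``mask[2, 3] = True``. For writable arrays
    # such as NumPy's, the update may happen in place; for immutable backends
    # such as JAX, a modified copy is returned instead.
    mask = xpx.at(mask)[2, 3].set(True)

    assert mask[2, 3]

Assigning the result back, as patch 109 does, is what keeps the code correct on immutable backends; an unassigned call like the one in patch 100 only works where in-place updates are possible.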
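
The unit-consistency rewrite in patch 107 also deserves a brief gloss: evaluating (gain * ccd).unit builds a full image-sized product array just to read off its unit, and with a non-NumPy backend it would also push a foreign array through astropy. Multiplying two scalar placeholder Quantity objects performs the same unit algebra with no array work at all. A small sketch of the equivalence (the specific units below are illustrative assumptions, not taken from the patches):

    import astropy.units as u

    gain_unit = u.electron / u.adu   # a typical CCD gain unit
    ccd_unit = u.adu                 # raw data in ADU
    readnoise_unit = u.electron      # read noise in electrons

    # Same unit algebra as (gain * ccd).unit, but on scalars, so no
    # image-sized temporary is created and no array namespace is touched.
    combined_unit = ((1.0 * gain_unit) * (1.0 * ccd_unit)).unit
    assert combined_unit.is_equivalent(readnoise_unit)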