
Commit 284e8ea

python: np.asfarray() removed in Numpy 2.
1 parent f76cb9e commit 284e8ea
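
The change is mechanical: NumPy 2.0 removed np.asfarray(), and the documented replacement is np.asarray(..., dtype=float). A minimal sketch of the substitution applied throughout the diff below, using illustrative input data not taken from the module:

import numpy as np

data = [[1, 2], [3, 4]]  # e.g. a plain Python list passed in by a user
# NumPy < 2.0:  data = np.asfarray(data)
data = np.asarray(data, dtype=float)  # equivalent, works on both NumPy 1.x and 2.x
print(data.dtype)  # float64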

File tree

1 file changed: 25 additions, 23 deletions


python/src/moocore/_moocore.py

Lines changed: 25 additions & 23 deletions
@@ -148,11 +148,11 @@ def _parse_maximise(maximise, nobj):
 
 def _unary_refset_common(data, ref, maximise):
     # Convert to numpy.array in case the user provides a list. We use
-    # np.asfarray to convert it to floating-point, otherwise if a user inputs
+    # np.asarray(dtype=float) to convert it to floating-point, otherwise if a user inputs
     # something like ref = np.array([10, 10]) then numpy would interpret it as
     # an int array.
-    data = np.asfarray(data)
-    ref = np.atleast_2d(np.asfarray(ref))
+    data = np.asarray(data, dtype=float)
+    ref = np.atleast_2d(np.asarray(ref, dtype=float))
     nobj = data.shape[1]
     if nobj != ref.shape[1]:
         raise ValueError(
@@ -357,9 +357,9 @@ def hypervolume(data: ArrayLike, /, ref) -> float:
     # np.asfarray to convert it to floating-point, otherwise if a user inputs
     # something like ref = np.array([10, 10]) then numpy would interpret it as
     # an int array.
-    data = np.asfarray(data)
+    data = np.asarray(data, dtype=float)
     nobj = data.shape[1]
-    ref = atleast_1d_of_length_n(np.asfarray(ref), nobj)
+    ref = atleast_1d_of_length_n(np.asarray(ref, dtype=float), nobj)
     if nobj != ref.shape[0]:
         raise ValueError(
             f"data and ref need to have the same number of objectives ({nobj} != {ref.shape[0]})"
@@ -413,7 +413,7 @@ def is_nondominated(data, maximise=False, keep_weakly: bool = False):
            [1, 0]])
 
     """
-    data = np.asfarray(data)
+    data = np.asarray(data, dtype=float)
     nrows, nobj = data.shape
     maximise = _parse_maximise(maximise, nobj)
     data_p, npoints, nobj = np2d_to_double_array(data)
@@ -484,7 +484,7 @@ def filter_dominated_within_sets(
     With a single dataset, use :func:`filter_dominated`
 
     """
-    data = np.asfarray(data)
+    data = np.asarray(data, dtype=float)
     ncols = data.shape[1]
     if ncols < 3:
         raise ValueError(
@@ -579,7 +579,7 @@ def pareto_rank(data, /, *, maximise=False):
     True
 
     """
-    data = np.asfarray(data)
+    data = np.asarray(data, dtype=float)
     nrows, nobj = data.shape
     maximise = _parse_maximise(maximise, nobj)
     if maximise.any():
@@ -686,15 +686,15 @@ def normalise(
 
     """
     # Normalise modifies the data, so we need to create a copy.
-    data = np.asfarray(data).copy()
+    data = np.asarray(data, dtype=float).copy()
     npoints, nobj = data.shape
     if nobj == 1:
         raise ValueError("'data' must have at least two columns")
-    to_range = np.asfarray(to_range)
+    to_range = np.asarray(to_range, dtype=float)
     if to_range.shape[0] != 2:
         raise ValueError("'to_range' must have length 2")
-    lower = atleast_1d_of_length_n(np.asfarray(lower), nobj)
-    upper = atleast_1d_of_length_n(np.asfarray(upper), nobj)
+    lower = atleast_1d_of_length_n(np.asarray(lower, dtype=float), nobj)
+    upper = atleast_1d_of_length_n(np.asarray(upper, dtype=float), nobj)
     if np.any(np.isnan(lower)):
         lower = np.where(np.isnan(lower), data.min(axis=0), lower)
     if np.any(np.isnan(upper)):
@@ -805,7 +805,7 @@ def eaf(data, /, percentiles=[]):
            [ 7.92511295, 3.92669598, 100. ]])
 
     """
-    data = np.asfarray(data)
+    data = np.asarray(data, dtype=float)
     ncols = data.shape[1]
     if ncols < 3:
         raise ValueError(
@@ -824,7 +824,7 @@ def eaf(data, /, percentiles=[]):
     if len(percentiles) == 0:
         percentiles = np.arange(1.0, nsets + 1) * (100.0 / nsets)
     else:
-        percentiles = np.unique(np.asfarray(percentiles))
+        percentiles = np.unique(np.asarray(percentiles, dtype=float))
     percentile_p, npercentiles = np1d_to_double_array(percentiles)
 
     # Get C pointers + matrix size for calling CFFI generated extension module
@@ -886,7 +886,7 @@ def vorobT(data, /, reference):
     8943.333191728081
 
     """
-    data = np.asfarray(data)
+    data = np.asarray(data, dtype=float)
     ncols = data.shape[1]
     if ncols < 3:
         raise ValueError(
@@ -957,7 +957,7 @@ def vorobDev(x, /, reference, *, VE=None) -> float:
     if VE is None:
         VE = vorobT(x, reference)["VE"]
 
-    x = np.asfarray(x)
+    x = np.asarray(x, dtype=float)
     ncols = x.shape[1]
     if ncols < 3:
         raise ValueError(
@@ -1075,8 +1075,8 @@ def eafdiff(x, y, /, *, intervals=None, maximise=False, rectangles=False):
            [ 4. , 2.5, inf, 3. , 1. ]])
 
     """
-    x = np.asfarray(x)
-    y = np.asfarray(y)
+    x = np.asarray(x, dtype=float)
+    y = np.asarray(y, dtype=float)
     assert (
         x.shape[1] == y.shape[1]
     ), "'x' and 'y' must have the same number of columns"
@@ -1158,7 +1158,7 @@ def eafdiff(x, y, /, *, intervals=None, maximise=False, rectangles=False):
     # intervals = int(intervals)
 
     # data = np.ascontiguousarray(
-    #     np.asfarray(data)
+    #     np.asarray(data, dtype=float)
     # ) # C function requires contiguous data
     # num_data_columns = data.shape[1]
     # data_p, npoints, ncols = np2d_to_double_array(data)
@@ -1286,13 +1286,15 @@ def whv_hype(
     # Convert to numpy.array in case the user provides a list. We use
     # np.asfarray to convert it to floating-point, otherwise if a user inputs
    # something like [10, 10] then numpy would interpret it as an int array.
-    data = np.asfarray(data)
+    data = np.asarray(data, dtype=float)
     nobj = data.shape[1]
     if nobj != 2:
         raise NotImplementedError("Only 2D datasets are currently supported")
 
-    reference = atleast_1d_of_length_n(np.asfarray(reference), nobj)
-    ideal = atleast_1d_of_length_n(np.asfarray(ideal), nobj)
+    reference = atleast_1d_of_length_n(
+        np.asarray(reference, dtype=float), nobj
+    )
+    ideal = atleast_1d_of_length_n(np.asarray(ideal, dtype=float), nobj)
 
     maximise = _parse_maximise(maximise, nobj)
     data[:, maximise] = -data[:, maximise]
@@ -1319,7 +1321,7 @@ def whv_hype(
             data_p, npoints, ideal, reference, nsamples, seed, mu
         )
     elif dist == "point":
-        mu = atleast_1d_of_length_n(np.asfarray(mu), nobj)
+        mu = atleast_1d_of_length_n(np.asarray(mu, dtype=float), nobj)
         mu, _ = np1d_to_double_array(mu)
         hv = lib.whv_hype_gaus(
             data_p, npoints, ideal, reference, nsamples, seed, mu
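
The in-code comments explain why the explicit dtype=float matters: integer input such as ref = np.array([10, 10]) would otherwise stay an integer array, and helper names like np2d_to_double_array / np1d_to_double_array suggest the C extension expects double-precision data. A quick illustrative check, not part of the commit:

import numpy as np

ref = np.array([10, 10])                   # user passes plain integers
print(np.asarray(ref).dtype)               # an integer dtype, e.g. int64
print(np.asarray(ref, dtype=float).dtype)  # float64, forced to floating point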
