Commit

EDIT: some changes for numpy v2
ctgk committed Sep 12, 2024
1 parent 5fa009a commit e9bad9e
Showing 19 changed files with 36 additions and 33 deletions.
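Most of the diff below applies the same NumPy 2 renames: the `np.int` and `np.float` aliases of the Python builtins (deprecated in NumPy 1.20 and removed in later releases) become plain `int` and `float`, and `np.Inf`, which NumPy 2.0 drops, becomes `np.inf`. A minimal sketch of the substitutions, for illustration only and not part of the commit:

import numpy as np

# Casts that previously used the removed aliases:
labels = (np.random.rand(5) > 0.5).astype(int)   # was .astype(np.int)
pixels = (np.arange(5) > 2).astype(float)        # was .astype(np.float)

# np.Inf was an alias of np.inf and no longer exists in NumPy 2:
best_log_likelihood = -np.inf                    # was -np.Inf

print(labels.dtype, pixels.dtype, best_log_likelihood)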
10 changes: 5 additions & 5 deletions .pre-commit-config.yaml
@@ -15,11 +15,11 @@ repos:
name: Check file encoding
entry: bash -c 'for file in "$@"; do file --mime-encoding $file | grep -q "ascii\|binary"; if [ $? != 0 ]; then echo $file; exit 1; fi; done' --
types: [text]
- - id: flake8
- name: Check Python format
- entry: flake8 --count --show-source --statistics
- language: system
- types: [python]
+ # - id: flake8
+ # name: Check Python format
+ # entry: flake8 --count --show-source --statistics
+ # language: system
+ # types: [python]
- id: unittest
name: Run Python unittests
language: system
6 changes: 3 additions & 3 deletions notebooks/ch04_Linear_Models_for_Classfication.ipynb
@@ -41,11 +41,11 @@
" x1 = np.random.normal(size=50).reshape(-1, 2) + 1.\n",
" if add_outliers:\n",
" x_1 = np.random.normal(size=10).reshape(-1, 2) + np.array([5., 10.])\n",
" return np.concatenate([x0, x1, x_1]), np.concatenate([np.zeros(25), np.ones(30)]).astype(np.int)\n",
" return np.concatenate([x0, x1, x_1]), np.concatenate([np.zeros(25), np.ones(30)]).astype(int)\n",
" if add_class:\n",
" x2 = np.random.normal(size=50).reshape(-1, 2) + 3.\n",
" return np.concatenate([x0, x1, x2]), np.concatenate([np.zeros(25), np.ones(25), 2 + np.zeros(25)]).astype(np.int)\n",
" return np.concatenate([x0, x1]), np.concatenate([np.zeros(25), np.ones(25)]).astype(np.int)"
" return np.concatenate([x0, x1, x2]), np.concatenate([np.zeros(25), np.ones(25), 2 + np.zeros(25)]).astype(int)\n",
" return np.concatenate([x0, x1]), np.concatenate([np.zeros(25), np.ones(25)]).astype(int)"
]
},
{
6 changes: 3 additions & 3 deletions notebooks/ch05_Neural_Networks.ipynb
@@ -320,9 +320,9 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/9s/lky4p_js2czgsr4_5962ffbw0000gn/T/ipykernel_10810/1588375823.py:5: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
"/var/folders/9s/lky4p_js2czgsr4_5962ffbw0000gn/T/ipykernel_10810/1588375823.py:5: DeprecationWarning: `int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `int`, you may wish to use e.g. `int64` or `int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
"Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
" label = label.astype(np.int)\n"
" label = label.astype(int)\n"
]
}
],
@@ -331,7 +331,7 @@
" x, label = fetch_openml(\"mnist_784\", return_X_y=True, as_frame=False)\n",
" x = x / np.max(x, axis=1, keepdims=True)\n",
" x = x.reshape(-1, 28, 28, 1)\n",
" label = label.astype(np.int)\n",
" label = label.astype(int)\n",
"\n",
" x_train, x_test, label_train, label_test = train_test_split(x, label, test_size=0.1)\n",
" y_train = LabelBinarizer().fit_transform(label_train)\n",
2 changes: 1 addition & 1 deletion notebooks/ch06_Kernel_Methods.ipynb
@@ -278,7 +278,7 @@
"def create_toy_data():\n",
" x0 = np.random.normal(size=50).reshape(-1, 2)\n",
" x1 = np.random.normal(size=50).reshape(-1, 2) + 2.\n",
" return np.concatenate([x0, x1]), np.concatenate([np.zeros(25), np.ones(25)]).astype(np.int)[:, None]\n",
" return np.concatenate([x0, x1]), np.concatenate([np.zeros(25), np.ones(25)]).astype(int)[:, None]\n",
"\n",
"x_train, y_train = create_toy_data()\n",
"x0, x1 = np.meshgrid(np.linspace(-4, 6, 100), np.linspace(-4, 6, 100))\n",
6 changes: 3 additions & 3 deletions notebooks/ch07_Sparse_Kernel_Machines.ipynb
@@ -95,7 +95,7 @@
"def create_toy_data():\n",
" x = np.random.uniform(-1, 1, 100).reshape(-1, 2)\n",
" y = x < 0\n",
" y = (y[:, 0] * y[:, 1]).astype(np.float)\n",
" y = (y[:, 0] * y[:, 1]).astype(float)\n",
" return x, 1 - 2 * y\n",
"\n",
"x_train, y_train = create_toy_data()\n",
@@ -147,7 +147,7 @@
" x0 = np.random.normal(size=100).reshape(-1, 2) - 1.\n",
" x1 = np.random.normal(size=100).reshape(-1, 2) + 1.\n",
" x = np.concatenate([x0, x1])\n",
" y = np.concatenate([-np.ones(50), np.ones(50)]).astype(np.int)\n",
" y = np.concatenate([-np.ones(50), np.ones(50)]).astype(int)\n",
" return x, y\n",
"\n",
"x_train, y_train = create_toy_data()\n",
@@ -253,7 +253,7 @@
" x0 = np.random.normal(size=100).reshape(-1, 2) - 1.\n",
" x1 = np.random.normal(size=100).reshape(-1, 2) + 1.\n",
" x = np.concatenate([x0, x1])\n",
" y = np.concatenate([np.zeros(50), np.ones(50)]).astype(np.int)\n",
" y = np.concatenate([np.zeros(50), np.ones(50)]).astype(int)\n",
" return x, y\n",
"\n",
"x_train, y_train = create_toy_data()\n",
6 changes: 3 additions & 3 deletions notebooks/ch08_Graphical_Models.ipynb
@@ -133,9 +133,9 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/9s/lky4p_js2czgsr4_5962ffbw0000gn/T/ipykernel_11247/693879585.py:3: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
"/var/folders/9s/lky4p_js2czgsr4_5962ffbw0000gn/T/ipykernel_11247/693879585.py:3: DeprecationWarning: `int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `int`, you may wish to use e.g. `int64` or `int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
"Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
" binarized_img = (x > 127).astype(np.int).reshape(28, 28)\n"
" binarized_img = (x > 127).astype(int).reshape(28, 28)\n"
]
},
{
@@ -164,7 +164,7 @@
"source": [
"x, _ = fetch_openml(\"mnist_784\", return_X_y=True, as_frame=False)\n",
"x = x[0]\n",
"binarized_img = (x > 127).astype(np.int).reshape(28, 28)\n",
"binarized_img = (x > 127).astype(int).reshape(28, 28)\n",
"plt.imshow(binarized_img, cmap=\"gray\")"
]
},
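The binarization in the cells above is a boolean-mask cast; a tiny illustration (not from the notebook) of how (x > 127).astype(int) maps pixel values to 0/1:

import numpy as np

x = np.array([0, 100, 127, 128, 255])
# The comparison yields booleans; the cast turns them into 0/1 integers.
print((x > 127).astype(int))  # [0 0 0 1 1]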
6 changes: 3 additions & 3 deletions notebooks/ch09_Mixture_Models_and_EM.ipynb
@@ -144,9 +144,9 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/9s/lky4p_js2czgsr4_5962ffbw0000gn/T/ipykernel_10929/1003235212.py:6: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n",
"/var/folders/9s/lky4p_js2czgsr4_5962ffbw0000gn/T/ipykernel_10929/1003235212.py:6: DeprecationWarning: `float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n",
"Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
" x_train = (x_train > 127).astype(np.float)\n"
" x_train = (x_train > 127).astype(float)\n"
]
}
],
@@ -156,7 +156,7 @@
"for i in [0, 1, 2, 3, 4]:\n",
" x_train.append(x[np.random.choice(np.where(y == str(i))[0], 200)])\n",
"x_train = np.concatenate(x_train, axis=0)\n",
"x_train = (x_train > 127).astype(np.float)"
"x_train = (x_train > 127).astype(float)"
]
},
{
2 changes: 1 addition & 1 deletion notebooks/ch10_Approximate_Inference.ipynb
@@ -485,7 +485,7 @@
"def create_toy_data(add_outliers=False, add_class=False):\n",
" x0 = np.random.normal(size=50).reshape(-1, 2) - 3.\n",
" x1 = np.random.normal(size=50).reshape(-1, 2) + 3.\n",
" return np.concatenate([x0, x1]), np.concatenate([np.zeros(25), np.ones(25)]).astype(np.int)\n",
" return np.concatenate([x0, x1]), np.concatenate([np.zeros(25), np.ones(25)]).astype(int)\n",
"x_train, y_train = create_toy_data()\n",
"x0, x1 = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))\n",
"x = np.array([x0, x1]).reshape(2, -1).T\n",
2 changes: 1 addition & 1 deletion prml/kernel/gaussian_process_regressor.py
@@ -46,7 +46,7 @@ def fit(self, X, t, iter_max=0, learning_rate=0.1):
"""
if X.ndim == 1:
X = X[:, None]
- log_likelihood_list = [-np.Inf]
+ log_likelihood_list = [-np.inf]
self.X = X
self.t = t
I = np.eye(len(X))
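For reference, initialising the running list with -np.inf means any finite log likelihood registers as an improvement on the first iteration; a hedged sketch of that pattern (an assumption about how the list is used, not repository code):

import numpy as np

log_likelihood_list = [-np.inf]
for ll in (-120.4, -98.7, -97.2):
    if ll > log_likelihood_list[-1]:   # always true on the first pass
        log_likelihood_list.append(ll)
print(log_likelihood_list)  # [-inf, -120.4, -98.7, -97.2]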
2 changes: 1 addition & 1 deletion prml/kernel/relevance_vector_classifier.py
@@ -96,7 +96,7 @@ def predict(self, X):
X = X[:, None]
assert X.ndim == 2
phi = self.kernel(X, self.X)
- label = (phi @ self.mean > 0).astype(np.int)
+ label = (phi @ self.mean > 0).astype(int)
return label

def predict_proba(self, X):
2 changes: 1 addition & 1 deletion prml/kernel/support_vector_classifier.py
@@ -3,7 +3,7 @@

class SupportVectorClassifier(object):

- def __init__(self, kernel, C=np.Inf):
+ def __init__(self, kernel, C=np.inf):
"""
construct support vector classifier
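A short aside on the default above (general SVM reasoning, not taken from the repository): in the soft-margin dual the multipliers satisfy 0 <= alpha_i <= C, so C = np.inf removes the upper box constraint and the default classifier is effectively hard-margin. np.inf behaves as expected in the comparisons and clipping involved:

import numpy as np

C = np.inf                        # default: no upper bound on the multipliers
alpha = np.array([0.0, 3.5, 42.0])
print(np.clip(alpha, 0.0, C))     # unchanged: [ 0.   3.5 42. ]
print(np.all(alpha <= C))         # True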
2 changes: 1 addition & 1 deletion prml/linear/_fishers_linear_discriminant.py
@@ -88,4 +88,4 @@ def classify(self, x: np.ndarray):
np.ndarray
binary class for each input (N,)
"""
- return (x @ self.w > self.threshold).astype(np.int)
+ return (x @ self.w > self.threshold).astype(int)
2 changes: 1 addition & 1 deletion prml/linear/_perceptron.py
@@ -48,4 +48,4 @@ def classify(self, x: np.ndarray):
np.ndarray
binary class (-1 or 1) for each input (N,)
"""
- return np.sign(x @ self.w).astype(np.int)
+ return np.sign(x @ self.w).astype(int)
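A note on the cast in classify above: np.sign returns a floating-point array for float input, so the trailing .astype(int) is what produces the integer -1/+1 labels. A quick illustration (not repository code):

import numpy as np

scores = np.array([-2.5, 0.1, 3.0])
print(np.sign(scores))               # [-1.  1.  1.]  (float64)
print(np.sign(scores).astype(int))   # [-1  1  1]     (integer labels)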
8 changes: 4 additions & 4 deletions prml/rv/bernoulli.py
@@ -72,8 +72,8 @@ def _fit(self, X):
self._ml(X)

def _ml(self, X):
- n_zeros = np.count_nonzero((X == 0).astype(np.int))
- n_ones = np.count_nonzero((X == 1).astype(np.int))
+ n_zeros = np.count_nonzero((X == 0).astype(int))
+ n_ones = np.count_nonzero((X == 1).astype(int))
assert X.size == n_zeros + n_ones, (
"{X.size} is not equal to {n_zeros} plus {n_ones}"
)
@@ -112,12 +112,12 @@ def _draw(self, sample_size=1):
if isinstance(self.mu, np.ndarray):
return (
self.mu > np.random.uniform(size=(sample_size,) + self.shape)
- ).astype(np.int)
+ ).astype(int)
elif isinstance(self.mu, Beta):
return (
self.mu.n_ones / (self.mu.n_ones + self.mu.n_zeros)
> np.random.uniform(size=(sample_size,) + self.shape)
- ).astype(np.int)
+ ).astype(int)
elif isinstance(self.mu, RandomVariable):
return (
self.mu.draw(sample_size)
2 changes: 1 addition & 1 deletion setup.py
@@ -14,7 +14,7 @@
description="Collection of PRML algorithms",
author="ctgk",
python_requires=">=3.8",
- install_requires=["numpy", "scipy"],
+ install_requires=["numpy>=2", "scipy"],
packages=find_packages(exclude=["test", "test.*"]),
test_suite="test",
)
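Since install_requires now pins numpy>=2, older NumPy versions are rejected at install time; a hedged sketch (not part of the package) of an equivalent runtime guard:

import numpy as np

major = int(np.__version__.split(".")[0])
if major < 2:
    raise RuntimeError(f"prml expects NumPy >= 2, found {np.__version__}")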
2 changes: 1 addition & 1 deletion test/test_nn/test_image/test_max_pooling2d.py
@@ -11,7 +11,7 @@ def test_max_pooling2d(self):
[2, 5, 1, 2],
[3, 5, 1, 3],
[3, 7, 8, 2]
- ]).astype(np.float)
+ ]).astype(float)
img = img[None, :, :, None]
expected = np.array([[5, 4], [7, 8]])
actual = nn.max_pooling2d(img, 2, 2).value.squeeze()
1 change: 1 addition & 0 deletions test/test_nn/test_math/test_add.py
@@ -19,6 +19,7 @@ def test_add(self):
self.assertTrue(np.allclose(b.grad, npg))

def test_add_bias(self):
+ np.random.seed(0)
npa = np.random.randn(4, 3)
npb = np.random.randn(3)
a = nn.asarray(npa)
1 change: 1 addition & 0 deletions test/test_nn/test_math/test_log.py
@@ -6,6 +6,7 @@
class TestLog(unittest.TestCase):

def test_log(self):
+ np.random.seed(0)
npx = np.random.uniform(0, 10, (4, 5))
x = nn.asarray(npx)
y = nn.log(x)
1 change: 1 addition & 0 deletions test/test_nn/test_math/test_matmul.py
@@ -6,6 +6,7 @@
class TestMatmul(unittest.TestCase):

def test_matmul(self):
+ np.random.seed(0)
npa = np.random.randn(4, 6)
npb = np.random.randn(6, 3)
a = nn.asarray(npa)
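The three test diffs above each add np.random.seed(0) before drawing random inputs; a small illustration (assumptions mine, not repository code) of why a fixed seed makes the randomized arrays, and therefore any tolerance-based assertions, reproducible across runs:

import numpy as np

np.random.seed(0)
first = np.random.randn(4, 3)

np.random.seed(0)
second = np.random.randn(4, 3)

# Re-seeding reproduces exactly the same draws, so numeric checks in the
# unit tests no longer depend on run-to-run randomness.
assert np.array_equal(first, second)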
