Commit: 4abcbfe
Parent(s): be36d4a
Fix issue with torch imported before Julia init
test/test_torch.py CHANGED (+4, -10)
@@ -2,6 +2,10 @@ import unittest
 import numpy as np
 import pandas as pd
 from pysr import sympy2torch, PySRRegressor
+# Need to initialize Julia before importing torch...
+from pysr.julia_helpers import init_julia
+Main = init_julia()
+import torch
 import sympy
 
 
@@ -13,8 +17,6 @@ class TestTorch(unittest.TestCase):
         x, y, z = sympy.symbols("x y z")
         cosx = 1.0 * sympy.cos(x) + y
 
-        import torch
-
         X = torch.tensor(np.random.randn(1000, 3))
         true = 1.0 * torch.cos(X[:, 0]) + X[:, 1]
         torch_module = sympy2torch(cosx, [x, y, z])
@@ -49,7 +51,6 @@ class TestTorch(unittest.TestCase):
         model.refresh(checkpoint_file="equation_file.csv")
         tformat = model.pytorch()
         self.assertEqual(str(tformat), "_SingleSymPyModule(expression=cos(x1)**2)")
-        import torch
 
         np.testing.assert_almost_equal(
             tformat(torch.tensor(X.values)).detach().numpy(),
@@ -85,8 +86,6 @@ class TestTorch(unittest.TestCase):
         tformat = model.pytorch()
         self.assertEqual(str(tformat), "_SingleSymPyModule(expression=cos(x1)**2)")
 
-        import torch
-
         np.testing.assert_almost_equal(
             tformat(torch.tensor(X)).detach().numpy(),
             np.square(np.cos(X[:, 1])),  # 2nd feature
@@ -99,8 +98,6 @@ class TestTorch(unittest.TestCase):
 
         module = sympy2torch(expression, [x, y, z])
 
-        import torch
-
         X = torch.rand(100, 3).float() * 10
 
         true_out = (
@@ -135,8 +132,6 @@ class TestTorch(unittest.TestCase):
             "equation_file_custom_operator.csv.bkup", sep="|"
         )
 
-        import torch
-
         model.set_params(
             equation_file="equation_file_custom_operator.csv",
             extra_sympy_mappings={"mycustomoperator": sympy.sin},
@@ -168,7 +163,6 @@ class TestTorch(unittest.TestCase):
         torch_module = model.pytorch()
 
         np_output = model.predict(X.values)
-        import torch
 
         torch_output = torch_module(torch.tensor(X.values)).detach().numpy()
 
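
For context, the snippet below is a minimal, self-contained sketch of the import ordering this change enforces in the test module: Julia is initialized through pysr.julia_helpers.init_julia (the helper used in the diff) before torch is imported, as the new comment notes. The expression and the random input mirror the first test above; the variable names and the assertion tolerance are illustrative only, not part of the change.

# Sketch only: mirrors the import ordering the commit enforces in
# test/test_torch.py -- initialize Julia before torch is imported.
import numpy as np

from pysr import sympy2torch
# Need to initialize Julia before importing torch (see the diff above).
from pysr.julia_helpers import init_julia

Main = init_julia()  # start the Julia runtime first

import torch  # only imported after init_julia() has run
import sympy

# Same expression as the first test: 1.0*cos(x) + y over three features.
x, y, z = sympy.symbols("x y z")
torch_module = sympy2torch(1.0 * sympy.cos(x) + y, [x, y, z])

X = torch.tensor(np.random.randn(1000, 3))
expected = 1.0 * torch.cos(X[:, 0]) + X[:, 1]

# Illustrative check; tolerance chosen arbitrarily for this sketch.
np.testing.assert_almost_equal(
    torch_module(X).detach().numpy(), expected.numpy(), decimal=4
)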