Commit: e0d94cd
Parent: 3090581

Try additional initial conditions during optimization

Files changed:
- README.md +2 -0
- julia/sr.jl +9 -0
README.md CHANGED

@@ -207,11 +207,13 @@ pd.DataFrame, Results dataframe, giving complexity, MSE, and equations
 - [ ] Consider adding mutation for constant<->variable
 - [ ] Hierarchical model, so can re-use functional forms. Output of one equation goes into second equation?
 - [ ] Use NN to generate weights over all probability distribution conditional on error and existing equation, and train on some randomly-generated equations
+- [ ] Add GPU capability?
 - [ ] Performance:
     - [ ] Use an enum for functions instead of storing them?
     - Current most expensive operations:
         - [ ] Calculating the loss function - there is duplicate calculations happening.
         - [x] Declaration of the weights array every iteration
+- [x] Try other initial conditions for optimizer
 - [x] Make scaling of changes to constant a hyperparameter
 - [x] Make deletion op join deleted subtree to parent
 - [x] Update hall of fame every iteration?
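One open item above, "use an enum for functions instead of storing them", is about replacing `Function` fields in tree nodes with small integer codes, so node types stay concrete. A minimal sketch of that idea using Julia's `@enum` and a lookup tuple (`OpCode`, `BINOPS`, and `apply` are illustrative names, not from this repo):

```julia
# Operators identified by a small enum code instead of a Function field.
@enum OpCode ADD=1 MUL=2

const BINOPS = (+, *)   # lookup table, indexed by Int(code)

# Dispatching through the code keeps a tree node's operator slot a plain
# integer rather than an abstractly-typed Function.
apply(code::OpCode, x::Float32, y::Float32) = BINOPS[Int(code)](x, y)

apply(ADD, 1f0, 2f0)   # 3.0f0
apply(MUL, 2f0, 3f0)   # 6.0f0
```

The potential win is that an `Int`-coded node avoids dynamic dispatch on an abstract `Function` field during evaluation, which is exactly the hot path named in the "most expensive operations" items.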
julia/sr.jl CHANGED

@@ -687,6 +687,14 @@ function optimizeConstants(member::PopMember)::PopMember
         result = Optim.optimize(f, x0, Optim.Newton(), Optim.Options(iterations=20))
     else
         result = Optim.optimize(f, x0, Optim.NelderMead(), Optim.Options(iterations=100))
+
+        # Try other initial conditions:
+        for i=1:5
+            tmpresult = Optim.optimize(f, x0 .* (1f0 .+ 5f-1*randn(Float32, size(x0)[1])), Optim.NelderMead(), Optim.Options(iterations=100))
+            if tmpresult.minimum < result.minimum
+                result = tmpresult
+            end
+        end
     end
     if Optim.converged(result)
         setConstants(member.tree, result.minimizer)
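The added loop is a simple multi-start (random-restart) strategy: each restart perturbs the initial constants with roughly 50% multiplicative Gaussian noise and keeps whichever Nelder-Mead run reaches the lowest loss, so the constant fit is less sensitive to a bad starting point. A self-contained sketch of the same pattern, assuming only Optim.jl and a toy objective (`rosenbrock` and `multistart` are illustrative names, not from sr.jl):

```julia
using Optim

# Toy objective standing in for the loss over an equation's constants.
rosenbrock(x) = (1f0 - x[1])^2 + 100f0 * (x[2] - x[1]^2)^2

# Multi-start Nelder-Mead: run once from x0, then from several randomly
# perturbed starting points, keeping the lowest minimum found.
function multistart(f, x0; nrestarts=5)
    result = Optim.optimize(f, x0, Optim.NelderMead(),
                            Optim.Options(iterations=100))
    for _ in 1:nrestarts
        # ~50% multiplicative noise on the starting point, as in the commit.
        xp = x0 .* (1f0 .+ 5f-1 .* randn(Float32, length(x0)))
        tmp = Optim.optimize(f, xp, Optim.NelderMead(),
                             Optim.Options(iterations=100))
        if tmp.minimum < result.minimum
            result = tmp
        end
    end
    return result
end

res = multistart(rosenbrock, Float32[0.5, 0.5])
println(res.minimizer, " => ", res.minimum)
```

Because Nelder-Mead is a local, derivative-free method, a handful of cheap restarts like this is a common way to hedge against local minima without changing the optimizer itself.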
@@ -731,6 +739,7 @@ function fullRun(niterations::Integer;
         bestSubPops[i] = bestSubPop(allPops[i], topn=topn)
         for j=1:bestSubPops[i].n
             bestSubPops[i].members[j].tree = simplifyTree(bestSubPops[i].members[j].tree)
+            bestSubPops[i].members[j].tree = combineOperators(bestSubPops[i].members[j].tree)
             if shouldOptimizeConstants
                 bestSubPops[i].members[j] = optimizeConstants(bestSubPops[i].members[j])
             end
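`combineOperators` itself is not part of this diff, so its behavior can only be inferred: by name and by its placement right after `simplifyTree`, it presumably merges stacked applications of the same associative operator, e.g. collapsing `(x + c1) + c2` into `x + (c1 + c2)` so `optimizeConstants` has fewer redundant constants to fit. A hypothetical sketch of that kind of pass on a toy tree type (every name here is illustrative; the real node type in sr.jl differs):

```julia
# Toy binary expression tree: degree 0 = leaf, degree 2 = binary operator.
mutable struct Node
    degree::Int
    op::Union{Function, Nothing}   # e.g. (+) or (*) on degree-2 nodes
    constant::Bool                 # does a leaf hold a constant?
    val::Union{Float32, Symbol}    # constant value or variable name
    l::Union{Node, Nothing}
    r::Union{Node, Nothing}
end

leaf(v) = Node(0, nothing, v isa Float32, v, nothing, nothing)
branch(op, l, r) = Node(2, op, false, 0f0, l, r)

isconst(n) = n.degree == 0 && n.constant

# Fold (x op c1) op c2 into x op (c1 op c2) for associative op (+ or *).
function combine(n::Node)
    n.degree != 2 && return n
    n.l = combine(n.l); n.r = combine(n.r)
    if (n.op === (+) || n.op === (*)) && isconst(n.r) &&
            n.l.degree == 2 && n.l.op === n.op && isconst(n.l.r)
        merged = leaf(n.op(n.l.r.val, n.r.val))
        return branch(n.op, n.l.l, merged)
    end
    return n
end

tree = branch(+, branch(+, leaf(:x), leaf(1f0)), leaf(2f0))
combined = combine(tree)   # equivalent to x + 3.0f0
```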