import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import gradio as gr
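
# Reproducible synthetic data: 100 points on the line y = 2x + 3
# with Gaussian noise (std 0.1).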
torch.manual_seed(0)
X = torch.linspace(0, 1, 100).unsqueeze(1)  # shape (100, 1)
y = 2 * X + 3 + 0.1 * torch.randn_like(X)
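

# Minimal linear regression model: nn.Linear(1, 1) learns the slope and intercept.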
class LinearModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        return self.linear(x)
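

# Train a fresh model for 100 full-batch epochs with the selected optimizer,
# recording the MSE loss after every step.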
def train(optimizer_name):
    model = LinearModel()
    criterion = nn.MSELoss()
    # Look up the optimizer class and instantiate only the selected one,
    # rather than eagerly constructing all four on the same parameters.
    optimizer_cls = {
        'SGD': torch.optim.SGD,
        'Adam': torch.optim.Adam,
        'RMSProp': torch.optim.RMSprop,
        'Adagrad': torch.optim.Adagrad,
    }[optimizer_name]
    optimizer = optimizer_cls(model.parameters(), lr=0.1)

    losses = []
    for epoch in range(100):
        optimizer.zero_grad()
        output = model(X)  # full-batch forward pass
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    return losses
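

# Plot the recorded losses as a matplotlib figure for gr.Plot to render.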
def plot_optimizer(opt_name):
    losses = train(opt_name)
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.plot(losses, label=opt_name)
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.set_title(f'{opt_name} Loss Curve')
    ax.grid(True)
    ax.legend()
    plt.close(fig)  # detach from pyplot so repeated calls don't accumulate figures
    return fig


demo = gr.Interface(
    fn=plot_optimizer,
    inputs=gr.Dropdown(['SGD', 'Adam', 'RMSProp', 'Adagrad'], label="Choose Optimizer"),
    outputs=gr.Plot(label="Loss Curve"),
    title="Optimizer Comparison on Linear Regression",
    description="Select an optimizer (SGD, Adam, RMSProp, or Adagrad) to visualize its convergence on a simple linear regression task."
)
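
# Launch the app when run as a script; demo.launch(share=True) would create a public link.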
if __name__ == "__main__":
    demo.launch()