import torch

from DataLoader import get_image_loader
from Net import ImageNN

# 01.05.22 -- 0.5h
from netio import save_model, load_model


def train_model():
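    """Train an ImageNN on the images in the hard-coded directory, evaluate it
    on the test loader and save the trained net via save_model."""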
    train_loader, test_loader = get_image_loader("my/supercool/image/dir")
    nn = ImageNN()  # todo: pass the size and so on
    nn.train()  # init in training mode

    optimizer = torch.optim.SGD(nn.parameters(), lr=0.1)  # todo: adjust parameters and lr
    loss_function = torch.nn.CrossEntropyLoss()
    n_epochs = 15  # todo: adjust the number of epochs
    losses = []
    for epoch in range(n_epochs):
        print(f"Epoch {epoch}/{n_epochs}\n")
        for input_tensor, target_tensor in train_loader:
            output = nn(input_tensor)  # get model output (forward pass)
            loss = loss_function(output, target_tensor)  # compute loss given model output and true target
            loss.backward()  # compute gradients (backward pass)
            optimizer.step()  # perform gradient descent update step
            optimizer.zero_grad()  # reset gradients
            losses.append(loss.item())
    # switch net to eval mode
    nn.eval()
    n_correct = 0
    n_total = 0
    with torch.no_grad():
        for input_tensor, target_tensor in test_loader:
            # iterate over the test loader and judge the goodness of each prediction
            out = nn(input_tensor)  # apply model

            # todo: proper evaluation of the trained model on the test set loader;
            # as a simple placeholder, count correct predictions here, assuming
            # class-index targets (which is what the CrossEntropyLoss above expects)
            predictions = out.argmax(dim=1)
            n_correct += (predictions == target_tensor).sum().item()
            n_total += target_tensor.shape[0]
    print(f"Test accuracy: {n_correct / n_total:.3f}")

    # todo save trained model to blob file
    save_model(nn)


def apply_model():
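    """Load a previously trained model via load_model; applying it to new data
    is still a todo."""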
    model = load_model()

    pass
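

# Hypothetical entry point: a minimal sketch, assuming this script is meant to
# be run directly to kick off training.
if __name__ == "__main__":
    train_model()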