add mask to training
export trainpickle file in correct format
commit 0f0c789981
parent 1add9d278f
@@ -60,6 +60,6 @@ def test():
     Compress.compress(PICKEL_PATH + "_target.pkl")

 if __name__ == '__main__':
-    apply_model("training/000/000017.jpg")
+    # apply_model("training/000/000017.jpg")
+    eval_evalset()
     # test()
@@ -37,7 +37,7 @@ class ImageDataset(Dataset):
         spacing = (random.randint(*self.spacingrange), random.randint(*self.spacingrange))
         doomed_image = ex4.ex4(target_image, offset, spacing)

-        return doomed_image[0], np.transpose(target_image, (2, 0, 1))
+        return doomed_image[0], doomed_image[1], np.transpose(target_image, (2, 0, 1))

     def __len__(self):
         return len(self.image_files)
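As a sanity check for the new three-element return, one can collate a batch from a stand-in dataset and confirm the shapes line up; the (3, 100, 100) shapes below are assumptions for illustration, not taken from ex4 itself:

import numpy as np
import torch

class DummyTripleDataset(torch.utils.data.Dataset):
    """Hypothetical stand-in that mimics the new (input, mask, target) contract."""
    def __len__(self):
        return 4

    def __getitem__(self, idx):
        doomed = np.zeros((3, 100, 100), dtype=np.float32)   # plays the role of doomed_image[0]
        known = np.ones((3, 100, 100), dtype=np.float32)     # plays the role of doomed_image[1]
        target = np.zeros((3, 100, 100), dtype=np.float32)   # transposed target_image
        return doomed, known, target

loader = torch.utils.data.DataLoader(DummyTripleDataset(), batch_size=2)
inp, mask, tgt = next(iter(loader))
print(inp.shape, mask.shape, tgt.shape)  # each torch.Size([2, 3, 100, 100])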
@@ -3,7 +3,6 @@ import sys

 import PIL
 import numpy as np
-import packaging
 import torch
 from matplotlib import pyplot as plt
 from packaging.version import Version
@@ -32,7 +31,8 @@ def train_model():

     # Load datasets
     train_loader, test_loader = get_image_loader("training/", precision=np.float32)
-    nn = ImageNN(n_in_channels=3, precision=np.float32)  # todo net params
+
+    nn = ImageNN(n_in_channels=6, precision=np.float32)  # todo net params
     nn.train()  # init with train mode
     nn.to(device)  # send net to available device
@@ -47,8 +47,9 @@ def train_model():
     for epoch in range(n_epochs):
         print(f"Epoch {epoch}/{n_epochs}\n")
         i = 0
-        for input_tensor, target_tensor in train_loader:
+        for input_tensor, mask, target_tensor in train_loader:
             optimizer.zero_grad()  # reset gradients
+            input_tensor = torch.cat((input_tensor, mask), 1)

             output = nn(input_tensor.to(device))  # get model output (forward pass)
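For context, concatenating the mask onto the batch along dim 1 is what produces the 6-channel input that the ImageNN(n_in_channels=6, ...) instantiation above expects; a minimal sketch with dummy NCHW tensors (the 3-channel mask shape is an assumption):

import torch

images = torch.rand(8, 3, 100, 100)  # de-grided input images
masks = torch.ones(8, 3, 100, 100)   # known-pixel masks (assumed to be 3-channel like the images)
net_input = torch.cat((images, masks), 1)
print(net_input.shape)  # torch.Size([8, 6, 100, 100]) -- matches n_in_channels=6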
@@ -77,8 +78,8 @@ def train_model():

             # Plot output
             if i % 100 == 0:
-                plot(input_tensor.detach().cpu().numpy()[:1], target_tensor.detach().cpu().numpy()[:1],
-                     output.detach().cpu().numpy()[:1],
+                plot(input_tensor.detach().cpu().numpy()[0], target_tensor.detach().cpu().numpy()[0],
+                     output.detach().cpu().numpy()[0],
                      plotpath, i, epoch)

     # evaluate model with submission pkl file
@@ -93,10 +94,12 @@ def eval_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader,
     # disable gradient calculations
     with torch.no_grad():
         i = 0
-        for input, target in dataloader:
+        for input, mask, target in dataloader:
             input = input.to(device)
             target = target.to(device)
+            mask = mask.to(device)
+
+            input = torch.cat((input, mask), 1)
             out = model(input)
             loss += loss_fn(out, target).item()
             print(f'\rEval prog[{i}/{len(dataloader) * dataloader.batch_size}]', end='')
@@ -111,14 +114,13 @@ def plot(inputs, targets, predictions, path, update, epoch):
     os.makedirs(path, exist_ok=True)
     fig, axes = plt.subplots(ncols=3, figsize=(15, 5))

-    for i in range(len(inputs)):
     for ax, data, title in zip(axes, [inputs, targets, predictions], ["Input", "Target", "Prediction"]):
         ax.clear()
         ax.set_title(title)
-        ax.imshow(DataLoader.postprocess(np.transpose(data[i], (1, 2, 0))), interpolation="none")
+        ax.imshow(DataLoader.postprocess(np.transpose(data[:3, :, :], (1, 2, 0))), interpolation="none")
         # ax.imshow(np.transpose((data[i]), (1, 2, 0)), interpolation="none")
         ax.set_axis_off()
-    fig.savefig(os.path.join(path, f"{epoch:02d}_{update:07d}_{i:02d}.png"), dpi=100)
+    fig.savefig(os.path.join(path, f"{epoch:02d}_{update:07d}.png"), dpi=100)

     plt.close(fig)
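Since the plotted input now carries 6 channels (image + mask), data[:3, :, :] keeps only the image channels before the transpose to HWC that imshow needs; a quick shape check with a dummy array:

import numpy as np

six_channel = np.random.rand(6, 100, 100).astype(np.float32)  # stacked image + mask (assumed shape)
rgb_only = six_channel[:3, :, :]                 # drop the mask channels for plotting
hwc = np.transpose(rgb_only, (1, 2, 0))          # imshow expects (H, W, C)
print(hwc.shape)  # (100, 100, 3)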
Net.py
@@ -1,9 +1,13 @@
+import math
+
 import numpy as np
 import torch
 from torch import nn


 class ImageNN(torch.nn.Module):
-    def __init__(self, precision: np.float32 or np.float64, n_in_channels: int = 1, n_hidden_layers: int = 3, n_kernels: int = 32, kernel_size: int = 7):
+    def __init__(self, precision: np.float32 or np.float64, n_in_channels: int = 1, n_hidden_layers: int = 3,
+                 n_kernels: int = 32, kernel_size: int = 7):
         """Simple CNN with `n_hidden_layers`, `n_kernels`, and `kernel_size` as hyperparameters"""
         super().__init__()
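The hunk above only reflows the __init__ signature; for readers who want to see how such hyperparameters are typically wired up, here is a hypothetical stand-in in the same spirit. It is not the repository's actual ImageNN body, just an illustration of what n_hidden_layers, n_kernels and kernel_size usually control:

import numpy as np
import torch


class SimpleCNN(torch.nn.Module):
    """Hypothetical stand-in mirroring the ImageNN hyperparameters."""

    def __init__(self, precision: np.float32 or np.float64, n_in_channels: int = 1,
                 n_hidden_layers: int = 3, n_kernels: int = 32, kernel_size: int = 7):
        super().__init__()
        layers = []
        channels = n_in_channels
        for _ in range(n_hidden_layers):
            # "same" padding so 100x100 inputs stay 100x100
            layers.append(torch.nn.Conv2d(channels, n_kernels, kernel_size, padding=kernel_size // 2))
            layers.append(torch.nn.ReLU())
            channels = n_kernels
        self.hidden = torch.nn.Sequential(*layers)
        self.output = torch.nn.Conv2d(channels, 3, kernel_size, padding=kernel_size // 2)
        dtype = torch.float32 if precision == np.float32 else torch.float64
        self.to(dtype)

    def forward(self, x):
        return self.output(self.hidden(x))


net = SimpleCNN(np.float32, n_in_channels=6)
print(net(torch.rand(1, 6, 100, 100)).shape)  # torch.Size([1, 3, 100, 100])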
netio.py
@@ -15,6 +15,9 @@ def save_model(model: torch.nn.Module):
     print(f"Saved raw model to {MODEL_PATH}")
     torch.save(model, MODEL_PATH)

+    dummy_input = torch.randn(1, 6, 100, 100)
+    torch.onnx.export(model, dummy_input, MODEL_PATH + ".onnx", verbose=False, opset_version=11)
+

 def eval_evalset():
     # read the provided testing pickle file
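A way to double-check the new export is to load the .onnx file back and run a dummy 6-channel batch through it; onnxruntime is an extra dependency not used by this repo, and the file name below is only an assumption about what MODEL_PATH + ".onnx" resolves to:

import numpy as np
import onnxruntime as ort  # optional, not a dependency of this repo

session = ort.InferenceSession("model.pt.onnx")        # assumed value of MODEL_PATH + ".onnx"
dummy = np.random.randn(1, 6, 100, 100).astype(np.float32)
input_name = session.get_inputs()[0].name
outputs = session.run(None, {input_name: dummy})
print(outputs[0].shape)                                # should mirror the model's output shape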
@@ -28,14 +31,19 @@ def eval_evalset():
     model.eval()
     with open('testing/inputs.pkl', 'rb') as handle:
         b: dict = pickle.load(handle)
-    outarr = np.zeros(dtype=np.uint8, shape=(len(b['input_arrays']), 3, 100, 100))
+    outarr = ()
     i = 0
     piclen = len(b['input_arrays'])
-    for pic in b['input_arrays']:
-        pic = DataLoader.preprocess(pic, precision=np.float32)
-        out = model(torch.from_numpy(pic))
+    for input_array, known_array in zip(b['input_arrays'], b['known_arrays']):
+        input_array = DataLoader.preprocess(input_array, precision=np.float32)
+        input_tensor = torch.cat((torch.from_numpy(input_array), torch.from_numpy(known_array)), 0)
+        out = model(input_tensor)
         out = DataLoader.postprocess(out.cpu().detach().numpy())
-        outarr[i] = out
+
+        rest = out * (1 - known_array)
+        rest = rest[1 - known_array > 0]
+
+        outarr = (*outarr, rest)
+
         print(f'\rApplying model [{i}/{piclen}]', end='')
         i += 1
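The "export ... pickle file in correct format" part of the commit message presumably refers to serialising this tuple of flattened unknown-pixel predictions; a minimal sketch of writing such a tuple out with pickle (the output path is an assumption, and the dummy arrays only mimic the shape of rest above):

import pickle

import numpy as np

# Dummy stand-ins for `rest`: one flat uint8 array of unknown-pixel values per test image.
outarr = (np.zeros(300, dtype=np.uint8), np.zeros(450, dtype=np.uint8))

with open("testing/submission.pkl", "wb") as handle:   # file name is hypothetical
    pickle.dump(outarr, handle, protocol=pickle.HIGHEST_PROTOCOL)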