From 444a6c5a983ebb3565a03e95ab5d264807ca760d Mon Sep 17 00:00:00 2001
From: Falguni Ghosh <falguni.ghosh@fau.de>
Date: Sun, 15 Oct 2023 21:21:59 +0000
Subject: [PATCH] Upload New File

---
 .../trainer.py | 206 ++++++++++++++++++
 1 file changed, 206 insertions(+)
 create mode 100644 4_Resnet_Solar_panel_defect_Identification/trainer.py

diff --git a/4_Resnet_Solar_panel_defect_Identification/trainer.py b/4_Resnet_Solar_panel_defect_Identification/trainer.py
new file mode 100644
index 0000000..1ea793b
--- /dev/null
+++ b/4_Resnet_Solar_panel_defect_Identification/trainer.py
@@ -0,0 +1,206 @@
+import numpy as np
+import torch as t
+from sklearn.metrics import f1_score
+from tqdm.autonotebook import tqdm
+
+print(t.cuda.is_available())
+
+
+class Trainer:
+
+    def __init__(self,
+                 model,                       # Model to be trained.
+                 crit,                        # Loss function
+                 optim=None,                  # Optimizer
+                 train_dl=None,               # Training data set
+                 val_test_dl=None,            # Validation (or test) data set
+                 cuda=True,                   # Whether to use the GPU
+                 early_stopping_patience=-1): # The patience for early stopping
+        self._model = model
+        self._crit = crit
+        self._optim = optim
+        self._train_dl = train_dl
+        self._val_test_dl = val_test_dl
+        self._cuda = cuda
+        # early stopping related variables
+        self._early_stopping_patience = early_stopping_patience
+        self._min_loss = np.inf
+        self._epochs_without_loss_decrease = 0
+
+        if cuda:
+            self._model = model.cuda()
+            self._crit = crit.cuda()
+
+    def save_checkpoint(self, epoch):
+        t.save({'state_dict': self._model.state_dict()}, 'checkpoints/checkpoint_{:03d}.ckp'.format(epoch))
+
+    def restore_checkpoint(self, epoch_n):
+        ckp = t.load('checkpoints/checkpoint_{:03d}.ckp'.format(epoch_n), 'cuda' if self._cuda else None)
+        self._model.load_state_dict(ckp['state_dict'])
+
+    def save_onnx(self, fn):
+        m = self._model.cpu()
+        m.eval()
+        x = t.randn(1, 3, 300, 300, requires_grad=True)
+        y = self._model(x)
+        t.onnx.export(m,                          # model being run
+                      x,                          # model input (or a tuple for multiple inputs)
+                      fn,                         # where to save the model (can be a file or file-like object)
+                      export_params=True,         # store the trained parameter weights inside the model file
+                      opset_version=10,           # the ONNX version to export the model to
+                      do_constant_folding=True,   # whether to execute constant folding for optimization
+                      input_names=['input'],      # the model's input names
+                      output_names=['output'],    # the model's output names
+                      dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
+                                    'output': {0: 'batch_size'}})
+
+    def train_step(self, x, y):
+        # perform the following steps:
+        # -reset the gradients. By default, PyTorch accumulates (sums up) gradients when backward() is called.
+        #  This behavior is not required here, so all gradients have to be zeroed before calling backward().
+        self._optim.zero_grad()
+        # -propagate through the network
+        pred_op = self._model(x)
+        # -calculate the loss
+        l = self._crit(pred_op, y)
+        # -compute gradient by backward propagation
+        l.backward()
+        # -update weights
+        self._optim.step()
+        # -return the loss
+        return l
+
+    def val_test_step(self, x, y):
+        # predict
+        # propagate through the network and calculate the loss and predictions
+        # return the loss and the predictions
+        pred_op = self._model(x)
+        y = y.float()
+        l = self._crit(pred_op, y)
+        return l, pred_op
+
+    def train_epoch(self):
+        # set training mode
+        self._model.train()
+        # iterate through the training set
+        # transfer the batch to "cuda()" -> the gpu if a gpu is given
+        # perform a training step
+        # calculate the average loss for the epoch and return it
+        if self._cuda:
+            # save_onnx() moves the model to the CPU, so move model and loss back to the GPU once per epoch
+            self._model = self._model.cuda()
+            self._crit = self._crit.cuda()
+        num_batch = 0
+        total_l = 0
+        for x, y in tqdm(self._train_dl, desc='Training in progress....'):
+            if self._cuda:
+                x = x.cuda()
+                y = y.cuda()
+            total_l += self.train_step(x, y).item()
+            num_batch += 1
+
+        return total_l / num_batch
+
+    def val_test(self):
+        # set eval mode. Some layers have different behaviors during training and testing (for example: Dropout, BatchNorm, etc.). To handle those properly, you'd want to call model.eval()
+        # disable gradient computation. Since you don't need to update the weights during testing, gradients aren't required anymore.
+        # iterate through the validation set
+        # transfer the batch to the gpu if given
+        # perform a validation step
+        # save the predictions and the labels for each batch
+        # calculate the average loss and average metrics of your choice. You might want to calculate these metrics in designated functions
+        # return the loss and print the calculated metrics
+        self._model.eval()
+        self.f1_score_list = []
+
+        if self._cuda:
+            self._model = self._model.cuda()
+            self._crit = self._crit.cuda()
+
+        with t.no_grad():
+            predictions = []
+            labels = []
+            num_batch = 0
+            total_l = 0
+            for x, y in tqdm(self._val_test_dl, desc='Validation in progress....'):
+                if self._cuda:
+                    x = x.cuda()
+                    y = y.cuda()
+                batch_loss, batch_pred = self.val_test_step(x, y)
+                total_l += batch_loss.item()
+                num_batch += 1
+                # binarise the predicted probabilities with a 0.5 threshold before computing the F1 score
+                batch_pred[batch_pred >= 0.5] = 1
+                batch_pred[batch_pred < 0.5] = 0
+                prediction_crack_inactive = batch_pred.cpu().detach().numpy()
+                label_crack_inactive = y.cpu().detach().numpy()
+                f1_score_value = f1_score(label_crack_inactive, prediction_crack_inactive, average='macro')
+                self.f1_score_list.append(f1_score_value)
+                predictions.append(batch_pred)
+                labels.append(y)
+        # average loss and average F1 score over all validation batches
+        return total_l / num_batch, np.mean(self.f1_score_list)
+
+    def fit(self, epochs=-1):
+        assert self._early_stopping_patience > 0 or epochs > 0
+        # create a list for the train and validation losses, and create a counter for the epoch
+        train_loss_list = []
+        validation_loss_list = []
+        num_epoch = 0
+
+        while True:
+            # stop by epoch number
+            # train for an epoch and then calculate the loss and metrics on the validation set
+            # append the losses to the respective lists
+            # use the save_checkpoint function to save the model (can be restricted to epochs with improvement)
+            # check whether early stopping should be performed using the early stopping criterion and stop if so
+            # return the losses for both training and validation
+            if num_epoch == epochs:
+                print("all epochs ran")
+                break
+
+            avg_train_l = self.train_epoch()
+            train_loss_list.append(avg_train_l)
+            avg_val_l, f1_score_value = self.val_test()
+            validation_loss_list.append(avg_val_l)
+            print("Num_Epoch: ", num_epoch, "train_loss: ", avg_train_l, "validation_loss: ", avg_val_l, "f1_score: ", f1_score_value)
+
+            self.save_checkpoint(num_epoch)
+
+            early_stopping_criterion = False
+            if self._min_loss > avg_val_l:
+                self._min_loss = avg_val_l
+                self._epochs_without_loss_decrease = 0
+                self.save_onnx('checkpoints/checkpoint_{:03d}.onnx'.format(num_epoch))
+            else:  # no need to save the model
+                self._epochs_without_loss_decrease += 1
+                print("loss did not improve from last min loss")
+                if self._epochs_without_loss_decrease == self._early_stopping_patience:
+                    print("Early stopping")
+                    early_stopping_criterion = True
+            if early_stopping_criterion:
+                break
+
+            num_epoch += 1
+
+        self.save_onnx('checkpoints/checkpoint_{:03d}.onnx'.format(num_epoch))
+        return train_loss_list, validation_loss_list, f1_score_value
--
GitLab
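
Below is a minimal usage sketch of the Trainer class from the patch above; it is not part of the commit. The toy model, loss, optimizer, dummy data, and hyper-parameters are illustrative assumptions only; the actual project presumably wires in its own ResNet and data pipeline. The sketch assumes trainer.py is importable from the working directory, that the model outputs per-label probabilities in [0, 1] (the 0.5 threshold in val_test relies on this), and that a checkpoints/ directory exists for save_checkpoint and save_onnx.

import os

import torch as t
from torch.utils.data import DataLoader, TensorDataset

from trainer import Trainer  # the class defined in the patch above

# fit() writes checkpoint and ONNX files to 'checkpoints/', so make sure the directory exists
os.makedirs('checkpoints', exist_ok=True)

# dummy stand-ins for the real data set: 64 RGB images (3x300x300) with two binary labels each
images = t.randn(64, 3, 300, 300)
labels = (t.rand(64, 2) > 0.5).float()
train_dl = DataLoader(TensorDataset(images, labels), batch_size=8, shuffle=True)
val_dl = DataLoader(TensorDataset(images, labels), batch_size=8)

# any model mapping (N, 3, 300, 300) to (N, 2) probabilities fits the 0.5 threshold used in val_test()
model = t.nn.Sequential(t.nn.Flatten(), t.nn.Linear(3 * 300 * 300, 2), t.nn.Sigmoid())
crit = t.nn.BCELoss()
optim = t.optim.Adam(model.parameters(), lr=1e-3)

trainer = Trainer(model, crit, optim, train_dl, val_dl,
                  cuda=t.cuda.is_available(), early_stopping_patience=3)
train_losses, val_losses, f1 = trainer.fit(epochs=5)
print(train_losses, val_losses, f1)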