Classifying custom image datasets by creating Convolutional Neural Networks and Residual Networks from scratch with PyTorch
- Dataset
EuroSAT: Land Use and Land Cover Classification with Sentinel-2
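The notebooks expect the RGB images to be already split into DATASET_PATH/train and DATASET_PATH/test folders. If you still need the raw data, one option (an assumption, not something the notebooks require) is the EuroSAT helper shipped with torchvision >= 0.13:

from torchvision import datasets

# Downloads the EuroSAT RGB images into ./data; the train/test split still has to be done manually
eurosat = datasets.EuroSAT(root='data', download=True)
print(len(eurosat), eurosat.classes)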
- Create data and dataloaders
import os
from torch.utils.data import DataLoader
from torchvision import datasets

train_dataset = datasets.ImageFolder(os.path.join(DATASET_PATH, 'train'), data_transforms)
test_dataset = datasets.ImageFolder(os.path.join(DATASET_PATH, 'test'), data_transforms)

train_loader = DataLoader(train_dataset, BATCH_SIZE, shuffle=True, num_workers=2, pin_memory=True)
test_loader = DataLoader(test_dataset, BATCH_SIZE, shuffle=False, num_workers=2, pin_memory=True)
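DATASET_PATH and BATCH_SIZE are placeholders defined in the notebooks. With those set (and the data_transforms from the next step), a quick sanity check on the loaders might look like:

# Pull one batch and inspect shapes and class names
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)   # e.g. torch.Size([BATCH_SIZE, 3, 64, 64]) and torch.Size([BATCH_SIZE])
print(train_dataset.classes)        # the ten EuroSAT land-cover classes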
- Create transformations
from torchvision import datasets, models, transforms

data_transforms = transforms.Compose([
    transforms.Resize(64),                          # EuroSAT RGB images are 64x64 pixels
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],     # ImageNet channel means
                         [0.229, 0.224, 0.225])     # ImageNet channel standard deviations
])
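The normalization values above are the standard ImageNet channel statistics. If you would rather normalize with statistics computed from EuroSAT itself, a rough optional sketch (not part of the notebooks) is:

import os
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

plain_train = datasets.ImageFolder(os.path.join(DATASET_PATH, 'train'), transforms.ToTensor())
loader = DataLoader(plain_train, batch_size=256, num_workers=2)

channel_sum = torch.zeros(3)
channel_sq_sum = torch.zeros(3)
n_pixels = 0
for images, _ in loader:
    n_pixels += images.shape[0] * images.shape[2] * images.shape[3]
    channel_sum += images.sum(dim=[0, 2, 3])
    channel_sq_sum += (images ** 2).sum(dim=[0, 2, 3])

mean = channel_sum / n_pixels
std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
print(mean, std)   # plug these into transforms.Normalize if desired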
- Create models
# Create a Convolutional Neural Network
model = CNN()

# ...or create a Residual Network (2 residual blocks per stage)
model = ResNet(ResidualBlock, [2, 2, 2])
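The CNN, ResidualBlock and ResNet classes are defined in the notebooks. For orientation only, a minimal sketch of what the two architectures might look like (the exact layer sizes are an assumption) for 64x64 RGB inputs and 10 classes:

import torch
import torch.nn as nn

class CNN(nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32), nn.ReLU(),
            nn.MaxPool2d(2),                                   # 64x64 -> 32x32
            nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
            nn.MaxPool2d(2),                                   # 32x32 -> 16x16
            nn.Conv2d(64, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(),
            nn.MaxPool2d(2),                                   # 16x16 -> 8x8
        )
        self.classifier = nn.Linear(128 * 8 * 8, num_classes)

    def forward(self, x):
        x = self.features(x)
        return self.classifier(x.flatten(1))

class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)

class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=10):
        super().__init__()
        self.in_channels = 16
        self.stem = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1, bias=False),
                                  nn.BatchNorm2d(16), nn.ReLU(inplace=True))
        self.layer1 = self._make_layer(block, 16, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(64, num_classes)

    def _make_layer(self, block, out_channels, blocks, stride):
        downsample = None
        if stride != 1 or self.in_channels != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, out_channels, 1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels))
        layers = [block(self.in_channels, out_channels, stride, downsample)]
        self.in_channels = out_channels
        layers += [block(out_channels, out_channels) for _ in range(blocks - 1)]
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.stem(x)
        x = self.layer3(self.layer2(self.layer1(x)))
        x = self.pool(x).flatten(1)
        return self.fc(x)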
- Open and run the train_cnn.ipynb and train_resnet.ipynb Jupyter notebooks to train and evaluate the models on the EuroSAT dataset.
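The training loop below relies on a handful of objects (model, criterion, optimizer, scheduler, USE_CUDA, NUM_EPOCHS) that the notebooks define. A typical setup, with hyperparameters that are an assumption rather than the notebooks' exact values, could be:

import torch
import torch.nn as nn

USE_CUDA = torch.cuda.is_available()
NUM_EPOCHS = 20

model = CNN()                      # or ResNet(ResidualBlock, [2, 2, 2])
if USE_CUDA:
    model = model.cuda()

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)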
# Main loop
train_loss = []
train_accuracy = []
test_loss = []
test_accuracy = []
epochs = []

for epoch in range(1, NUM_EPOCHS + 1):
    print(f'\n\nRunning epoch {epoch} of {NUM_EPOCHS}...\n')
    epochs.append(epoch)

    # ------------------------- Train -------------------------
    # Reset these variables to 0 at the beginning of every epoch
    correct = 0
    iterations = 0
    iter_loss = 0.0

    model.train()                          # Put the network into training mode
    for i, (inputs, labels) in enumerate(train_loader):
        if USE_CUDA:
            inputs = inputs.cuda()
            labels = labels.cuda()

        outputs = model(inputs)
        loss = criterion(outputs, labels)
        iter_loss += loss.item()           # Accumulate the loss

        optimizer.zero_grad()              # Clear the gradients from the previous step
        loss.backward()                    # Backpropagation
        optimizer.step()                   # Update the weights

        # Record the correct predictions for the training data
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum().item()
        iterations += 1

    scheduler.step()                       # Step the learning-rate scheduler once per epoch

    # Record the training loss and accuracy
    train_loss.append(iter_loss / iterations)
    train_accuracy.append(100 * correct / len(train_dataset))

    # ------------------------- Test --------------------------
    correct = 0
    iterations = 0
    testing_loss = 0.0

    model.eval()                           # Put the network into evaluation mode
    with torch.no_grad():                  # No gradients needed during evaluation
        for i, (inputs, labels) in enumerate(test_loader):
            if USE_CUDA:
                inputs = inputs.cuda()
                labels = labels.cuda()

            outputs = model(inputs)
            loss = criterion(outputs, labels)   # Calculate the loss
            testing_loss += loss.item()

            # Record the correct predictions for the test data
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            iterations += 1

    # Record the testing loss and accuracy
    test_loss.append(testing_loss / iterations)
    test_accuracy.append(100 * correct / len(test_dataset))

    print(f'\nEpoch {epoch} validation results: Loss={test_loss[-1]:.4f} | Accuracy={test_accuracy[-1]:.2f}%\n')
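The recorded lists can then be plotted, for example with matplotlib (a sketch, not part of the notebooks):

import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))

# Loss curves
ax1.plot(epochs, train_loss, label='train')
ax1.plot(epochs, test_loss, label='test')
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss')
ax1.legend()

# Accuracy curves
ax2.plot(epochs, train_accuracy, label='train')
ax2.plot(epochs, test_accuracy, label='test')
ax2.set_xlabel('epoch')
ax2.set_ylabel('accuracy (%)')
ax2.legend()

plt.show()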