Johan/devbranch #29

Merged: 15 commits, Feb 4, 2025
4 changes: 2 additions & 2 deletions utils/load_metric.py
@@ -3,7 +3,7 @@
import numpy as np
import torch.nn as nn

from .metrics import EntropyPrediction, F1Score
from .metrics import EntropyPrediction, F1Score, precision


class MetricWrapper(nn.Module):
@@ -39,7 +39,7 @@ def _get_metric(self, key):
case "recall":
raise NotImplementedError("Recall score not implemented yet")
case "precision":
raise NotImplementedError("Precision score not implemented yet")
return precision()
case "accuracy":
raise NotImplementedError("Accuracy score not implemented yet")
case _:
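
For context, the metric object returned by the new "precision" branch is an nn.Module and can be used on its own. A minimal sketch, assuming the Precision class is imported directly from utils/metrics/precision.py (the lowercase precision name imported above presumably aliases it in the metrics package's __init__.py, which is not part of this diff):

import torch

from utils.metrics.precision import Precision  # class added below in this PR

metric = Precision(num_classes=3)
y_true = torch.tensor([0, 1, 2, 1, 0])
y_pred = torch.tensor([0, 2, 2, 1, 0])
print(metric(y_true, y_pred))  # macro-averaged precision while USE_MEAN is True
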
105 changes: 105 additions & 0 deletions utils/metrics/precision.py
@@ -0,0 +1,105 @@
import torch
import torch.nn as nn

USE_MEAN = True

# Precision = TP / (TP + FP)


class Precision(nn.Module):
"""Metric module for precision. Can calculate precision both as a mean of precisions or as brute function of true positives and false positives. This is for now controller with the USE_MEAN macro.

Parameters
----------
num_classes : int
Number of classes in the dataset.
"""

def __init__(self, num_classes):
super().__init__()

self.num_classes = num_classes

def forward(self, y_true, y_pred):
"""Calculates the precision score given number of classes and the true and predicted labels.

Parameters
----------
y_true : torch.Tensor
True class labels (integer-encoded).
y_pred : torch.Tensor
Predicted class labels (integer-encoded).

Returns
-------
torch.Tensor
Precision score.
"""
# One-hot encode the true and predicted label tensors
true_oh = torch.zeros(y_true.size(0), self.num_classes).scatter_(
1, y_true.unsqueeze(1), 1
)
pred_oh = torch.zeros(y_pred.size(0), self.num_classes).scatter_(
1, y_pred.unsqueeze(1), 1
)

if USE_MEAN:
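# Per-class true and false positive counts (macro averaging)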
tp = torch.sum(true_oh * pred_oh, 0)
fp = torch.sum(~true_oh.bool() * pred_oh, 0)

else:
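# Global true and false positive counts over all classes (micro averaging)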
tp = torch.sum(true_oh * pred_oh)
fp = torch.sum(~true_oh[pred_oh.bool()].bool())

return torch.nanmean(tp / (tp + fp))


def test_precision_case1():
true_precision = 25.0 / 36 if USE_MEAN else 7.0 / 10

true1 = torch.tensor([0, 1, 2, 1, 0, 2, 1, 0, 2, 1])
pred1 = torch.tensor([0, 2, 1, 1, 0, 2, 0, 0, 2, 1])
P = Precision(3)
precision1 = P(true1, pred1)
assert precision1.allclose(torch.tensor(true_precision), atol=1e-5), (
f"Precision Score: {precision1.item()}"
)


def test_precision_case2():
true_precision = 8.0 / 15 if USE_MEAN else 6.0 / 15

true2 = torch.tensor([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
pred2 = torch.tensor([0, 0, 4, 3, 4, 0, 4, 4, 2, 3, 4, 1, 2, 4, 0])
P = Precision(5)
precision2 = P(true2, pred2)
assert precision2.allclose(torch.tensor(true_precision), atol=1e-5), (
f"Precision Score: {precision2.item()}"
)


def test_precision_case3():
true_precision = 3.0 / 4 if USE_MEAN else 4.0 / 5

true3 = torch.tensor([0, 0, 0, 1, 0])
pred3 = torch.tensor([1, 0, 0, 1, 0])
P = Precision(2)
precision3 = P(true3, pred3)
assert precision3.allclose(torch.tensor(true_precision), atol=1e-5), (
f"Precision Score: {precision3.item()}"
)


def test_for_zero_denominator():
true_precision = 0.0
true4 = torch.tensor([1, 1, 1, 1, 1])
pred4 = torch.tensor([0, 0, 0, 0, 0])
P = Precision(2)
precision4 = P(true4, pred4)
assert precision4.allclose(torch.tensor(true_precision), atol=1e-5), (
f"Precision Score: {precision4.item()}"
)


if __name__ == "__main__":
test_precision_case1()
test_precision_case2()
test_precision_case3()
test_for_zero_denominator()
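
As a sanity check, the expected values in test_precision_case1 can be reproduced with scikit-learn's precision_score (a sketch only; scikit-learn is assumed to be available and is not a dependency introduced by this PR):

from sklearn.metrics import precision_score

y_true = [0, 1, 2, 1, 0, 2, 1, 0, 2, 1]
y_pred = [0, 2, 1, 1, 0, 2, 0, 0, 2, 1]

print(precision_score(y_true, y_pred, average="macro"))  # 25/36 ≈ 0.6944, the USE_MEAN=True path
print(precision_score(y_true, y_pred, average="micro"))  # 7/10 = 0.70, the USE_MEAN=False path
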
62 changes: 62 additions & 0 deletions utils/models/johan_model.py
@@ -0,0 +1,62 @@
"""Multi-layer perceptron model for image classification."""

import torch.nn as nn

# class NeuronLayer(nn.Module):
# def __init__(self, in_features, out_features):
# super().__init__()

# self.fc = nn.Linear(in_features, out_features)
# self.relu = nn.ReLU()

# def forward(self, x):
# x = self.fc(x)
# x = self.relu(x)
# return x


class JohanModel(nn.Module):
"""Small MLP model for image classification.

Parameters
----------
image_shape : tuple of int
Shape of the input images as (channels, height, width).
num_classes : int
Number of classes in the dataset.

"""

def __init__(self, image_shape, num_classes):
super().__init__()

# Extract features from image shape
self.in_channels = image_shape[0]
self.height = image_shape[1]
self.width = image_shape[2]
self.num_classes = num_classes
self.in_features = self.in_channels * self.height * self.width

self.fc1 = nn.Linear(self.in_features, 77)
self.fc2 = nn.Linear(77, 77)
self.fc3 = nn.Linear(77, 77)
self.fc4 = nn.Linear(77, num_classes)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()

def forward(self, x):
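# Expects input of shape (batch_size, in_features), i.e. images already flattened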
for layer in [self.fc1, self.fc2, self.fc3, self.fc4]:
x = layer(x)
x = self.relu(x)
x = self.softmax(x)
return x


# TODO
# Add your tests here


if __name__ == "__main__":
pass # Add your tests here
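
To make the TODO above concrete, here is a minimal smoke test for JohanModel (a sketch only; the 1x28x28 image shape, batch size, and import path are assumptions, and inputs must already be flattened since forward does not reshape them):

import torch

from utils.models.johan_model import JohanModel  # assumed import path

model = JohanModel(image_shape=(1, 28, 28), num_classes=10)
x = torch.randn(4, 1 * 28 * 28)  # batch of 4 pre-flattened images
out = model(x)
print(out.shape)       # torch.Size([4, 10])
print(out.sum(dim=1))  # each row sums to 1 because of the final softmax
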