Commit fb7353e

Merge branch 'johan/devbranch' of github.com:SFI-Visual-Intelligence/Collaborative-Coding-Exam into johan/devbranch
Johanmkr committed Feb 4, 2025
2 parents 3af0a71 + 5056735 commit fb7353e
Showing 12 changed files with 195 additions and 147 deletions.
29 changes: 29 additions & 0 deletions .github/workflows/test.yml
@@ -0,0 +1,29 @@
name: Test

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: mamba-org/setup-micromamba@v1
        with:
          micromamba-version: '2.0.5-0'  # any version from https://github.com/mamba-org/micromamba-releases
          environment-file: environment.yml
          init-shell: bash
          cache-environment: true
          post-cleanup: 'all'
          generate-run-shell: false

      - name: Run tests
        run: |
          PYTHONPATH=. pytest tests
        shell: bash -el {0}
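
The test step sets PYTHONPATH=. so that the utils package resolves from the repository root. A local equivalent, as a sketch (assuming it is run from the repo root): invoking pytest through the interpreter prepends the current directory to sys.path, which makes the explicit PYTHONPATH prefix unnecessary.

# Local stand-in for the CI test step (a sketch, run from the repo root).
# `python -m pytest` prepends the current directory to sys.path, so the
# PYTHONPATH=. prefix used in the workflow is not needed here.
import subprocess
import sys

subprocess.run([sys.executable, "-m", "pytest", "tests"], check=True)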
2 changes: 1 addition & 1 deletion environment.yml
@@ -18,7 +18,7 @@ dependencies:
  - pytest
  - ruff
  - scalene
-  pip:
+  - pip:
      - torch
      - torchvision
prefix: /opt/miniconda3/envs/cc-exam
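
This one-character fix matters because the old file mixed a mapping key (pip:) into what is otherwise a block sequence under dependencies, which is invalid YAML; the new - pip: keeps everything a list item. A quick check, as a sketch assuming PyYAML is available:

# Sketch showing why the environment.yml change is needed (assumes PyYAML).
import yaml

broken = """\
dependencies:
  - pytest
  pip:
    - torch
"""

fixed = """\
dependencies:
  - pytest
  - pip:
      - torch
"""

try:
    yaml.safe_load(broken)
except yaml.YAMLError as exc:
    print("broken form rejected:", type(exc).__name__)

print("fixed form parses:", yaml.safe_load(fixed)["dependencies"])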
49 changes: 49 additions & 0 deletions tests/test_createfolders.py
@@ -0,0 +1,49 @@
from utils import createfolders


def test_createfolders():
    import argparse
    from pathlib import Path
    from tempfile import TemporaryDirectory

    with TemporaryDirectory() as temp_dir:
        temp_dir = Path(temp_dir)

        parser = argparse.ArgumentParser()

        # Structure related values
        parser.add_argument(
            "--datafolder",
            type=Path,
            default=temp_dir / "Data",
            help="Path to where data will be saved during training.",
        )
        parser.add_argument(
            "--resultfolder",
            type=Path,
            default=temp_dir / "Results",
            help="Path to where results will be saved during evaluation.",
        )
        parser.add_argument(
            "--modelfolder",
            type=Path,
            default=temp_dir / "Experiments",
            help="Path to where model weights will be saved at the end of training.",
        )

        args = parser.parse_args(
            [
                "--datafolder",
                str(temp_dir / "Data"),
                "--resultfolder",
                str(temp_dir / "Results"),
                "--modelfolder",
                str(temp_dir / "Experiments"),
            ]
        )

        createfolders(args.datafolder, args.resultfolder, args.modelfolder)

        assert (temp_dir / "Data").exists()
        assert (temp_dir / "Results").exists()
        assert (temp_dir / "Experiments").exists()
20 changes: 20 additions & 0 deletions tests/test_dataloaders.py
@@ -0,0 +1,20 @@
from utils.dataloaders.usps_0_6 import USPSDataset0_6


def test_uspsdataset0_6():
    from pathlib import Path
    from tempfile import TemporaryFile

    import h5py
    import numpy as np

    with TemporaryFile() as tf:
        with h5py.File(tf, "w") as f:
            f["train/data"] = np.random.rand(10, 16 * 16)
            f["train/target"] = np.array([6, 5, 4, 3, 2, 1, 0, 0, 0, 0])

        dataset = USPSDataset0_6(data_path=tf, train=True)
        assert len(dataset) == 10
        data, target = dataset[0]
        assert data.shape == (1, 16, 16)
        assert all(target == np.array([0, 0, 0, 0, 0, 0, 1]))
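
The test pins down the dataset's contract: data_path points at an HDF5 file with train/data flattened to 256 values per sample, __len__ counts samples, and __getitem__ returns a (1, 16, 16) image with a one-hot target over the 7 classes (digits 0 to 6). The implementation is only partially shown in this diff; a minimal sketch consistent with the test, with names of my own choosing:

# Minimal sketch of the interface the test exercises; the real
# USPSDataset0_6 in utils/dataloaders/usps_0_6.py may differ in detail.
import h5py
import numpy as np
from torch.utils.data import Dataset


class USPSDataset0_6Sketch(Dataset):
    def __init__(self, data_path, train=True, transform=None):
        self.path = data_path
        self.mode = "train" if train else "test"
        self.transform = transform
        self.num_classes = 7

    def __len__(self):
        with h5py.File(self.path, "r") as f:
            return len(f[f"{self.mode}/target"])

    def __getitem__(self, idx):
        with h5py.File(self.path, "r") as f:
            data = f[f"{self.mode}/data"][idx].reshape(1, 16, 16)
            label = int(f[f"{self.mode}/target"][idx])
        target = np.zeros(self.num_classes)
        target[label] = 1  # one-hot over digits 0-6
        if self.transform:
            data = self.transform(data)
        return data, target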
16 changes: 16 additions & 0 deletions tests/test_metrics.py
@@ -0,0 +1,16 @@
from utils.metrics import Recall


def test_recall():
    import torch

    recall = Recall(7)

    y_true = torch.tensor([0, 1, 2, 3, 4, 5, 6])
    y_pred = torch.tensor([2, 1, 2, 1, 4, 5, 6])

    recall_score = recall(y_true, y_pred)

    assert recall_score.allclose(torch.tensor(0.7143), atol=1e-5), (
        f"Recall Score: {recall_score.item()}"
    )
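
The expected value follows from macro-averaging: classes 1, 2, 4, 5, and 6 are each fully recalled while classes 0 and 3 are missed, giving 5/7, roughly 0.7143. The Recall implementation itself is not part of this diff; a sketch that satisfies the test (the real utils.metrics.Recall may differ):

# Macro-averaged recall, as a sketch consistent with test_recall above.
import torch


class RecallSketch:
    def __init__(self, num_classes):
        self.num_classes = num_classes

    def __call__(self, y_true, y_pred):
        per_class = []
        for c in range(self.num_classes):
            mask = y_true == c
            if mask.any():  # skip classes absent from y_true
                per_class.append((y_pred[mask] == c).float().mean())
        return torch.stack(per_class).mean()


recall = RecallSketch(7)
score = recall(torch.tensor([0, 1, 2, 3, 4, 5, 6]), torch.tensor([2, 1, 2, 1, 4, 5, 6]))
print(score)  # tensor(0.7143)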
19 changes: 19 additions & 0 deletions tests/test_models.py
@@ -0,0 +1,19 @@
import pytest
import torch

from utils.models import ChristianModel


@pytest.mark.parametrize("in_channels, num_classes", [(1, 6), (3, 6)])
def test_christian_model(in_channels, num_classes):
    n, c, h, w = 5, in_channels, 16, 16

    model = ChristianModel(c, num_classes)

    x = torch.randn(n, c, h, w)
    y = model(x)

    assert y.shape == (n, num_classes), f"Shape: {y.shape}"
    assert y.sum(dim=1).allclose(torch.ones(n), atol=1e-5), (
        f"Softmax output should sum to 1, but got: {y.sum()}"
    )
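
The test only constrains the model's signature and output: ChristianModel(in_channels, num_classes) must map (n, c, 16, 16) inputs to (n, num_classes) rows that sum to one, i.e. it ends in a softmax. The architecture is not shown in this diff, so the following is a hypothetical stand-in that merely satisfies these assertions:

# Hypothetical stand-in for utils.models.ChristianModel; only the
# input/output contract checked by the test above is guaranteed here.
import torch
import torch.nn as nn


class ChristianModelSketch(nn.Module):
    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels, 8, kernel_size=3, padding=1),
            nn.ReLU(),
        )
        self.classifier = nn.Linear(8 * 16 * 16, num_classes)

    def forward(self, x):
        h = self.features(x).flatten(start_dim=1)
        return torch.softmax(self.classifier(h), dim=1)  # rows sum to 1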
46 changes: 0 additions & 46 deletions utils/createfolders.py
@@ -1,6 +1,4 @@
-import argparse
from pathlib import Path
-from tempfile import TemporaryDirectory


def createfolders(*dirs: Path) -> None:
@@ -16,47 +14,3 @@ def createfolders(*dirs: Path) -> None:

    for dir in dirs:
        dir.mkdir(parents=True, exist_ok=True)
-
-
-def test_createfolders():
-    with TemporaryDirectory() as temp_dir:
-        temp_dir = Path(temp_dir)
-
-        parser = argparse.ArgumentParser()
-
-        # Structuture related values
-        parser.add_argument(
-            "--datafolder",
-            type=Path,
-            default=temp_dir / "Data",
-            help="Path to where data will be saved during training.",
-        )
-        parser.add_argument(
-            "--resultfolder",
-            type=Path,
-            default=temp_dir / "Results",
-            help="Path to where results will be saved during evaluation.",
-        )
-        parser.add_argument(
-            "--modelfolder",
-            type=Path,
-            default=temp_dir / "Experiments",
-            help="Path to where model weights will be saved at the end of training.",
-        )
-
-        args = parser.parse_args(
-            [
-                "--datafolder",
-                temp_dir / "Data",
-                "--resultfolder",
-                temp_dir / "Results",
-                "--modelfolder",
-                temp_dir / "Experiments",
-            ]
-        )
-
-        createfolders(args.datafolder, args.resultfolder, args.modelfolder)
-
-        assert (temp_dir / "Data").exists()
-        assert (temp_dir / "Results").exists()
-        assert (temp_dir / "Experiments").exists()
18 changes: 1 addition & 17 deletions utils/dataloaders/usps_0_6.py
@@ -71,7 +71,7 @@ def __init__(
        download: bool = False,
    ):
        super().__init__()
-        self.path = list(data_path.glob("*.h5"))[0]
+        self.path = data_path
        self.transform = transform
        self.num_classes = 7

@@ -116,19 +116,3 @@ def __getitem__(self, idx):
        data = self.transform(data)

        return data, target
-
-
-def test_uspsdataset0_6():
-    import pytest
-
-    datapath = Path("data/USPS/usps.h5")
-
-    dataset = USPSDataset0_6(path=datapath, mode="train")
-    assert len(dataset) == 5460
-    data, target = dataset[0]
-    assert data.shape == (16, 16)
-    assert target == 6
-
-    # Test for an invalid mode
-    with pytest.raises(ValueError):
-        USPSDataset0_6(path=datapath, mode="inference")
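
One consequence of the self.path = data_path change worth noting: callers now pass the HDF5 file itself rather than a directory to glob, which is exactly what the new tests/test_dataloaders.py does with its temporary file. A hedged before/after, with illustrative paths:

# Call-site change implied by this diff (the paths here are illustrative).
from pathlib import Path

# Before: data_path was a directory, and the dataset globbed for *.h5 inside it.
# dataset = USPSDataset0_6(data_path=Path("data/USPS"), train=True)

# After: data_path is the .h5 file itself.
# dataset = USPSDataset0_6(data_path=Path("data/USPS/usps.h5"), train=True)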
31 changes: 19 additions & 12 deletions utils/dataloaders/uspsh5_7_9.py
@@ -1,9 +1,9 @@
-from torch.utils.data import Dataset
-import numpy as np
import h5py
-from torchvision import transforms
-from PIL import Image
+import numpy as np
import torch
+from PIL import Image
+from torch.utils.data import Dataset
+from torchvision import transforms


class USPSH5_Digit_7_9_Dataset(Dataset):
@@ -95,14 +95,20 @@ def __getitem__(self, id):

def main():
    # Example Usage:
-    transform = transforms.Compose([
-        transforms.Resize((16, 16)),  # Ensure images are 16x16
-        transforms.ToTensor(),
-        transforms.Normalize((0.5,), (0.5,))  # Normalize to [-1, 1]
-    ])
+    transform = transforms.Compose(
+        [
+            transforms.Resize((16, 16)),  # Ensure images are 16x16
+            transforms.ToTensor(),
+            transforms.Normalize((0.5,), (0.5,)),  # Normalize to [-1, 1]
+        ]
+    )

    # Load the dataset
-    dataset = USPSH5_Digit_7_9_Dataset(h5_path="C:/Users/Solveig/OneDrive/Dokumente/UiT PhD/Courses/Git/usps.h5", mode="train", transform=transform)
+    dataset = USPSH5_Digit_7_9_Dataset(
+        h5_path="C:/Users/Solveig/OneDrive/Dokumente/UiT PhD/Courses/Git/usps.h5",
+        mode="train",
+        transform=transform,
+    )
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
    batch = next(iter(data_loader))  # grab a batch from the dataloader
    img, label = batch
@@ -112,5 +118,6 @@ def main():
    # Check dataset size
    print(f"Dataset size: {len(dataset)}")

-if __name__ == '__main__':
-    main()
+
+if __name__ == "__main__":
+    main()
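
The example in main() still hard-codes an absolute Windows path, so it only runs on one machine. A hedged alternative would resolve the file relative to the module; the data/usps.h5 location below is an assumption, not part of the repository layout shown here:

# Sketch of a portable default for the example path in main().
from pathlib import Path

repo_root = Path(__file__).resolve().parents[2]  # utils/dataloaders/ -> repo root
h5_path = repo_root / "data" / "usps.h5"  # hypothetical location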
76 changes: 40 additions & 36 deletions utils/metrics/F1.py
@@ -1,40 +1,41 @@
-import torch.nn as nn
import torch
+import torch.nn as nn


class F1Score(nn.Module):
"""
F1 Score implementation with direct averaging inside the compute method.
F1 Score implementation with direct averaging inside the compute method.
Parameters
----------
num_classes : int
Number of classes.
Parameters
----------
num_classes : int
Number of classes.
Attributes
----------
num_classes : int
The number of classes.
Attributes
----------
num_classes : int
The number of classes.
tp : torch.Tensor
Tensor for True Positives (TP) for each class.
tp : torch.Tensor
Tensor for True Positives (TP) for each class.
fp : torch.Tensor
Tensor for False Positives (FP) for each class.
fp : torch.Tensor
Tensor for False Positives (FP) for each class.
fn : torch.Tensor
Tensor for False Negatives (FN) for each class.
"""

fn : torch.Tensor
Tensor for False Negatives (FN) for each class.
"""
def __init__(self, num_classes):
"""
Initializes the F1Score object, setting up the necessary state variables.
Initializes the F1Score object, setting up the necessary state variables.
Parameters
----------
num_classes : int
The number of classes in the classification task.
Parameters
----------
num_classes : int
The number of classes in the classification task.
"""
"""

super().__init__()

@@ -47,16 +48,16 @@ def __init__(self, num_classes):

    def update(self, preds, target):
        """
        Update the variables with predictions and true labels.

        Parameters
        ----------
        preds : torch.Tensor
            Predicted logits (shape: [batch_size, num_classes]).

        target : torch.Tensor
            True labels (shape: [batch_size]).
        """
        preds = torch.argmax(preds, dim=1)

        # Calculate True Positives (TP), False Positives (FP), and False Negatives (FN) per class
Expand All @@ -76,17 +77,20 @@ def compute(self):
"""

# Compute F1 score based on the specified averaging method
f1_score = 2 * torch.sum(self.tp) / (2 * torch.sum(self.tp) + torch.sum(self.fp) + torch.sum(self.fn))
f1_score = (
2
* torch.sum(self.tp)
/ (2 * torch.sum(self.tp) + torch.sum(self.fp) + torch.sum(self.fn))
)

return f1_score


def test_f1score():
f1_metric = F1Score(num_classes=3)
preds = torch.tensor([[0.8, 0.1, 0.1],
[0.2, 0.7, 0.1],
[0.2, 0.3, 0.5],
[0.1, 0.2, 0.7]])
preds = torch.tensor(
[[0.8, 0.1, 0.1], [0.2, 0.7, 0.1], [0.2, 0.3, 0.5], [0.1, 0.2, 0.7]]
)

target = torch.tensor([0, 1, 0, 2])
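
compute() pools TP, FP, and FN over all classes before applying F1 = 2*TP / (2*TP + FP + FN), i.e. a micro-averaged F1. Assuming update() counts per-class TP/FP/FN in the usual way, the fixture above gives argmax(preds) = [0, 1, 2, 2] against target = [0, 1, 0, 2], so sample 2 contributes one FP (class 2) and one FN (class 0). A worked check:

# Worked check of the pooled (micro) F1 for the fixture in test_f1score.
tp, fp, fn = 3, 1, 1  # samples 0, 1, 3 are hits; sample 2 is FP for class 2, FN for class 0
f1 = 2 * tp / (2 * tp + fp + fn)
print(f1)  # 0.75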

