Commit bc307a5

Merge branch 'main' into sweden

nilsleh committed Mar 15, 2022
2 parents: d3effe6 + 1d8e010
Showing 122 changed files with 844 additions and 1,359 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -24,9 +24,9 @@ repos:
         additional_dependencies: ["toml"]

   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.931
+    rev: v0.940
     hooks:
       - id: mypy
         args: [--strict, --ignore-missing-imports, --show-error-codes]
-        additional_dependencies: [torch>=1.7, torchmetrics>=0.7, pytorch-lightning>=1.3, pytest>=6, omegaconf>=2.1, kornia>=0.6, numpy>=1.22.0]
+        additional_dependencies: [torch>=1.11, torchmetrics>=0.7, pytorch-lightning>=1.3, pytest>=6, omegaconf>=2.1, kornia>=0.6, numpy>=1.22.0]
         exclude: (build|data|dist|logo|logs|output)/
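A note on why this bump matters for the rest of the diff: mypy v0.940 plus torch>=1.11, which ships far more complete type annotations, is what lets the hunks below delete dozens of `# type: ignore[attr-defined]` comments. A minimal sketch of the pattern, assuming torch >= 1.11 under `mypy --strict` (illustrative, not taken from this repo):

```python
import torch
from torch import nn

# With torch >= 1.11 these calls are fully typed, so mypy --strict accepts
# them without the `# type: ignore[attr-defined]` suffixes required before.
conv = nn.Conv2d(7, 64, kernel_size=7, stride=2, padding=3, bias=False)
criterion = nn.CrossEntropyLoss()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
labels = torch.randint(0, 256, (16,))
```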
6 changes: 6 additions & 0 deletions .readthedocs.yaml
@@ -4,6 +4,12 @@
 # Required
 version: 2

+# Set the version of Python
+build:
+  os: ubuntu-20.04
+  tools:
+    python: "3.9"
+
 # Configuration of the Python environment to be used
 python:
   install:
10 changes: 4 additions & 6 deletions benchmark.py
@@ -208,17 +208,15 @@ def main(args: argparse.Namespace) -> None:
     # Benchmark model
     model = resnet34()
     # Change number of input channels to match Landsat
-    model.conv1 = nn.Conv2d(  # type: ignore[attr-defined]
+    model.conv1 = nn.Conv2d(
         len(bands), 64, kernel_size=7, stride=2, padding=3, bias=False
     )

-    criterion = nn.CrossEntropyLoss()  # type: ignore[attr-defined]
+    criterion = nn.CrossEntropyLoss()
     params = model.parameters()
     optimizer = optim.SGD(params, lr=0.0001)

-    device = torch.device(  # type: ignore[attr-defined]
-        "cuda" if torch.cuda.is_available() else "cpu", args.device
-    )
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", args.device)
     model = model.to(device)

     tic = time.time()
@@ -227,7 +225,7 @@ def main(args: argparse.Namespace) -> None:
         num_total_patches += args.batch_size
         x = torch.rand(args.batch_size, len(bands), args.patch_size, args.patch_size)
         # y = torch.randint(0, 256, (args.batch_size, args.patch_size, args.patch_size))
-        y = torch.randint(0, 256, (args.batch_size,))  # type: ignore[attr-defined]
+        y = torch.randint(0, 256, (args.batch_size,))
         x = x.to(device)
         y = y.to(device)
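For orientation, the loop being cleaned up here is a synthetic throughput benchmark: random tensors stand in for Landsat patches, and patches per second is the figure of merit. A self-contained sketch of the same pattern (hypothetical sizes, simplified from the hunks above):

```python
import time

import torch
from torch import nn, optim
from torchvision.models import resnet34

model = resnet34()
# Widen the stem to 7 input channels to match a Landsat-style band stack.
model.conv1 = nn.Conv2d(7, 64, kernel_size=7, stride=2, padding=3, bias=False)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

batch_size, patch_size, num_batches = 16, 224, 10
tic = time.time()
for _ in range(num_batches):
    # Random inputs and targets: we measure speed, not accuracy.
    x = torch.rand(batch_size, 7, patch_size, patch_size, device=device)
    y = torch.randint(0, 256, (batch_size,), device=device)
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()
toc = time.time()
print(f"{num_batches * batch_size / (toc - tic):.1f} patches/sec")
```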
17 changes: 9 additions & 8 deletions evaluate.py
@@ -12,7 +12,7 @@

 import pytorch_lightning as pl
 import torch
-from torchmetrics import Accuracy, JaccardIndex, Metric, MetricCollection
+from torchmetrics import Accuracy, JaccardIndex, MetricCollection

 from torchgeo.trainers import ClassificationTask, SemanticSegmentationTask
 from train import TASK_TO_MODULES_MAPPING
@@ -85,19 +85,20 @@ def set_up_parser() -> argparse.ArgumentParser:
 def run_eval_loop(
     model: pl.LightningModule,
     dataloader: Any,
-    device: torch.device,  # type: ignore[name-defined]
-    metrics: Metric,
+    device: torch.device,
+    metrics: MetricCollection,
 ) -> Any:
     """Runs a standard test loop over a dataloader and records metrics.

     Args:
         model: the model used for inference
         dataloader: the dataloader to get samples from
         device: the device to put data on
-        metrics: a torchmetrics compatible Metric to score the output from the model
+        metrics: a torchmetrics compatible metric collection to score the output
+            from the model

     Returns:
-        the result of ``metric.compute()``
+        the result of ``metrics.compute()``
     """
     for batch in dataloader:
         x = batch["image"].to(device)
@@ -158,7 +159,7 @@ def main(args: argparse.Namespace) -> None:
             "loss": model.hparams["loss"],
         }
     elif issubclass(TASK, SemanticSegmentationTask):
-        val_row: Dict[str, Union[str, float]] = {  # type: ignore[no-redef]
+        val_row = {
             "split": "val",
             "segmentation_model": model.hparams["segmentation_model"],
             "encoder_name": model.hparams["encoder_name"],
@@ -167,7 +168,7 @@ def main(args: argparse.Namespace) -> None:
             "loss": model.hparams["loss"],
         }

-        test_row: Dict[str, Union[str, float]] = {  # type: ignore[no-redef]
+        test_row = {
             "split": "test",
             "segmentation_model": model.hparams["segmentation_model"],
             "encoder_name": model.hparams["encoder_name"],
@@ -179,7 +180,7 @@ def main(args: argparse.Namespace) -> None:
         raise ValueError(f"{TASK} is not supported")

     # Compute metrics
-    device = torch.device("cuda:%d" % (args.gpu))  # type: ignore[attr-defined]
+    device = torch.device("cuda:%d" % (args.gpu))
     model = model.to(device)

     if args.task == "etci2021":  # Custom metric setup for testing ETCI2021
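Side note on the `Metric` → `MetricCollection` annotation change: `run_eval_loop` is actually handed a bundle of metrics, and `MetricCollection` is the torchmetrics container for that. A minimal sketch of the API, assuming the 0.7-era torchmetrics used here (hypothetical metric choices and numbers):

```python
import torch
from torchmetrics import Accuracy, JaccardIndex, MetricCollection

# A collection fans every update out to its members and returns a dict.
metrics = MetricCollection(
    {"accuracy": Accuracy(num_classes=10), "iou": JaccardIndex(num_classes=10)}
)

preds = torch.randint(0, 10, (32,))
target = torch.randint(0, 10, (32,))
metrics(preds, target)       # update all metrics with one batch
results = metrics.compute()  # e.g. {"accuracy": tensor(...), "iou": tensor(...)}
```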
13 changes: 3 additions & 10 deletions tests/datamodules/test_chesapeake.py
@@ -25,17 +25,10 @@ def datamodule(self) -> ChesapeakeCVPRDataModule:

     def test_nodata_check(self, datamodule: ChesapeakeCVPRDataModule) -> None:
         nodata_check = datamodule.nodata_check(4)
-        sample = {
-            "image": torch.ones(1, 2, 2),  # type: ignore[attr-defined]
-            "mask": torch.ones(2, 2),  # type: ignore[attr-defined]
-        }
+        sample = {"image": torch.ones(1, 2, 2), "mask": torch.ones(2, 2)}
         out = nodata_check(sample)
-        assert torch.equal(  # type: ignore[attr-defined]
-            out["image"], torch.zeros(1, 4, 4)  # type: ignore[attr-defined]
-        )
-        assert torch.equal(  # type: ignore[attr-defined]
-            out["mask"], torch.zeros(4, 4)  # type: ignore[attr-defined]
-        )
+        assert torch.equal(out["image"], torch.zeros(1, 4, 4))
+        assert torch.equal(out["mask"], torch.zeros(4, 4))

     def test_invalid_param_config(self) -> None:
         with pytest.raises(ValueError, match="The pre-generated prior labels"):
4 changes: 2 additions & 2 deletions tests/datamodules/test_utils.py
@@ -9,8 +9,8 @@

 def test_dataset_split() -> None:
     num_samples = 24
-    x = torch.ones(num_samples, 5)  # type: ignore[attr-defined]
-    y = torch.randint(low=0, high=2, size=(num_samples,))  # type: ignore[attr-defined]
+    x = torch.ones(num_samples, 5)
+    y = torch.randint(low=0, high=2, size=(num_samples,))
     ds = TensorDataset(x, y)

     # Test only train/val set split
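Side note: the helper under test here, `dataset_split`, partitions a dataset into train/val(/test) subsets. Behaviorally it is close to plain `torch.utils.data.random_split`, sketched below under that assumption (not the torchgeo implementation itself):

```python
import torch
from torch.utils.data import TensorDataset, random_split

num_samples = 24
x = torch.ones(num_samples, 5)
y = torch.randint(low=0, high=2, size=(num_samples,))
ds = TensorDataset(x, y)

# Split 50/50 into train/val: the same shape of result the test above checks.
train_ds, val_ds = random_split(ds, [12, 12])
assert len(train_ds) == 12 and len(val_ds) == 12
```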
24 changes: 8 additions & 16 deletions tests/datasets/test_advance.py
@@ -5,7 +5,7 @@
 import os
 import shutil
 from pathlib import Path
-from typing import Any, Generator
+from typing import Any

 import matplotlib.pyplot as plt
 import pytest
@@ -23,38 +23,30 @@ def download_url(url: str, root: str, *args: str) -> None:

 class TestADVANCE:
     @pytest.fixture
-    def dataset(
-        self, monkeypatch: Generator[MonkeyPatch, None, None], tmp_path: Path
-    ) -> ADVANCE:
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            torchgeo.datasets.utils, "download_url", download_url
-        )
+    def dataset(self, monkeypatch: MonkeyPatch, tmp_path: Path) -> ADVANCE:
+        monkeypatch.setattr(torchgeo.datasets.utils, "download_url", download_url)
         data_dir = os.path.join("tests", "data", "advance")
         urls = [
             os.path.join(data_dir, "ADVANCE_vision.zip"),
             os.path.join(data_dir, "ADVANCE_sound.zip"),
         ]
         md5s = ["43acacecebecd17a82bc2c1e719fd7e4", "039b7baa47879a8a4e32b9dd8287f6ad"]
-        monkeypatch.setattr(ADVANCE, "urls", urls)  # type: ignore[attr-defined]
-        monkeypatch.setattr(ADVANCE, "md5s", md5s)  # type: ignore[attr-defined]
+        monkeypatch.setattr(ADVANCE, "urls", urls)
+        monkeypatch.setattr(ADVANCE, "md5s", md5s)
         root = str(tmp_path)
-        transforms = nn.Identity()  # type: ignore[attr-defined]
+        transforms = nn.Identity()  # type: ignore[no-untyped-call]
         return ADVANCE(root, transforms, download=True, checksum=True)

     @pytest.fixture
-    def mock_missing_module(
-        self, monkeypatch: Generator[MonkeyPatch, None, None]
-    ) -> None:
+    def mock_missing_module(self, monkeypatch: MonkeyPatch) -> None:
         import_orig = builtins.__import__

         def mocked_import(name: str, *args: Any, **kwargs: Any) -> Any:
             if name == "scipy.io":
                 raise ImportError()
             return import_orig(name, *args, **kwargs)

-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            builtins, "__import__", mocked_import
-        )
+        monkeypatch.setattr(builtins, "__import__", mocked_import)

     def test_getitem(self, dataset: ADVANCE) -> None:
         pytest.importorskip("scipy", minversion="0.9.0")
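Side note on the recurring signature change: pytest 6.2 made `MonkeyPatch` a public, fully annotated class, so fixtures can take `monkeypatch: MonkeyPatch` directly instead of the old `Generator[MonkeyPatch, None, None]` workaround. A minimal sketch of the pattern (hypothetical test, not from this repo):

```python
from pathlib import Path

from pytest import MonkeyPatch


class Config:
    url = "https://example.com/data.zip"


def test_patched_url(monkeypatch: MonkeyPatch, tmp_path: Path) -> None:
    # Redirect the class attribute to a local path for this test only;
    # pytest restores the original value automatically at teardown.
    monkeypatch.setattr(Config, "url", str(tmp_path / "data.zip"))
    assert Config.url.endswith("data.zip")
```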
11 changes: 4 additions & 7 deletions tests/datasets/test_agb_live_woody_density.py
@@ -4,7 +4,6 @@
 import os
 import shutil
 from pathlib import Path
-from typing import Generator

 import matplotlib.pyplot as plt
 import pytest
@@ -28,11 +27,11 @@ def download_url(url: str, root: str, *args: str, **kwargs: str) -> None:
 class TestAbovegroundLiveWoodyBiomassDensity:
     @pytest.fixture
     def dataset(
-        self, monkeypatch: Generator[MonkeyPatch, None, None], tmp_path: Path
+        self, monkeypatch: MonkeyPatch, tmp_path: Path
     ) -> AbovegroundLiveWoodyBiomassDensity:

-        transforms = nn.Identity()  # type: ignore[attr-defined]
-        monkeypatch.setattr(  # type: ignore[attr-defined]
+        transforms = nn.Identity()  # type: ignore[no-untyped-call]
+        monkeypatch.setattr(
             torchgeo.datasets.agb_live_woody_density, "download_url", download_url
         )
         url = os.path.join(
@@ -41,9 +40,7 @@ def dataset(
             "agb_live_woody_density",
             "Aboveground_Live_Woody_Biomass_Density.geojson",
         )
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            AbovegroundLiveWoodyBiomassDensity, "url", url
-        )
+        monkeypatch.setattr(AbovegroundLiveWoodyBiomassDensity, "url", url)

         root = str(tmp_path)
         return AbovegroundLiveWoodyBiomassDensity(
2 changes: 1 addition & 1 deletion tests/datasets/test_astergdem.py
@@ -20,7 +20,7 @@ def dataset(self, tmp_path: Path) -> AsterGDEM:
         zipfile = os.path.join("tests", "data", "astergdem", "astergdem.zip")
         shutil.unpack_archive(zipfile, tmp_path, "zip")
         root = str(tmp_path)
-        transforms = nn.Identity()  # type: ignore[attr-defined]
+        transforms = nn.Identity()  # type: ignore[no-untyped-call]
         return AsterGDEM(root, transforms=transforms)

     def test_datasetmissing(self, tmp_path: Path) -> None:
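Side note on the other recurring edit: `nn.Identity()` keeps an ignore comment, but the error code changes. Under torch >= 1.11 the attribute itself resolves in the stubs, yet the `Identity` constructor appears to remain unannotated, so `mypy --strict` now reports `no-untyped-call` rather than `attr-defined`. A sketch of the situation (an assumption about the stub state; verify against your torch version):

```python
from torch import nn

# torch < 1.11: the call site needed `# type: ignore[attr-defined]`.
# torch >= 1.11: the attribute type-checks, but the untyped constructor
# trips --disallow-untyped-calls, hence the new error code.
transforms = nn.Identity()  # type: ignore[no-untyped-call]
```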
21 changes: 6 additions & 15 deletions tests/datasets/test_benin_cashews.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
import os
import shutil
from pathlib import Path
from typing import Generator

import matplotlib.pyplot as plt
import pytest
Expand All @@ -31,25 +30,17 @@ def fetch(dataset_id: str, **kwargs: str) -> Dataset:
class TestBeninSmallHolderCashews:
@pytest.fixture
def dataset(
self, monkeypatch: Generator[MonkeyPatch, None, None], tmp_path: Path
self, monkeypatch: MonkeyPatch, tmp_path: Path
) -> BeninSmallHolderCashews:
radiant_mlhub = pytest.importorskip("radiant_mlhub", minversion="0.2.1")
monkeypatch.setattr( # type: ignore[attr-defined]
radiant_mlhub.Dataset, "fetch", fetch
)
monkeypatch.setattr(radiant_mlhub.Dataset, "fetch", fetch)
source_md5 = "255efff0f03bc6322470949a09bc76db"
labels_md5 = "ed2195d93ca6822d48eb02bc3e81c127"
monkeypatch.setitem( # type: ignore[attr-defined]
BeninSmallHolderCashews.image_meta, "md5", source_md5
)
monkeypatch.setitem( # type: ignore[attr-defined]
BeninSmallHolderCashews.target_meta, "md5", labels_md5
)
monkeypatch.setattr( # type: ignore[attr-defined]
BeninSmallHolderCashews, "dates", ("2019_11_05",)
)
monkeypatch.setitem(BeninSmallHolderCashews.image_meta, "md5", source_md5)
monkeypatch.setitem(BeninSmallHolderCashews.target_meta, "md5", labels_md5)
monkeypatch.setattr(BeninSmallHolderCashews, "dates", ("2019_11_05",))
root = str(tmp_path)
transforms = nn.Identity() # type: ignore[attr-defined]
transforms = nn.Identity() # type: ignore[no-untyped-call]
bands = BeninSmallHolderCashews.ALL_BANDS

return BeninSmallHolderCashews(
Expand Down
24 changes: 7 additions & 17 deletions tests/datasets/test_bigearthnet.py
@@ -4,7 +4,6 @@
 import os
 import shutil
 from pathlib import Path
-from typing import Generator

 import matplotlib.pyplot as plt
 import pytest
@@ -26,14 +25,9 @@ class TestBigEarthNet:
         params=zip(["all", "s1", "s2"], [43, 19, 19], ["train", "val", "test"])
     )
     def dataset(
-        self,
-        monkeypatch: Generator[MonkeyPatch, None, None],
-        tmp_path: Path,
-        request: SubRequest,
+        self, monkeypatch: MonkeyPatch, tmp_path: Path, request: SubRequest
     ) -> BigEarthNet:
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            torchgeo.datasets.bigearthnet, "download_url", download_url
-        )
+        monkeypatch.setattr(torchgeo.datasets.bigearthnet, "download_url", download_url)
         data_dir = os.path.join("tests", "data", "bigearthnet")
         metadata = {
             "s1": {
@@ -66,15 +60,11 @@ def dataset(
                 "md5": "851a6bdda484d47f60e121352dcb1bf5",
             },
         }
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            BigEarthNet, "metadata", metadata
-        )
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            BigEarthNet, "splits_metadata", splits_metadata
-        )
+        monkeypatch.setattr(BigEarthNet, "metadata", metadata)
+        monkeypatch.setattr(BigEarthNet, "splits_metadata", splits_metadata)
         bands, num_classes, split = request.param
         root = str(tmp_path)
-        transforms = nn.Identity()  # type: ignore[attr-defined]
+        transforms = nn.Identity()  # type: ignore[no-untyped-call]
         return BigEarthNet(
             root, split, bands, num_classes, transforms, download=True, checksum=True
         )
@@ -85,8 +75,8 @@ def test_getitem(self, dataset: BigEarthNet) -> None:
         assert isinstance(x["image"], torch.Tensor)
         assert isinstance(x["label"], torch.Tensor)
         assert x["label"].shape == (dataset.num_classes,)
-        assert x["image"].dtype == torch.int32  # type: ignore[attr-defined]
-        assert x["label"].dtype == torch.int64  # type: ignore[attr-defined]
+        assert x["image"].dtype == torch.int32
+        assert x["label"].dtype == torch.int64

         if dataset.bands == "all":
             assert x["image"].shape == (14, 120, 120)
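Side note: the fixture reformatted above is parametrized; `params=zip(...)` yields one `(bands, num_classes, split)` tuple per test run, which the fixture pulls out of `request.param`. A stripped-down sketch of that mechanism (hypothetical fixture, not this repo's):

```python
from typing import Tuple

import pytest
from _pytest.fixtures import SubRequest


@pytest.fixture(params=zip(["all", "s1", "s2"], [43, 19, 19]))
def config(request: SubRequest) -> Tuple[str, int]:
    # Each test using this fixture runs once per (bands, num_classes) pair.
    return request.param


def test_num_classes(config: Tuple[str, int]) -> None:
    bands, num_classes = config
    assert num_classes in (19, 43)
```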
21 changes: 6 additions & 15 deletions tests/datasets/test_cbf.py
@@ -4,7 +4,6 @@
 import os
 import shutil
 from pathlib import Path
-from typing import Generator

 import matplotlib.pyplot as plt
 import pytest
@@ -29,11 +28,9 @@ def download_url(url: str, root: str, *args: str) -> None:
 class TestCanadianBuildingFootprints:
     @pytest.fixture
     def dataset(
-        self, monkeypatch: Generator[MonkeyPatch, None, None], tmp_path: Path
+        self, monkeypatch: MonkeyPatch, tmp_path: Path
     ) -> CanadianBuildingFootprints:
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            torchgeo.datasets.utils, "download_url", download_url
-        )
+        monkeypatch.setattr(torchgeo.datasets.utils, "download_url", download_url)
         md5s = [
             "8a4a0a57367f67c69608d1452e30df13",
             "1829f4054a9a81bb23871ca797a3895c",
@@ -49,18 +46,12 @@ def dataset(
             "067664d066c4152fb96a5c129cbabadf",
             "474bc084bc41b124aa4919e7a37a9648",
         ]
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            CanadianBuildingFootprints, "md5s", md5s
-        )
+        monkeypatch.setattr(CanadianBuildingFootprints, "md5s", md5s)
         url = os.path.join("tests", "data", "cbf") + os.sep
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            CanadianBuildingFootprints, "url", url
-        )
-        monkeypatch.setattr(  # type: ignore[attr-defined]
-            plt, "show", lambda *args: None
-        )
+        monkeypatch.setattr(CanadianBuildingFootprints, "url", url)
+        monkeypatch.setattr(plt, "show", lambda *args: None)
         root = str(tmp_path)
-        transforms = nn.Identity()  # type: ignore[attr-defined]
+        transforms = nn.Identity()  # type: ignore[no-untyped-call]
        return CanadianBuildingFootprints(
             root, res=0.1, transforms=transforms, download=True, checksum=True
         )