diff --git a/anomalib/data/utils/generators/perlin.py b/anomalib/data/utils/generators/perlin.py
index 760222fa44..32d50e03e4 100644
--- a/anomalib/data/utils/generators/perlin.py
+++ b/anomalib/data/utils/generators/perlin.py
@@ -41,7 +41,7 @@ def generate_perlin_noise_2d(shape, res):
     """Fractal perlin noise."""
 
     def f(t):
-        return 6 * t**5 - 15 * t**4 + 10 * t**3
+        return 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3
 
     delta = (res[0] / shape[0], res[1] / shape[1])
     d = (shape[0] // res[0], shape[1] // res[1])
@@ -68,7 +68,7 @@ def f(t):
 def random_2d_perlin(
     shape: Tuple,
     res: Tuple[Union[int, Tensor], Union[int, Tensor]],
-    fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3,
+    fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3,
 ) -> Union[np.ndarray, Tensor]:
     """Returns a random 2d perlin noise array.
 
@@ -90,7 +90,7 @@ def random_2d_perlin(
     return result
 
 
-def _rand_perlin_2d_np(shape, res, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
+def _rand_perlin_2d_np(shape, res, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3):
     """Generate a random image containing Perlin noise. Numpy version."""
     delta = (res[0] / shape[0], res[1] / shape[1])
     d = (shape[0] // res[0], shape[1] // res[1])
@@ -116,7 +116,7 @@ def dot(grad, shift):
     return math.sqrt(2) * lerp_np(lerp_np(n00, n10, t[..., 0]), lerp_np(n01, n11, t[..., 0]), t[..., 1])
 
 
-def _rand_perlin_2d(shape, res, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
+def _rand_perlin_2d(shape, res, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3):
     """Generate a random image containing Perlin noise. PyTorch version."""
     delta = (res[0] / shape[0], res[1] / shape[1])
     d = (shape[0] // res[0], shape[1] // res[1])
diff --git a/anomalib/models/cflow/utils.py b/anomalib/models/cflow/utils.py
index e2fd0e592e..aaf4dcd047 100644
--- a/anomalib/models/cflow/utils.py
+++ b/anomalib/models/cflow/utils.py
@@ -28,7 +28,7 @@ def get_logp(dim_feature_vector: int, p_u: torch.Tensor, logdet_j: torch.Tensor)
         torch.Tensor: Log probability
     """
     ln_sqrt_2pi = -np.log(np.sqrt(2 * np.pi))  # ln(sqrt(2*pi))
-    logp = dim_feature_vector * ln_sqrt_2pi - 0.5 * torch.sum(p_u**2, 1) + logdet_j
+    logp = dim_feature_vector * ln_sqrt_2pi - 0.5 * torch.sum(p_u ** 2, 1) + logdet_j
     return logp
 
 
diff --git a/anomalib/models/components/dimensionality_reduction/random_projection.py b/anomalib/models/components/dimensionality_reduction/random_projection.py
index f239595d2a..3d3ddfcf2f 100644
--- a/anomalib/models/components/dimensionality_reduction/random_projection.py
+++ b/anomalib/models/components/dimensionality_reduction/random_projection.py
@@ -88,7 +88,7 @@ def johnson_lindenstrauss_min_dim(self, n_samples: int, eps: float = 0.1):
 
             eps (float, optional): Minimum distortion rate. Defaults to 0.1.
         """
-        denominator = (eps**2 / 2) - (eps**3 / 3)
+        denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
         return (4 * np.log(n_samples) / denominator).astype(np.int64)
 
     def fit(self, embedding: Tensor) -> "SparseRandomProjection":
diff --git a/anomalib/models/components/stats/kde.py b/anomalib/models/components/stats/kde.py
index 0929165869..15875b9487 100644
--- a/anomalib/models/components/stats/kde.py
+++ b/anomalib/models/components/stats/kde.py
@@ -67,7 +67,7 @@ def fit(self, dataset: Tensor) -> None:
 
         cov_mat = self.cov(dataset.T)
         inv_cov_mat = torch.linalg.inv(cov_mat)
-        inv_cov = inv_cov_mat / factor**2
+        inv_cov = inv_cov_mat / factor ** 2
 
         # transform data to account for bandwidth
         bw_transform = torch.linalg.cholesky(inv_cov)
diff --git a/anomalib/models/fastflow/anomaly_map.py b/anomalib/models/fastflow/anomaly_map.py
index ea1ecdabd1..a4f62352b7 100644
--- a/anomalib/models/fastflow/anomaly_map.py
+++ b/anomalib/models/fastflow/anomaly_map.py
@@ -34,7 +34,7 @@ def forward(self, hidden_variables: List[Tensor]) -> Tensor:
         """
         flow_maps: List[Tensor] = []
         for hidden_variable in hidden_variables:
-            log_prob = -torch.mean(hidden_variable**2, dim=1, keepdim=True) * 0.5
+            log_prob = -torch.mean(hidden_variable ** 2, dim=1, keepdim=True) * 0.5
             prob = torch.exp(log_prob)
             flow_map = F.interpolate(
                 input=-prob,
diff --git a/anomalib/models/fastflow/loss.py b/anomalib/models/fastflow/loss.py
index 608b0cfc87..874f2f3795 100644
--- a/anomalib/models/fastflow/loss.py
+++ b/anomalib/models/fastflow/loss.py
@@ -24,5 +24,5 @@ def forward(self, hidden_variables: List[Tensor], jacobians: List[Tensor]) -> Te
         """
         loss = torch.tensor(0.0, device=hidden_variables[0].device)  # pylint: disable=not-callable
         for (hidden_variable, jacobian) in zip(hidden_variables, jacobians):
-            loss += torch.mean(0.5 * torch.sum(hidden_variable**2, dim=(1, 2, 3)) - jacobian)
+            loss += torch.mean(0.5 * torch.sum(hidden_variable ** 2, dim=(1, 2, 3)) - jacobian)
         return loss
diff --git a/anomalib/models/ganomaly/torch_model.py b/anomalib/models/ganomaly/torch_model.py
index fbf67fc1b5..e354b2656d 100644
--- a/anomalib/models/ganomaly/torch_model.py
+++ b/anomalib/models/ganomaly/torch_model.py
@@ -123,7 +123,7 @@ def __init__(
 
         # Calculate input channel size to recreate inverse pyramid
        exp_factor = math.ceil(math.log(min(input_size) // 2, 2)) - 2
-        n_input_features = n_features * (2**exp_factor)
+        n_input_features = n_features * (2 ** exp_factor)
 
         # CNN layer for latent vector input
         self.latent_input.add_module(
diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml
index 66a55f1e5c..731727f301 100644
--- a/anomalib/models/patchcore/config.yaml
+++ b/anomalib/models/patchcore/config.yaml
@@ -6,7 +6,7 @@ dataset:
   category: bottle
   image_size: 224
   train_batch_size: 32
-  test_batch_size: 1
+  test_batch_size: 32
   num_workers: 8
   transform_config:
     train: null
diff --git a/anomalib/models/patchcore/torch_model.py b/anomalib/models/patchcore/torch_model.py
index 5187f41178..32647b8bde 100644
--- a/anomalib/models/patchcore/torch_model.py
+++ b/anomalib/models/patchcore/torch_model.py
@@ -154,7 +154,11 @@ def nearest_neighbors(self, embedding: Tensor, n_neighbors: int) -> Tuple[Tensor
             Tensor: Locations of the nearest neighbor(s).
         """
         distances = torch.cdist(embedding, self.memory_bank, p=2.0)  # euclidean norm
-        patch_scores, locations = distances.topk(k=n_neighbors, largest=False, dim=1)
+        if n_neighbors == 1:
+            # when n_neighbors is 1, speed up computation by using min instead of topk
+            patch_scores, locations = distances.min(1)
+        else:
+            patch_scores, locations = distances.topk(k=n_neighbors, largest=False, dim=1)
         return patch_scores, locations
 
     def compute_anomaly_score(self, patch_scores: Tensor, locations: Tensor, embedding: Tensor) -> Tensor:
@@ -168,6 +172,9 @@ def compute_anomaly_score(self, patch_scores: Tensor, locations: Tensor, embeddi
         Returns:
             Tensor: Image-level anomaly scores
         """
+        # Don't need to compute weights if num_neighbors is 1
+        if self.num_neighbors == 1:
+            return patch_scores.amax(1)
         # 1. Find the patch with the largest distance to it's nearest neighbor in each image
         max_patches = torch.argmax(patch_scores, dim=1)  # (m^test,* in the paper)
         # 2. Find the distance of the patch to it's nearest neighbor, and the location of the nn in the membank
@@ -179,7 +186,7 @@ def compute_anomaly_score(self, patch_scores: Tensor, locations: Tensor, embeddi
         # 4. Find the distance of the patch features to each of the support samples
         distances = torch.cdist(embedding[max_patches].unsqueeze(1), self.memory_bank[support_samples], p=2.0)
         # 5. Apply softmax to find the weights
-        weights = (1 - F.softmax(distances.squeeze()))[..., 0]
+        weights = (1 - F.softmax(distances.squeeze(), 1))[..., 0]
         # 6. Apply the weight factor to the score
         score = weights * score  # S^* in the paper
         return score