ruff: enable & fixing RET (#17540)

Borda authored May 5, 2023
1 parent 21a7aa2 commit 4413e98
Showing 152 changed files with 383 additions and 481 deletions.
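
The RET (flake8-return) fixes below reduce to two mechanical patterns: the RET505-family rules (a superfluous `elif`/`else` after a branch that unconditionally returns or raises) and RET504 (a variable assigned only to be returned on the next line). A minimal sketch of both, with illustrative names not taken from the changed files:

    # RET505 and friends: once a branch always exits, the following
    # `elif`/`else` can become a plain `if` or straight-line code.
    def classify(parsed: bytes) -> bool:
        if parsed == b"yes":
            return True
        if parsed == b"no":  # was: elif parsed == b"no":
            return False
        raise ValueError("could not classify")  # was nested under `else:`

    # RET504: drop a temporary that is returned immediately.
    def total(xs: list) -> int:
        return sum(xs)  # was: result = sum(xs); return result
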
2 changes: 1 addition & 1 deletion .actions/assistant.py
@@ -332,7 +332,7 @@ def copy_replace_imports(
         if not isfile(fp_new):
             shutil.copy(fp, fp_new)
             continue
-        elif ext in (".pyc",):
+        if ext in (".pyc",):
             continue
         # Try to parse everything else
         with open(fp, encoding="utf-8") as fo:
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
@@ -72,6 +72,7 @@ repos:
           - flake8-pytest-style
           - flake8-bandit
           - flake8-simplify
+          - flake8-return

   - repo: https://github.com/PyCQA/isort
     rev: 5.12.0
5 changes: 2 additions & 3 deletions examples/app/pickle_or_not/app.py
@@ -10,10 +10,9 @@ def run(self, pickle_image: bytes):
         parsed = self.parse_image(pickle_image)
         if parsed == b"it is a pickle":
             return True
-        elif parsed == b"it is not a pickle":
+        if parsed == b"it is not a pickle":
             return False
-        else:
-            raise Exception("Couldn't parse the image")
+        raise Exception("Couldn't parse the image")

     @staticmethod
     def parse_image(image_str: bytes):
18 changes: 9 additions & 9 deletions examples/fabric/build_your_own_trainer/trainer.py
@@ -149,10 +149,10 @@ def fit(
             # currently, there is no way to support fsdp with model.configure_optimizers in fabric
             # as it would require fabric to hold a reference to the model, which we don't want to.
             raise NotImplementedError("BYOT currently does not support FSDP")
-        else:
-            optimizer, scheduler_cfg = self._parse_optimizers_schedulers(model.configure_optimizers())
-            assert optimizer is not None
-            model, optimizer = self.fabric.setup(model, optimizer)
+
+        optimizer, scheduler_cfg = self._parse_optimizers_schedulers(model.configure_optimizers())
+        assert optimizer is not None
+        model, optimizer = self.fabric.setup(model, optimizer)

         # assemble state (current epoch and global step will be added in save)
         state = {"model": model, "optim": optimizer, "scheduler": scheduler_cfg}
@@ -274,7 +274,7 @@ def val_loop(
             return

         # no validation but warning if val_loader was passed, but validation_step not implemented
-        elif val_loader is not None and not is_overridden("validation_step", _unwrap_objects(model), L.LightningModule):
+        if val_loader is not None and not is_overridden("validation_step", _unwrap_objects(model), L.LightningModule):
             L.fabric.utilities.rank_zero_warn(
                 "Your LightningModule does not have a validation_step implemented, "
                 "but you passed a validation dataloader. Skipping Validation."
@@ -467,24 +467,24 @@ def _parse_optimizers_schedulers(
             return configure_optim_output, None

         # single lr scheduler
-        elif isinstance(configure_optim_output, L.fabric.utilities.types.LRScheduler):
+        if isinstance(configure_optim_output, L.fabric.utilities.types.LRScheduler):
             return None, _lr_sched_defaults.update(scheduler=configure_optim_output)

         # single lr scheduler config
-        elif isinstance(configure_optim_output, Mapping):
+        if isinstance(configure_optim_output, Mapping):
             _lr_sched_defaults.update(configure_optim_output)
             return None, _lr_sched_defaults

         # list or tuple
-        elif isinstance(configure_optim_output, (list, tuple)):
+        if isinstance(configure_optim_output, (list, tuple)):
             if all(isinstance(_opt_cand, L.fabric.utilities.types.Optimizable) for _opt_cand in configure_optim_output):
                 # single optimizer in list
                 if len(configure_optim_output) == 1:
                     return configure_optim_output[0][0], None

                 raise NotImplementedError("BYOT only supports a single optimizer")

-            elif all(
+            if all(
                 isinstance(_lr_cand, (L.fabric.utilities.types.LRScheduler, Mapping))
                 for _lr_cand in configure_optim_output
             ):
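
Worth noting about the `_parse_optimizers_schedulers` hunk above: swapping `elif` for `if` is behavior-preserving only because every preceding branch ends in `return` or `raise`, so control can never fall through into the next test. A minimal sketch of that invariant (illustrative names):

    def pick(x: int) -> str:
        if x == 1:
            return "one"  # this branch always exits...
        if x == 2:        # ...so a plain `if` is equivalent to the old `elif`
            return "two"
        return "many"
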
3 changes: 1 addition & 2 deletions examples/fabric/image_classifier/train_fabric.py
@@ -67,8 +67,7 @@ def forward(self, x):
         x = F.relu(x)
         x = self.dropout2(x)
         x = self.fc2(x)
-        output = F.log_softmax(x, dim=1)
-        return output
+        return F.log_softmax(x, dim=1)


 def run(hparams):
3 changes: 1 addition & 2 deletions examples/fabric/image_classifier/train_torch.py
@@ -49,8 +49,7 @@ def forward(self, x):
         x = F.relu(x)
         x = self.dropout2(x)
         x = self.fc2(x)
-        output = F.log_softmax(x, dim=1)
-        return output
+        return F.log_softmax(x, dim=1)


 def run(hparams):
3 changes: 1 addition & 2 deletions examples/fabric/kfold_cv/train_fabric.py
@@ -53,8 +53,7 @@ def forward(self, x):
         x = F.relu(x)
         x = self.dropout2(x)
         x = self.fc2(x)
-        output = F.log_softmax(x, dim=1)
-        return output
+        return F.log_softmax(x, dim=1)


 def train_dataloader(model, data_loader, optimizer, fabric, epoch, hparams, fold):
3 changes: 1 addition & 2 deletions examples/fabric/reinforcement_learning/rl/loss.py
@@ -6,8 +6,7 @@
 def policy_loss(advantages: torch.Tensor, ratio: torch.Tensor, clip_coef: float) -> torch.Tensor:
     pg_loss1 = -advantages * ratio
     pg_loss2 = -advantages * torch.clamp(ratio, 1 - clip_coef, 1 + clip_coef)
-    pg_loss = torch.max(pg_loss1, pg_loss2).mean()
-    return pg_loss
+    return torch.max(pg_loss1, pg_loss2).mean()


 def value_loss(
3 changes: 1 addition & 2 deletions examples/fabric/reinforcement_learning/rl/utils.py
@@ -122,8 +122,7 @@ def parse_args():
     parser.add_argument("--ent-coef", type=float, default=0.0, help="coefficient of the entropy")
     parser.add_argument("--vf-coef", type=float, default=1.0, help="coefficient of the value function")
     parser.add_argument("--max-grad-norm", type=float, default=0.5, help="the maximum norm for the gradient clipping")
-    args = parser.parse_args()
-    return args
+    return parser.parse_args()


 def layer_init(
6 changes: 2 additions & 4 deletions examples/pytorch/basics/autoencoder.py
@@ -122,8 +122,7 @@ def __init__(self, hidden_dim: int = 64, learning_rate=10e-3):

     def forward(self, x):
         z = self.encoder(x)
-        x_hat = self.decoder(z)
-        return x_hat
+        return self.decoder(z)

     def training_step(self, batch, batch_idx):
         return self._common_step(batch, batch_idx, "train")
@@ -139,8 +138,7 @@ def predict_step(self, batch, batch_idx, dataloader_idx=None):
         return self(x)

     def configure_optimizers(self):
-        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-        return optimizer
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

     def _prepare_batch(self, batch):
         x, _ = batch
3 changes: 1 addition & 2 deletions examples/pytorch/basics/backbone_image_classifier.py
@@ -71,8 +71,7 @@ def __init__(self, backbone: Optional[Backbone] = None, learning_rate: float = 0

     def forward(self, x):
         # use forward for inference/predictions
-        embedding = self.backbone(x)
-        return embedding
+        return self.backbone(x)

     def training_step(self, batch, batch_idx):
         x, y = batch
@@ -89,9 +89,7 @@ def __init__(self, img_shape):

     def forward(self, img):
         img_flat = img.view(img.size(0), -1)
-        validity = self.model(img_flat)
-
-        return validity
+        return self.model(img_flat)


 class GAN(LightningModule):
6 changes: 2 additions & 4 deletions examples/pytorch/domain_templates/reinforce_learn_Qnet.py
@@ -283,8 +283,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         Returns:
             q values
         """
-        output = self.net(x)
-        return output
+        return self.net(x)

     def dqn_mse_loss(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
         """Calculates the mse loss using a mini batch from the replay buffer.
@@ -353,8 +352,7 @@ def configure_optimizers(self) -> List[Optimizer]:
     def __dataloader(self) -> DataLoader:
         """Initialize the Replay Buffer dataset used for retrieving experiences."""
         dataset = RLDataset(self.buffer, self.episode_length)
-        dataloader = DataLoader(dataset=dataset, batch_size=self.batch_size, sampler=None)
-        return dataloader
+        return DataLoader(dataset=dataset, batch_size=self.batch_size, sampler=None)

     def train_dataloader(self) -> DataLoader:
         """Get train loader."""
20 changes: 6 additions & 14 deletions examples/pytorch/domain_templates/reinforce_learn_ppo.py
@@ -42,16 +42,14 @@

 def create_mlp(input_shape: Tuple[int], n_actions: int, hidden_size: int = 128):
     """Simple Multi-Layer Perceptron network."""
-    network = nn.Sequential(
+    return nn.Sequential(
         nn.Linear(input_shape[0], hidden_size),
         nn.ReLU(),
         nn.Linear(hidden_size, hidden_size),
         nn.ReLU(),
         nn.Linear(hidden_size, n_actions),
     )
-
-    return network


 class ActorCategorical(nn.Module):
     """Policy network, for discrete action spaces, which returns a distribution and an action given an
@@ -135,8 +133,7 @@ def __init__(self, generate_batch: Callable):
         self.generate_batch = generate_batch

     def __iter__(self) -> Iterator:
-        iterator = self.generate_batch()
-        return iterator
+        return self.generate_batch()


 class PPOLightning(LightningModule):
@@ -275,9 +272,7 @@ def calc_advantage(self, rewards: List[float], values: List[float], last_value:
         vals = values + [last_value]
         # GAE
         delta = [rews[i] + self.gamma * vals[i + 1] - vals[i] for i in range(len(rews) - 1)]
-        adv = self.discount_rewards(delta, self.gamma * self.lam)
-
-        return adv
+        return self.discount_rewards(delta, self.gamma * self.lam)

     def generate_trajectory_samples(self) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]:
         """
@@ -367,13 +362,11 @@ def actor_loss(self, state, action, logp_old, qval, adv) -> torch.Tensor:
         logp = self.actor.get_log_prob(pi, action)
         ratio = torch.exp(logp - logp_old)
         clip_adv = torch.clamp(ratio, 1 - self.clip_ratio, 1 + self.clip_ratio) * adv
-        loss_actor = -(torch.min(ratio * adv, clip_adv)).mean()
-        return loss_actor
+        return -(torch.min(ratio * adv, clip_adv)).mean()

     def critic_loss(self, state, action, logp_old, qval, adv) -> torch.Tensor:
         value = self.critic(state)
-        loss_critic = (qval - value).pow(2).mean()
-        return loss_critic
+        return (qval - value).pow(2).mean()

     def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor]):
         """Carries out a single update to actor and critic network from a batch of replay buffer.
@@ -420,8 +413,7 @@ def optimizer_step(self, *args, **kwargs):
     def _dataloader(self) -> DataLoader:
         """Initialize the Replay Buffer dataset used for retrieving experiences."""
         dataset = ExperienceSourceDataset(self.generate_trajectory_samples)
-        dataloader = DataLoader(dataset=dataset, batch_size=self.batch_size)
-        return dataloader
+        return DataLoader(dataset=dataset, batch_size=self.batch_size)

     def train_dataloader(self) -> DataLoader:
         """Get train loader."""
6 changes: 2 additions & 4 deletions examples/pytorch/hpu/mnist_sample.py
@@ -31,8 +31,7 @@ def forward(self, x):

     def training_step(self, batch, batch_idx):
         x, y = batch
-        loss = F.cross_entropy(self(x), y)
-        return loss
+        return F.cross_entropy(self(x), y)

     def validation_step(self, batch, batch_idx):
         x, y = batch
@@ -48,8 +47,7 @@ def test_step(self, batch, batch_idx):

     @staticmethod
     def accuracy(logits, y):
-        acc = torch.sum(torch.eq(torch.argmax(logits, -1), y).to(torch.float32)) / len(y)
-        return acc
+        return torch.sum(torch.eq(torch.argmax(logits, -1), y).to(torch.float32)) / len(y)

     def configure_optimizers(self):
         return torch.optim.Adam(self.parameters(), lr=0.02)
6 changes: 2 additions & 4 deletions examples/pytorch/ipu/mnist_sample.py
@@ -39,8 +39,7 @@ def forward(self, x):
     def training_step(self, batch, batch_idx):
         x, y = batch
         y_hat = self(x)
-        loss = F.cross_entropy(y_hat, y)
-        return loss
+        return F.cross_entropy(y_hat, y)

     def validation_step(self, batch, batch_idx):
         x, y = batch
@@ -60,8 +59,7 @@ def accuracy(self, logits, y):
         # currently IPU poptorch doesn't implicitly convert bools to tensors
         # hence we use an explicit calculation for accuracy here. Once fixed in poptorch
         # we can use the accuracy metric.
-        acc = torch.sum(torch.eq(torch.argmax(logits, -1), y).to(torch.float32)) / len(y)
-        return acc
+        return torch.sum(torch.eq(torch.argmax(logits, -1), y).to(torch.float32)) / len(y)

     def on_validation_epoch_end(self) -> None:
         # since the training step/validation step and test step are run on the IPU device
3 changes: 1 addition & 2 deletions examples/pytorch/servable_module/production.py
@@ -89,8 +89,7 @@ def configure_payload(self):
         pil_image.save(buffered, format="JPEG")
         img_str = base64.b64encode(buffered.getvalue()).decode("UTF-8")

-        payload = {"body": {"x": img_str}}
-        return payload
+        return {"body": {"x": img_str}}

     def configure_serialization(self):
         return {"x": Image(224, 224).deserialize}, {"output": Top1().serialize}
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -60,6 +60,7 @@ select = [
 extend-select = [
     "C4",  # see: https://pypi.org/project/flake8-comprehensions
     "SIM",  # see: https://pypi.org/project/flake8-simplify
+    "RET",  # see: https://pypi.org/project/flake8-return
     "PT",  # see: https://pypi.org/project/flake8-pytest-style
 ]
 ignore = [
@@ -96,6 +97,7 @@ ignore-init-module-imports = true
     "S603",  # todo: `subprocess` call: check for execution of untrusted input
     "S605",  # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
     "S607",  # todo: Starting a process with a partial executable path
+    "RET504",  # todo: Unnecessary variable assignment before `return` statement
 ]
 "tests/**" = [
     "S101",  # Use of `assert` detected
@@ -108,6 +110,7 @@ ignore-init-module-imports = true
     "S603",  # todo: `subprocess` call: check for execution of untrusted input
     "S605",  # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
     "S607",  # todo: Starting a process with a partial executable path
+    "RET504",  # todo: Unnecessary variable assignment before `return` statement
     "PT004",  # todo: Fixture `tmpdir_unittest_fixture` does not return anything, add leading underscore
     "PT011",  # todo: `pytest.raises(ValueError)` is too broad, set the `match` parameter or use a more specific exception
     "PT012",  # todo: `pytest.raises()` block should contain a single simple statement
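
To reproduce these checks locally, one would select the RET rules when invoking ruff — a sketch assuming a ruff version contemporary with this commit (exact flags may differ across versions):

    pip install ruff pre-commit
    ruff check . --select RET          # report flake8-return violations
    ruff check . --select RET --fix    # apply the safe auto-fixes
    pre-commit run --all-files         # run the configured hooks, including flake8-return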