Support for Prodigy (DAdapt variety for DyLoRA) (#585)
* Update train_util.py for DAdaptLion

* Update train_README-zh.md for dadaptlion

* Update train_README-ja.md for DAdaptLion

* add DAdapt V3

* Alignment

* Update train_util.py for experimental

* Update train_util.py V3

* Update train_README-zh.md

* Update train_README-ja.md

* Update train_util.py fix

* Update train_util.py

* support Prodigy

* add lower
sdbds authored Jun 15, 2023
1 parent f0bb3ae commit e97d67a
Showing 8 changed files with 41 additions and 7 deletions.
1 change: 1 addition & 0 deletions docs/train_README-ja.md
@@ -622,6 +622,7 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b
- DAdaptAdanIP : 引数は同上
- DAdaptLion : 引数は同上
- DAdaptSGD : 引数は同上
- Prodigy : https://github.com/konstmish/prodigy
- AdaFactor : [Transformers AdaFactor](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules)
- 任意のオプティマイザ
3 changes: 2 additions & 1 deletion docs/train_README-zh.md
@@ -555,9 +555,10 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b
- DAdaptAdam : 参数同上
- DAdaptAdaGrad : 参数同上
- DAdaptAdan : 参数同上
- DAdaptAdanIP : 引数は同上
- DAdaptAdanIP : 参数同上
- DAdaptLion : 参数同上
- DAdaptSGD : 参数同上
- Prodigy : https://github.com/konstmish/prodigy
- AdaFactor : [Transformers AdaFactor](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules)
- 任何优化器
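Both README entries above list Prodigy alongside the D-Adaptation optimizers and note that it takes arguments the same way, i.e. as key=value pairs on --optimizer_args that end up in the optimizer_kwargs dict used further down in get_optimizer. The snippet below is a hedged sketch of one way such pairs can be parsed into keyword arguments; it illustrates the idea and is not a verbatim copy of this repository's argument-parsing code.

```python
import ast

def parse_optimizer_args(pairs):
    """Turn ["safeguard_warmup=True", "weight_decay=0.01"] into a kwargs dict."""
    kwargs = {}
    for pair in pairs:
        key, value = pair.split("=", 1)
        try:
            # literal_eval handles booleans, ints, floats, tuples, ...
            kwargs[key] = ast.literal_eval(value)
        except (ValueError, SyntaxError):
            kwargs[key] = value  # keep unparseable values as plain strings
    return kwargs

optimizer_kwargs = parse_optimizer_args(["safeguard_warmup=True"])
# later, as in get_optimizer below:
# optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
```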
2 changes: 1 addition & 1 deletion fine_tune.py
@@ -397,7 +397,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず
if args.logging_dir is not None:
logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
if args.optimizer_type.lower().startswith("DAdapt".lower()): # tracking d*lr value
if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower(): # tracking d*lr value
logs["lr/d*lr"] = (
lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
)
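D-Adaptation and Prodigy both estimate a distance term d on the fly, so the effective step size is roughly d * lr, which is why these scripts log that product rather than the raw learning rate. A minimal standalone sketch of the same computation, assuming an optimizer whose param groups expose a "d" entry (prodigyopt.Prodigy and the dadaptation optimizers do, as the diff itself relies on):

```python
# Sketch of the d*lr logging value computed in the diff above.
# Assumes a Prodigy/DAdapt-style optimizer whose param groups contain a "d" key.
def d_times_lr(optimizer) -> float:
    group = optimizer.param_groups[0]
    return group["d"] * group["lr"]

# inside the training loop this would feed the logs, e.g.:
# logs["lr/d*lr"] = d_times_lr(lr_scheduler.optimizers[0])
```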
32 changes: 32 additions & 0 deletions library/train_util.py
@@ -2808,6 +2808,38 @@ def get_optimizer(args, trainable_params):

optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)

elif optimizer_type == "Prodigy".lower():
# Prodigy
# check Prodigy is installed
try:
import prodigyopt
except ImportError:
raise ImportError("No Prodigy / Prodigy がインストールされていないようです")

# check lr and lr_count, and print warning
actual_lr = lr
lr_count = 1
if type(trainable_params) == list and type(trainable_params[0]) == dict:
lrs = set()
actual_lr = trainable_params[0].get("lr", actual_lr)
for group in trainable_params:
lrs.add(group.get("lr", actual_lr))
lr_count = len(lrs)

if actual_lr <= 0.1:
print(
f"learning rate is too low. If using Prodigy, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: lr={actual_lr}"
)
print("recommend option: lr=1.0 / 推奨は1.0です")
if lr_count > 1:
print(
f"when multiple learning rates are specified with Prodigy (e.g. for Text Encoder and U-Net), only the first one will take effect / Prodigyで複数の学習率を指定した場合(Text EncoderとU-Netなど)、最初の学習率のみが有効になります: lr={actual_lr}"
)

print(f"use Prodigy optimizer | {optimizer_kwargs}")
optimizer_class = prodigyopt.Prodigy
optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)

elif optimizer_type == "Adafactor".lower():
# 引数を確認して適宜補正する
if "relative_step" not in optimizer_kwargs:
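For reference, the new Prodigy branch in get_optimizer boils down to importing prodigyopt and constructing prodigyopt.Prodigy with the user's trainable parameters, lr, and keyword arguments. The following is a minimal, self-contained sketch of using Prodigy directly; the toy model, weight_decay value, and safeguard_warmup flag are illustrative assumptions, while lr=1.0 matches the recommendation printed by the warning above.

```python
# Minimal sketch of driving Prodigy outside this repository (pip install prodigyopt).
# The model and hyperparameters are illustrative, not taken from sd-scripts.
import torch
import prodigyopt

model = torch.nn.Linear(16, 4)  # stand-in for the real network

optimizer = prodigyopt.Prodigy(
    model.parameters(),
    lr=1.0,                 # Prodigy adapts d itself; lr=1.0 is the usual choice and acts as a multiplier
    weight_decay=0.01,      # illustrative value
    safeguard_warmup=True,  # often suggested when a warmup scheduler is used
)

for _ in range(3):  # toy training steps
    loss = model(torch.randn(8, 16)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```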
2 changes: 1 addition & 1 deletion train_db.py
@@ -384,7 +384,7 @@ def train(args):
current_loss = loss.detach().item()
if args.logging_dir is not None:
logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
if args.optimizer_type.lower().startswith("DAdapt".lower()): # tracking d*lr value
if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower(): # tracking d*lr value
logs["lr/d*lr"] = (
lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
)
4 changes: 2 additions & 2 deletions train_network.py
@@ -57,7 +57,7 @@ def generate_step_logs(
logs["lr/textencoder"] = float(lrs[0])
logs["lr/unet"] = float(lrs[-1]) # may be same to textencoder

if args.optimizer_type.lower().startswith("DAdapt".lower()): # tracking d*lr value of unet.
if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower(): # tracking d*lr value of unet.
logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]["d"] * lr_scheduler.optimizers[-1].param_groups[0]["lr"]
else:
idx = 0
@@ -67,7 +67,7 @@

for i in range(idx, len(lrs)):
logs[f"lr/group{i}"] = float(lrs[i])
if args.optimizer_type.lower().startswith("DAdapt".lower()):
if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():
logs[f"lr/d*lr/group{i}"] = (
lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
)
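The per-group branch above applies the same idea to every param group (for example, separate Text Encoder and U-Net groups in a LoRA/DyLoRA run). A short, hedged sketch of that loop as a standalone helper, again assuming param groups that expose a "d" key:

```python
# Sketch of per-group d*lr logging, mirroring the loop in generate_step_logs above.
def collect_d_lr_logs(optimizer, start_index: int = 0) -> dict:
    logs = {}
    for i in range(start_index, len(optimizer.param_groups)):
        group = optimizer.param_groups[i]
        logs[f"lr/d*lr/group{i}"] = group["d"] * group["lr"]
    return logs
```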
2 changes: 1 addition & 1 deletion train_textual_inversion.py
@@ -476,7 +476,7 @@ def remove_model(old_ckpt_name):
current_loss = loss.detach().item()
if args.logging_dir is not None:
logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
if args.optimizer_type.lower().startswith("DAdapt".lower()): # tracking d*lr value
if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower(): # tracking d*lr value
logs["lr/d*lr"] = (
lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
)
2 changes: 1 addition & 1 deletion train_textual_inversion_XTI.py
@@ -515,7 +515,7 @@ def remove_model(old_ckpt_name):
current_loss = loss.detach().item()
if args.logging_dir is not None:
logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
if args.optimizer_type.lower().startswith("DAdapt".lower()): # tracking d*lr value
if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower(): # tracking d*lr value
logs["lr/d*lr"] = (
lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
)

2 comments on commit e97d67a

@idlebg commented on e97d67a, Jun 16, 2023

[attached sample images: 00040-389399940, 00055-389399941]
In under 15 min on a 4090 with just 1 rep and 2 epochs.. ha :D... 🤟 🥃
--v2 --v_parameterization --enable_bucket --pretrained_model_name_or_path="FFusion/di.FFUSION.ai-v2.1-768-BaSE-alpha" --resolution="1024,1024" --network_alpha="64" --training_comment="di.FFusion.ai" --save_model_as=safetensors --network_module=networks.dylora --network_args conv_dim="64" conv_alpha="64" unit="2" rank_dropout="0.04" module_dropout="0.02" --text_encoder_lr=1.0 --unet_lr=1.0 --network_dim=64 --output_name="cyberbunnyfusion11" --lr_scheduler_num_cycles="2" --scale_weight_norms="1" --network_dropout="0.1" --learning_rate="1.0" --lr_scheduler="linear" --lr_warmup_steps="20" --train_batch_size="4" --save_every_n_epochs="1" --mixed_precision="fp16" --save_precision="fp16" --caption_extension=".txt" --cache_latents --optimizer_type="prodigy" --optimizer_args safeguard_warmup=True --max_token_length=225 --clip_skip=2 --keep_tokens="2" --vae_batch_size="2" --bucket_reso_steps=64 --min_snr_gamma=4 --save_state --xformers --bucket_no_upscale --scale_v_pred_loss_like_noise_pred --noise_offset=0.12 --adaptive_noise_scale=0.01

@idlebg commented on e97d67a, Jun 16, 2023

[attached image]

Planning to test this one on a larger scale soon, along with CosineAnnealingLR.
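Since the effective step size with Prodigy is roughly d times the scheduled lr, an external scheduler such as CosineAnnealingLR simply shapes that multiplier over training. A hedged sketch of the combination mentioned in this comment, where the model and total step count are assumed values:

```python
# Sketch of pairing Prodigy with CosineAnnealingLR; total_steps is an assumed value.
import torch
import prodigyopt

model = torch.nn.Linear(16, 4)  # stand-in for the real network
total_steps = 1000              # assumed number of training steps

optimizer = prodigyopt.Prodigy(model.parameters(), lr=1.0, safeguard_warmup=True)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps)

# each training step: optimizer.step(); scheduler.step(); optimizer.zero_grad()
```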
