Skip to content

Commit

Permalink
Merge pull request #2068 from lllyasviel/dev0127
Browse files Browse the repository at this point in the history
2.1.864
  • Loading branch information
lllyasviel authored Jan 27, 2024
2 parents f6d67d7 + 0cb2db9 commit 3b1cd37
Show file tree
Hide file tree
Showing 56 changed files with 2,021 additions and 264 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ user_path_config.txt
user_path_config-deprecated.txt
/modules/*.png
/repositories
/fooocus_env
/venv
/tmp
/ui-config.json
Expand Down
6 changes: 6 additions & 0 deletions args_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,12 @@
args_parser.parser.add_argument("--disable-analytics", action='store_true',
help="Disables analytics for Gradio", default=False)

# CLI flag: skip downloading the preset's checkpoint/embedding/lora files at
# startup (launch.py's download_models honors this and returns early).
args_parser.parser.add_argument("--disable-preset-download", action='store_true',
help="Disables downloading models for presets", default=False)

# CLI flag: always fetch the newest default models instead of falling back to
# an older default checkpoint that is already on disk (see download_models).
args_parser.parser.add_argument("--always-download-new-model", action='store_true',
help="Always download newer models ", default=False)

args_parser.parser.set_defaults(
disable_cuda_malloc=True,
in_browser=True,
Expand Down
2 changes: 1 addition & 1 deletion fooocus_version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
version = '2.1.862'
version = '2.1.864'
59 changes: 38 additions & 21 deletions launch.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,7 @@
from build_launcher import build_launcher
from modules.launch_util import is_installed, run, python, run_pip, requirements_met
from modules.model_loader import load_file_from_url
from modules.config import path_checkpoints, path_loras, path_vae_approx, path_fooocus_expansion, \
checkpoint_downloads, path_embeddings, embeddings_downloads, lora_downloads
from modules import config


REINSTALL_ALL = False
Expand Down Expand Up @@ -70,25 +69,6 @@ def prepare_environment():
]


def download_models():
    """Fetch every model file required at startup into its target directory."""
    # (entries, destination) pairs; each entries iterable yields (file name, URL).
    download_plan = [
        (checkpoint_downloads.items(), path_checkpoints),
        (embeddings_downloads.items(), path_embeddings),
        (lora_downloads.items(), path_loras),
        (vae_approx_filenames, path_vae_approx),
    ]
    for entries, target_dir in download_plan:
        for name, link in entries:
            load_file_from_url(url=link, model_dir=target_dir, file_name=name)

    # The fooocus_expansion weights are always fetched.
    load_file_from_url(
        url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
        model_dir=path_fooocus_expansion,
        file_name='pytorch_model.bin'
    )


def ini_args():
    """Return the parsed command-line arguments from args_manager.

    Imported locally, as in the original, so argument parsing happens only
    when this helper is first called.
    """
    import args_manager
    return args_manager.args
Expand All @@ -104,6 +84,43 @@ def ini_args():
print("Set device to:", args.gpu_device_id)


def download_models():
    """Download startup models, honoring the preset-download CLI flags.

    The VAE approximation files and the fooocus_expansion weights are always
    fetched.  With --disable-preset-download the preset checkpoint /
    embedding / lora downloads are skipped entirely.  Without
    --always-download-new-model, if the default base model is missing but an
    older default checkpoint is present on disk, that older model is used and
    the new checkpoint download is cancelled.
    """
    for name, link in vae_approx_filenames:
        load_file_from_url(url=link, model_dir=config.path_vae_approx, file_name=name)

    load_file_from_url(
        url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
        model_dir=config.path_fooocus_expansion,
        file_name='pytorch_model.bin'
    )

    if args.disable_preset_download:
        print('Skipped model download.')
        return

    default_model_missing = not os.path.exists(
        os.path.join(config.path_checkpoints, config.default_base_model_name))
    if not args.always_download_new_model and default_model_missing:
        # Fall back to the first previous default model already on disk and
        # cancel the preset checkpoint downloads.
        for fallback_name in config.previous_default_models:
            if os.path.exists(os.path.join(config.path_checkpoints, fallback_name)):
                print(f'You do not have [{config.default_base_model_name}] but you have [{fallback_name}].')
                print(f'Fooocus will use [{fallback_name}] to avoid downloading new models, '
                      f'but you are not using latest models.')
                print('Use --always-download-new-model to avoid fallback and always get new models.')
                config.checkpoint_downloads = {}
                config.default_base_model_name = fallback_name
                break

    for name, link in config.checkpoint_downloads.items():
        load_file_from_url(url=link, model_dir=config.path_checkpoints, file_name=name)
    for name, link in config.embeddings_downloads.items():
        load_file_from_url(url=link, model_dir=config.path_embeddings, file_name=name)
    for name, link in config.lora_downloads.items():
        load_file_from_url(url=link, model_dir=config.path_loras, file_name=name)


download_models()


from webui import *
68 changes: 66 additions & 2 deletions ldm_patched/contrib/external.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,62 @@ def encode(self, vae, pixels, mask, grow_mask_by=6):

return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )


class InpaintModelConditioning:
    """Node that prepares conditioning and a latent for inpaint models.

    Encodes the image with the masked region grayed out and attaches that
    latent plus the mask to both conditionings as ``concat_latent_image`` /
    ``concat_mask``; returns the latent of the untouched image with the mask
    as its noise mask.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             "mask": ("MASK", ),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, positive, negative, pixels, vae, mask):
        # Largest height/width multiples of 8 that fit inside the image.
        height8 = (pixels.shape[1] // 8) * 8
        width8 = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(
            mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
            size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()
        if pixels.shape[1] != height8 or pixels.shape[2] != width8:
            # Center-crop both image and mask to the 8-aligned size.
            off_h = (pixels.shape[1] % 8) // 2
            off_w = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, off_h:height8 + off_h, off_w:width8 + off_w, :]
            mask = mask[:, :, off_h:height8 + off_h, off_w:width8 + off_w]

        # Gray out (set to 0.5) the masked region of each of the 3 channels
        # before encoding the "concat" latent.
        keep = (1.0 - mask.round()).squeeze(1)
        for channel in range(3):
            pixels[:, :, :, channel] = (pixels[:, :, :, channel] - 0.5) * keep + 0.5

        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {"samples": orig_latent, "noise_mask": mask}

        conditioned = []
        for conditioning in [positive, negative]:
            updated = []
            for entry in conditioning:
                extras = entry[1].copy()
                extras["concat_latent_image"] = concat_latent
                extras["concat_mask"] = mask
                updated.append([entry[0], extras])
            conditioned.append(updated)
        return (conditioned[0], conditioned[1], out_latent)


class SaveLatent:
def __init__(self):
self.output_dir = ldm_patched.utils.path_utils.get_output_directory()
Expand Down Expand Up @@ -1417,6 +1473,8 @@ def load_image(self, image):
output_masks = []
for i in ImageSequence.Iterator(img):
i = ImageOps.exif_transpose(i)
if i.mode == 'I':
i = i.point(lambda i: i * (1 / 255))
image = i.convert("RGB")
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None,]
Expand Down Expand Up @@ -1472,6 +1530,8 @@ def load_image(self, image, channel):
i = Image.open(image_path)
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
if i.mode == 'I':
i = i.point(lambda i: i * (1 / 255))
i = i.convert("RGBA")
mask = None
c = channel[0].upper()
Expand Down Expand Up @@ -1626,10 +1686,11 @@ def INPUT_TYPES(s):
def expand_image(self, image, left, top, right, bottom, feathering):
d1, d2, d3, d4 = image.size()

new_image = torch.zeros(
new_image = torch.ones(
(d1, d2 + top + bottom, d3 + left + right, d4),
dtype=torch.float32,
)
) * 0.5

new_image[:, top:top + d2, left:left + d3, :] = image

mask = torch.ones(
Expand Down Expand Up @@ -1721,6 +1782,7 @@ def expand_image(self, image, left, top, right, bottom, feathering):
"unCLIPCheckpointLoader": unCLIPCheckpointLoader,
"GLIGENLoader": GLIGENLoader,
"GLIGENTextBoxApply": GLIGENTextBoxApply,
"InpaintModelConditioning": InpaintModelConditioning,

"CheckpointLoader": CheckpointLoader,
"DiffusersLoader": DiffusersLoader,
Expand Down Expand Up @@ -1882,6 +1944,8 @@ def init_custom_nodes():
"nodes_sag.py",
"nodes_perpneg.py",
"nodes_stable3d.py",
"nodes_sdupscale.py",
"nodes_photomaker.py",
]

for node_file in extras_files:
Expand Down
12 changes: 10 additions & 2 deletions ldm_patched/contrib/external_custom_sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,22 @@ def INPUT_TYPES(s):
{"model": ("MODEL",),
"scheduler": (ldm_patched.modules.samplers.SCHEDULER_NAMES, ),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
}
}
RETURN_TYPES = ("SIGMAS",)
CATEGORY = "sampling/custom_sampling/schedulers"

FUNCTION = "get_sigmas"

def get_sigmas(self, model, scheduler, steps):
sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, steps).cpu()
def get_sigmas(self, model, scheduler, steps, denoise):
    """Compute the sigma schedule for `steps` sampling steps.

    With denoise < 1.0 the full schedule is computed for steps/denoise
    steps and only the last ``steps + 1`` sigmas are kept, so sampling
    starts from a partially-noised image.

    Fix: INPUT_TYPES allows denoise as low as 0.0, and ``int(steps/denoise)``
    would raise ZeroDivisionError there.  denoise <= 0 means "no denoising",
    so return an empty schedule instead of crashing.
    """
    total_steps = steps
    if denoise < 1.0:
        if denoise <= 0.0:
            return (torch.FloatTensor([]),)
        total_steps = int(steps / denoise)

    # Load the model first; calculate_sigmas_scheduler reads model.model.
    ldm_patched.modules.model_management.load_models_gpu([model])
    sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, total_steps).cpu()
    sigmas = sigmas[-(steps + 1):]
    return (sigmas, )


Expand Down Expand Up @@ -100,6 +107,7 @@ def INPUT_TYPES(s):
def get_sigmas(self, model, steps, denoise):
    """Return `steps` sigmas (plus a trailing zero) for the given denoise.

    Timesteps run 999, 899, ..., 99 in descending order; denoise < 1.0
    skips the earliest (noisiest) timesteps.
    """
    first = 10 - int(10 * denoise)
    timesteps = (torch.arange(10, 0, -1) * 100 - 1)[first:first + steps]
    ldm_patched.modules.model_management.load_models_gpu([model])
    schedule = model.model.model_sampling.sigma(timesteps)
    return (torch.cat([schedule, schedule.new_zeros([1])]), )
Expand Down
4 changes: 2 additions & 2 deletions ldm_patched/contrib/external_freelunch.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def INPUT_TYPES(s):
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"

CATEGORY = "_for_testing"
CATEGORY = "model_patches"

def patch(self, model, b1, b2, s1, s2):
model_channels = model.model.model_config.unet_config["model_channels"]
Expand Down Expand Up @@ -75,7 +75,7 @@ def INPUT_TYPES(s):
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"

CATEGORY = "_for_testing"
CATEGORY = "model_patches"

def patch(self, model, b1, b2, s1, s2):
model_channels = model.model.model_config.unet_config["model_channels"]
Expand Down
18 changes: 9 additions & 9 deletions ldm_patched/contrib/external_hypertile.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,29 +34,29 @@ def INPUT_TYPES(s):
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"

CATEGORY = "_for_testing"
CATEGORY = "model_patches"

def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
model_channels = model.model.model_config.unet_config["model_channels"]

apply_to = set()
temp = model_channels
for x in range(max_depth + 1):
apply_to.add(temp)
temp *= 2

latent_tile_size = max(32, tile_size) // 8
self.temp = None

def hypertile_in(q, k, v, extra_options):
if q.shape[-1] in apply_to:
model_chans = q.shape[-2]
orig_shape = extra_options['original_shape']
apply_to = []
for i in range(max_depth + 1):
apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))

if model_chans in apply_to:
shape = extra_options["original_shape"]
aspect_ratio = shape[-1] / shape[-2]

hw = q.size(1)
h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))

factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
nh = random_divisor(h, latent_tile_size * factor, swap_size)
nw = random_divisor(w, latent_tile_size * factor, swap_size)

Expand Down
24 changes: 24 additions & 0 deletions ldm_patched/contrib/external_latent.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,10 +124,34 @@ def batch(self, samples1, samples2):
samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])])
return (samples_out,)

class LatentBatchSeedBehavior:
    """Node controlling how noise seeds map onto a latent batch.

    "random" removes any ``batch_index`` so each batch item gets its own
    noise; "fixed" pins every item to the first item's batch index so the
    whole batch shares one seed offset.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "seed_behavior": (["random", "fixed"],),}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples, seed_behavior):
        result = samples.copy()
        latent_tensor = samples["samples"]
        if seed_behavior == "random":
            # Shallow copy: popping here leaves the caller's dict untouched.
            result.pop('batch_index', None)
        elif seed_behavior == "fixed":
            first_index = result.get("batch_index", [0])[0]
            result["batch_index"] = [first_index] * latent_tensor.shape[0]

        return (result,)

# Registry mapping node type names to their implementing classes.
NODE_CLASS_MAPPINGS = {
"LatentAdd": LatentAdd,
"LatentSubtract": LatentSubtract,
"LatentMultiply": LatentMultiply,
"LatentInterpolate": LatentInterpolate,
"LatentBatch": LatentBatch,
"LatentBatchSeedBehavior": LatentBatchSeedBehavior,
}
Loading

0 comments on commit 3b1cd37

Please sign in to comment.