Skip to content

Commit

Permalink
Merge pull request #94 from LlmKira/dev
Browse files Browse the repository at this point in the history
✨ refactor: update logic
  • Loading branch information
sudoskys authored Jan 2, 2025
2 parents 8e9fdaf + 402b999 commit 03e15d0
Show file tree
Hide file tree
Showing 5 changed files with 187 additions and 60 deletions.
32 changes: 30 additions & 2 deletions playground/generate_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,14 @@
import asyncio
import os
import pathlib
import random

from dotenv import load_dotenv
from pydantic import SecretStr

from novelai_python import APIError, LoginCredential
from novelai_python import GenerateImageInfer, ImageGenerateResp, JwtCredential
from novelai_python.sdk.ai.generate_image import Action, Model, Sampler, Character, UCPreset
from novelai_python.sdk.ai.generate_image import Action, Model, Sampler, Character, UCPreset, Params
from novelai_python.sdk.ai.generate_image.schema import PositionMap
from novelai_python.utils.useful import enum_to_list

Expand All @@ -33,6 +34,7 @@ async def generate(
print(f"Action List:{enum_to_list(Action)}")
print(
"""
PositionMap
.1 .3 .5 .7 .9
A1 B1 C1 D1 E1
A2 B2 C2 D2 E2
Expand All @@ -59,7 +61,6 @@ async def generate(
sampler=Sampler.K_EULER_ANCESTRAL,
ucPreset=UCPreset.TYPE0,
# Recommended, using preset negative_prompt depends on selected model
qualitySuffix=True,
qualityToggle=True,
decrisp_mode=False,
variety_boost=True,
Expand All @@ -81,6 +82,33 @@ async def generate(
f.write(file[1])


async def direct_use():
    """
    Example of skipping the builder helpers and instantiating the request
    model directly — it is a plain pydantic model, so keyword construction
    works fine.

    Sends one generation request with a demo (non-functional) token and
    prints the response metadata.
    :return: None
    """
    credential = JwtCredential(jwt_token=SecretStr("pst-5555"))
    generation_params = Params(
        width=832,
        height=1216,
        characterPrompts=[],
        # Random seed within the API's accepted 32-bit range.
        seed=random.randint(0, 4294967295 - 7),
        scale=5,
        negative_prompt="lowres",
        qualityToggle=True,
        sampler=Sampler.K_EULER_ANCESTRAL,
        ucPreset=UCPreset.TYPE0,
        steps=23,
        n_samples=1,
    )
    request = GenerateImageInfer(
        input="1girl",
        model=Model.NAI_DIFFUSION_4_CURATED_PREVIEW,
        parameters=generation_params,
    )
    result = await request.request(session=credential)
    print(f"Meta: {result.meta}")


# Load NAI credentials (e.g. NOVELAI_JWT) from a local .env file.
load_dotenv()
# Drive the async example to completion on a dedicated event loop.
# NOTE(review): only generate() is invoked here; direct_use() is defined
# above as a reference example but never called.
loop = asyncio.new_event_loop()
loop.run_until_complete(generate())
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "novelai-python"
version = "0.7.0"
version = "0.7.1"
description = "NovelAI Python Binding With Pydantic"
authors = [
{ name = "sudoskys", email = "[email protected]" },
Expand Down
109 changes: 75 additions & 34 deletions src/novelai_python/sdk/ai/_enum.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,7 @@ def get_default_noise_schedule(sample_type: Sampler) -> NoiseSchedule:


def get_model_group(model: ModelTypeAlias) -> ModelGroups:
# 一般情况禁止转换
if isinstance(model, Enum):
model = model.value
else:
Expand All @@ -387,49 +388,89 @@ def get_model_group(model: ModelTypeAlias) -> ModelGroups:
return mapping.get(model, ModelGroups.STABLE_DIFFUSION)


def get_default_uc_preset(model: ModelTypeAlias, uc_preset: int) -> str:
if isinstance(model, Enum):
model = model.value
if isinstance(uc_preset, Enum):
uc_preset = uc_preset.value
@dataclass
class UcPrompt:
category: str
name: str
text: str

@dataclass
class UcPrompt:
label: str
text: str

mapper = {
ModelGroups.STABLE_DIFFUSION: [
UcPrompt("lowQualityPlusBadAnatomy",
def get_uc_preset(model: ModelTypeAlias) -> List[UcPrompt]:
prompts: List[UcPrompt] = []
if model in [
Model.SAFE_DIFFUSION,
Model.NAI_DIFFUSION,
Model.NAI_DIFFUSION_INPAINTING,
Model.SAFE_DIFFUSION_INPAINTING,
]:
prompts = [
UcPrompt(category="heavy", name="lowQualityPlusBadAnatomy",
text="lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"),
UcPrompt("lowQuality",
UcPrompt(category="light", name="lowQuality",
text="lowres, text, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"),
UcPrompt("none", text="lowres")
],
ModelGroups.STABLE_DIFFUSION_GROUP_2: [
UcPrompt("heavy",
UcPrompt(category="none", name="none", text="lowres"),
]
elif model in [
Model.NAI_DIFFUSION_FURRY,
Model.FURRY_DIFFUSION_INPAINTING,
]:
prompts = [
UcPrompt(category="light", name="lowQuality",
text="worst quality, low quality, what has science done, what, nightmare fuel, eldritch horror, where is your god now, why"),
UcPrompt(category="heavy", name="badAnatomy",
text="{worst quality}, low quality, distracting watermark, [nightmare fuel], {{unfinished}}, deformed, outline, pattern, simple background"),
UcPrompt(category="none", name="none", text="low res"),
]
elif model in [
Model.NAI_DIFFUSION_2,
]:
prompts = [
UcPrompt(category="heavy", name="heavy",
text="lowres, bad, text, error, missing, extra, fewer, cropped, jpeg artifacts, worst quality, bad quality, watermark, displeasing, unfinished, chromatic aberration, scan, scan artifacts"),
UcPrompt("light", text="lowres, jpeg artifacts, worst quality, watermark, blurry, very displeasing"),
UcPrompt("none", text="lowres")
],
ModelGroups.STABLE_DIFFUSION_XL: [
UcPrompt("heavy",
UcPrompt(category="light", name="light",
text="lowres, jpeg artifacts, worst quality, watermark, blurry, very displeasing"),
UcPrompt(category="none", name="none", text="lowres"),
]
elif model in [
Model.NAI_DIFFUSION_3,
Model.NAI_DIFFUSION_3_INPAINTING,
]:
prompts = [
UcPrompt(category="heavy", name="heavy",
text="lowres, {bad}, error, fewer, extra, missing, worst quality, jpeg artifacts, bad quality, watermark, unfinished, displeasing, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"),
UcPrompt("light", text="lowres, jpeg artifacts, worst quality, watermark, blurry, very displeasing"),
UcPrompt("humanFocus",
UcPrompt(category="light", name="light",
text="lowres, jpeg artifacts, worst quality, watermark, blurry, very displeasing"),
UcPrompt(category="human", name="humanFocus",
text="lowres, {bad}, error, fewer, extra, missing, worst quality, jpeg artifacts, bad quality, watermark, unfinished, displeasing, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], bad anatomy, bad hands, @_@, mismatched pupils, heart-shaped pupils, glowing eyes"),
UcPrompt("none", text="lowres")
],
ModelGroups.STABLE_DIFFUSION_XL_FURRY: [
UcPrompt("heavy",
UcPrompt(category="none", name="none", text="lowres"),
]
elif model in [
Model.NAI_DIFFUSION_FURRY_3,
Model.NAI_DIFFUSION_FURRY_3_INPAINTING,
]:
prompts = [
UcPrompt(category="heavy", name="heavy",
text="{{worst quality}}, [displeasing], {unusual pupils}, guide lines, {{unfinished}}, {bad}, url, artist name, {{tall image}}, mosaic, {sketch page}, comic panel, impact (font), [dated], {logo}, ych, {what}, {where is your god now}, {distorted text}, repeated text, {floating head}, {1994}, {widescreen}, absolutely everyone, sequence, {compression artifacts}, hard translated, {cropped}, {commissioner name}, unknown text, high contrast"),
UcPrompt("light",
UcPrompt(category="light", name="light",
text="{worst quality}, guide lines, unfinished, bad, url, tall image, widescreen, compression artifacts, unknown text"),
UcPrompt("none", text="lowres")
],
}
model_group = get_model_group(model)
prompts: List[UcPrompt] = mapper.get(model_group, [UcPrompt("none", "lowres")])
UcPrompt(category="none", name="none", text="lowres"),
]
elif model in [
Model.CUSTOM,
Model.NAI_DIFFUSION_4_CURATED_PREVIEW,
]:
prompts = [
UcPrompt(category="heavy", name="heavy",
text="blurry, lowres, error, film grain, scan artifacts, worst quality, bad quality, jpeg artifacts, very displeasing, chromatic aberration, logo, dated, signature, multiple views, gigantic breasts"),
UcPrompt(category="light", name="light",
text="blurry, lowres, error, worst quality, bad quality, jpeg artifacts, very displeasing, logo, dated, signature"),
UcPrompt(category="none", name="none", text=""),
]
return prompts


def get_default_uc_preset(model: ModelTypeAlias, uc_preset: int) -> str:
prompts = get_uc_preset(model)
if 0 <= uc_preset < len(prompts):
return prompts[uc_preset].text
else:
Expand Down
Loading

0 comments on commit 03e15d0

Please sign in to comment.