# t2i_config.py — model/VAE lists and runtime settings for the t2i_test1 app.
# Checkpoint repos (HF repo ids) selectable in the UI.
models = [
'Yntec/YiffyMix',
'Raelina/Rae-Diffusion-XL-V2',
'Raelina/Raemu-XL-V4',
'Raelina/Raemu-XL-V5',
'Raelina/Raena-XL-V2',
'Raelina/Raehoshi-illust-XL',
'Raelina/Raehoshi-illust-xl-2',
'Raelina/Raehoshi-Illust-XL-2.1',
'Raelina/Raehoshi-illust-XL-3',
'Raelina/Raehoshi-illust-XL-4',
'Raelina/Raehoshi-illust-XL-8',
]
# VAEs for SDXL-family checkpoints (repo id or direct file URL).
sdxl_vaes = [
"madebyollin/sdxl-vae-fp16-fix",
"https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/blob/main/sdxl_vae-fp16fix-blessed.safetensors",
]
# VAEs for SD1.5-family checkpoints.
# NOTE(review): epiCPhotoGasmVAE.safetensors was previously listed under
# `models`, but it is a VAE weights file (for the SD1.5 model epiCPhotoGasm),
# not a checkpoint — moved here; confirm the loader picks it up as intended.
sd15_vaes = [
"https://huggingface.co/Yntec/epiCPhotoGasm/blob/main/epiCPhotoGasmVAE.safetensors",
]
# --- Resource limits (GiB) ---
STORAGE_MAX_GIB = 40  # presumably the on-disk cache budget — TODO confirm against the loader
PIPELINE_MAX_GIB = 30  # presumably the loaded-pipeline memory budget — TODO confirm against the loader
DEFAULT_DURATION = 0 # if 0, duration is chosen automatically
IS_DEBUG = True  # enables debug-only behavior elsewhere in the app
# kernels attention backend (Diffusers attention dispatcher)
# '' or None: disabled. 'auto': Hopper->'_flash_3_hub' else ATTENTION_BACKEND_NON_HOPPER.
ATTENTION_BACKEND = 'auto'
# Backend selected when ATTENTION_BACKEND == 'auto' and the GPU is not Hopper.
ATTENTION_BACKEND_NON_HOPPER = 'flash_hub'
# kernels hub prefetch (to avoid first-inference heavy download)
# Notes:
# - This does not remove the download requirement; it moves it to app startup.
# - Add more repos if you also use 'flash_hub' (FlashAttention2) or 'sage_hub'.
KERNELS_PREFETCH_ON_STARTUP = True
# Kernel repos downloaded at startup when prefetch is enabled.
KERNELS_PREFETCH_REPOS = [
"kernels-community/flash-attn3",
# "kernels-community/flash-attn2",
# "kernels-community/sage_attention",
]