Skip to content

Commit

Permalink
Merge pull request #329 from Dartvauder/dev
Browse files Browse the repository at this point in the history
FIXES
  • Loading branch information
Dartvauder authored Oct 27, 2024
2 parents ab85fcb + 42acedd commit ffef8e5
Show file tree
Hide file tree
Showing 11 changed files with 68 additions and 11 deletions.
1 change: 1 addition & 0 deletions Install.bat
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ if "%INSTALL_TYPE%"=="CPU" (
)
echo INSTALL_TYPE=%INSTALL_TYPE%> "%CURRENT_DIR%install_config.txt"

pip install https://huggingface.co/madbuda/triton-windows-builds/resolve/main/triton-3.0.0-cp310-cp310-win_amd64.whl 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> %ERROR_LOG%
Expand Down
1 change: 1 addition & 0 deletions Install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ else
fi
echo "INSTALL_TYPE=$INSTALL_TYPE" > "$CURRENT_DIR/install_config.txt"

pip install triton==3.0.0 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> "$ERROR_LOG"
Expand Down
29 changes: 26 additions & 3 deletions LaunchFile/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
temp_dir = os.path.join("temp")
os.makedirs(temp_dir, exist_ok=True)
os.environ["TMPDIR"] = temp_dir
sys.modules['triton'] = None
from threading import Thread
import gradio as gr
import langdetect
Expand Down Expand Up @@ -115,6 +116,7 @@ def wrapper():
diffusers = lazy_import('diffusers', '')
BlipDiffusionPipeline = lazy_import('diffusers.pipelines', 'BlipDiffusionPipeline')
StableDiffusionPipeline = lazy_import('diffusers', 'StableDiffusionPipeline')
StableDiffusionPanoramaPipeline = lazy_import('diffusers', 'StableDiffusionPanoramaPipeline')
StableDiffusion3Pipeline = lazy_import('diffusers', 'StableDiffusion3Pipeline')
StableDiffusionXLPipeline = lazy_import('diffusers', 'StableDiffusionXLPipeline')
StableDiffusionImg2ImgPipeline = lazy_import('diffusers', 'StableDiffusionImg2ImgPipeline')
Expand Down Expand Up @@ -2047,7 +2049,7 @@ def generate_image_txt2img(prompt, negative_prompt, style_name, stable_diffusion
enable_freeu, freeu_s1, freeu_s2, freeu_b1, freeu_b2,
enable_sag, sag_scale, enable_pag, pag_scale, enable_token_merging, ratio,
enable_deepcache, cache_interval, cache_branch_id, enable_tgate, gate_step,
enable_magicprompt, magicprompt_max_new_tokens, enable_cdvae, enable_taesd, output_format, progress=gr.Progress()):
enable_magicprompt, magicprompt_max_new_tokens, enable_cdvae, enable_taesd, enable_multidiffusion, circular_padding, output_format, progress=gr.Progress()):
global stop_signal
stop_signal = False
stop_idx = None
Expand Down Expand Up @@ -2145,6 +2147,10 @@ def generate_image_txt2img(prompt, negative_prompt, style_name, stable_diffusion
stable_diffusion_model = StableDiffusionXLPipeline().StableDiffusionXLPipeline.from_single_file(
stable_diffusion_model_path, use_safetensors=True, device_map="auto", attention_slice=1,
torch_dtype=torch_dtype, variant=variant, vae=vae_xl)
elif enable_multidiffusion:
stable_diffusion_model = StableDiffusionPanoramaPipeline().StableDiffusionPanoramaPipeline.from_single_file(
stable_diffusion_model_path, use_safetensors=True, device_map="auto",
torch_dtype=torch_dtype, variant=variant)
else:
if stable_diffusion_model_type == "SD":
stable_diffusion_model = StableDiffusionPipeline().StableDiffusionPipeline.from_single_file(
Expand Down Expand Up @@ -2494,6 +2500,21 @@ def combined_callback(stable_diffusion_model, i, t, callback_kwargs):
num_images_per_prompt=num_images_per_prompt,
generator=generator, callback_on_step_end=combined_callback,
callback_on_step_end_tensor_inputs=["latents"]).images
elif enable_multidiffusion:
images = stable_diffusion_model(
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=stable_diffusion_steps,
guidance_scale=stable_diffusion_cfg,
height=stable_diffusion_height,
width=stable_diffusion_width,
clip_skip=stable_diffusion_clip_skip,
circular_padding=circular_padding,
num_images_per_prompt=num_images_per_prompt,
generator=generator,
callback_on_step_end=combined_callback,
callback_on_step_end_tensor_inputs=["latents"]
).images
else:
compel_proc = Compel(tokenizer=stable_diffusion_model.tokenizer,
text_encoder=stable_diffusion_model.text_encoder)
Expand Down Expand Up @@ -10583,10 +10604,10 @@ def create_footer():
footer_html = """
<div style="text-align: center; background-color: #f0f0f0; padding: 10px; border-radius: 5px; margin-top: 20px;">
<span style="margin-right: 15px;">🔥 diffusers: 0.31.0</span>
<span style="margin-right: 15px;">📄 transformers: 4.45.2</span>
<span style="margin-right: 15px;">📄 transformers: 4.46.0</span>
<span style="margin-right: 15px;">🦙 llama-cpp-python: 0.3.1</span>
<span style="margin-right: 15px;">🖼️ stable-diffusion-cpp-python: 0.1.8</span>
<span>ℹ️ gradio: 5.3.0</span>
<span>ℹ️ gradio: 5.4.0</span>
</div>
"""
return gr.Markdown(footer_html)
Expand Down Expand Up @@ -10859,6 +10880,8 @@ def create_footer():
gr.Slider(minimum=32, maximum=256, value=50, step=1, label=_("MagicPrompt Max New Tokens", lang)),
gr.Checkbox(label=_("Enable CDVAE", lang), value=False),
gr.Checkbox(label=_("Enable TAESD", lang), value=False),
gr.Checkbox(label=_("Enable MultiDiffusion", lang), value=False),
gr.Checkbox(label=_("Enable Circular padding (for MultiDiffusion)", lang), value=False),
gr.Radio(choices=["png", "jpeg"], label=_("Select output format", lang), value="png", interactive=True)
],
additional_inputs_accordion=gr.Accordion(label=_("Additional StableDiffusion Settings", lang), open=False),
Expand Down
7 changes: 4 additions & 3 deletions RequirementsFiles/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ GitPython==3.1.43
google-pasta==0.2.0
GPUtil==1.4.0
gpytoolbox==0.3.2
gradio==5.3.0
gradio==5.4.0
gradio_client==1.4.2
grpcio==1.62.2
gruut==2.2.3
Expand Down Expand Up @@ -217,7 +217,7 @@ python-crfsuite==0.9.10
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-ffmpeg==2.0.12
python-multipart==0.0.9
python-multipart==0.0.12
pytorch-lightning==2.4.0
pytz==2024.1
PyWavelets==1.6.0
Expand All @@ -232,6 +232,7 @@ rich==13.7.1
rpds-py==0.18.0
ruff==0.4.2
safetensors==0.4.3
safehttpx==0.1.1
scikit-image==0.23.2
scikit-learn==1.4.2
scipy==1.11.4
Expand Down Expand Up @@ -281,7 +282,7 @@ torchsde==0.2.6
tqdm==4.66.5
trainer==0.0.36
trampoline==0.1.2
transformers==4.45.2
transformers==4.46.0
treetable==0.2.5
trimesh==4.4.7
trio==0.25.0
Expand Down
1 change: 1 addition & 0 deletions Update.bat
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ if "%INSTALL_TYPE%"=="CPU" (
pip install --no-deps -r "%CURRENT_DIR%RequirementsFiles\requirements-stable-diffusion-cpp.txt" 2>> %ERROR_LOG%
)

pip install https://huggingface.co/madbuda/triton-windows-builds/resolve/main/triton-3.0.0-cp310-cp310-win_amd64.whl 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> %ERROR_LOG%
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> %ERROR_LOG%
Expand Down
1 change: 1 addition & 0 deletions Update.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ else
pip install --no-deps -r "$CURRENT_DIR/RequirementsFiles/requirements-stable-diffusion-cpp.txt" 2>> "$ERROR_LOG"
fi

pip install triton==3.0.0 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/PanQiWei/AutoGPTQ.git#egg=auto_gptq@v0.7.1 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/casper-hansen/AutoAWQ.git#egg=autoawq@v0.2.6 2>> "$ERROR_LOG"
pip install --no-build-isolation -e git+https://github.com/turboderp/exllamav2.git#egg=exllamav2@v0.2.3 2>> "$ERROR_LOG"
Expand Down
5 changes: 2 additions & 3 deletions Venv.bat
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,8 @@ if "%choice%"=="3" (
if /i "%confirm%"=="y" (
echo Deleting application...
cd ..
rd /s /q "%~dp0"
echo Application deleted successfully.
pause
set DELETE_DIR=%CURRENT_DIR%
start cmd /c "timeout /t 1 & rd /s /q "%DELETE_DIR%" & exit"
exit
)
goto menu
Expand Down
2 changes: 1 addition & 1 deletion Venv.sh
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ while true; do
read -p "Are you sure you want to delete the application? (y/n): " confirm
if [[ $confirm == [Yy]* ]]; then
echo "Deleting application..."
cd ..
cd .. || exit 1
rm -rf "${CURRENT_DIR}"
echo "Application deleted successfully."
exit 0
Expand Down
28 changes: 27 additions & 1 deletion first_setup.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import json
import os
import sys
from typing import Dict, Optional
from typing import Dict, Optional, Tuple


def load_settings() -> Dict:
Expand Down Expand Up @@ -55,6 +55,28 @@ def select_auto_launch() -> bool:
print("\nНеверный выбор! / Invalid choice! / 选择无效!")


def input_auth_credentials(credentials: Optional[str] = None) -> Tuple[str, str]:
    """Return a (username, password) pair for the app's auth settings.

    Prompts the user for "login:password" on stdin unless *credentials*
    is supplied (backward-compatible: existing callers pass nothing and
    get the interactive prompt). Falls back to ("admin", "admin") when
    the input is empty or malformed.
    """
    if credentials is None:
        print("\nВведите логин и пароль (формат login:password) или нажмите Enter для пропуска:")
        print("Enter login and password (format login:password) or press Enter to skip:")
        print("输入登录名和密码(格式 login:password)或按 Enter 跳过:")
        credentials = input().strip()
    else:
        credentials = credentials.strip()

    if not credentials:
        return "admin", "admin"

    # partition splits on the FIRST ':' only, so passwords may themselves
    # contain ':' (plain split(':') raised ValueError and silently fell
    # back to the default for such passwords).
    username, _, password = credentials.partition(':')
    # Strip BEFORE the truthiness check: input like " : " must not yield
    # empty-string credentials.
    username, password = username.strip(), password.strip()
    if username and password:
        return username, password

    print("\nНеверный формат! Используется значение по умолчанию (admin:admin)")
    print("Invalid format! Using default value (admin:admin)")
    print("格式无效!使用默认值 (admin:admin)")
    return "admin", "admin"


def input_hf_token() -> Optional[str]:
print("\nВведите ваш Hugging Face токен (или нажмите Enter для пропуска):")
print("Enter your Hugging Face token (or press Enter to skip):")
Expand Down Expand Up @@ -90,6 +112,10 @@ def main():
auto_launch = select_auto_launch()
settings['auto_launch'] = auto_launch

username, password = input_auth_credentials()
settings['auth']['username'] = username
settings['auth']['password'] = password

token = input_hf_token()
if token:
settings['hf_token'] = token
Expand Down
2 changes: 2 additions & 0 deletions translations/ru.json
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@
"Context batch (N_BATCH) for llama type models": "Размер пакета контекста (N_BATCH) для моделей типа llama",
"Min P": "Минимальное P",
"Typical P": "Типичное P",
"Enable MultiDiffusion": "Включить мультидиффузию",
"Enable Circular padding (for MultiDiffusion)": "Включить круговое заполнение (для мультидиффузии)",
"Stop sequences (optional)": "Последовательности остановки (необязательно)",
"TTS Repetition penalty": "TTS Штраф за повторение",
"TTS Length penalty": "TTS Штраф за длину",
Expand Down
2 changes: 2 additions & 0 deletions translations/zh.json
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@
"Context batch (N_BATCH) for llama type models": "llama类型模型的上下文批次 (N_BATCH)",
"Min P": "最小P值",
"Typical P": "典型P值",
"Enable MultiDiffusion": "启用多重扩散",
"Enable Circular padding (for MultiDiffusion)": "启用循环填充(用于多重扩散)",
"Stop sequences (optional)": "停止序列(可选)",
"TTS Repetition penalty": "TTS重复惩罚",
"TTS Length penalty": "TTS长度惩罚",
Expand Down

0 comments on commit ffef8e5

Please sign in to comment.