Thanks to the HuggingFace Diffusers team for the GPU sponsorship!
This repository provides code for extracting and visualizing cross-attention maps, based on the latest Diffusers code (v0.32.0).
For error reports or feature requests, feel free to open an issue.
[2024-12-22] It is now compatible with "Stable Diffusion 3.5", "Flux-dev" and "Flux-schnell"! ("Sana" will be the focus of the next update.)
[2024-12-17] Refactored the code and added setup.py.
[2024-11-12] "Stable Diffusion 3" is now compatible and supports batch operations! (Flux and "Stable Diffusion 3.5" are not compatible yet.)
[2024-07-04] Added features for saving attention maps by timestep and by layer (a sketch for aggregating them appears after the last example below).
Compatible with various models listed below.
- black-forest-labs/FLUX.1-schnell
- black-forest-labs/FLUX.1-dev
- stabilityai/stable-diffusion-3.5-medium
- stabilityai/stable-diffusion-3-medium-diffusers
- stabilityai/stable-diffusion-xl-base-1.0
- stabilityai/stable-diffusion-2-1-base
- ...
Install from source:
git clone https://github.com/wooyeolBaek/attention-map-diffusers.git
cd attention-map-diffusers
pip install -e .
or install from PyPI:
pip install attention_map_diffusers
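A quick sanity check (not part of the package itself) is to confirm that the names used in the examples below import cleanly:
python -c "from attention_map_diffusers import attn_maps, init_pipeline, save_attention_maps"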
FLUX.1-dev:

import torch
from diffusers import FluxPipeline
from attention_map_diffusers import (
    attn_maps,
    init_pipeline,
    save_attention_maps
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
)
# pipe.enable_model_cpu_offload()  # uncomment to save VRAM by offloading to the CPU (and skip pipe.to('cuda') below)
pipe.to('cuda')

##### 1. Replace modules and register hooks #####
pipe = init_pipeline(pipe)
#################################################

# Batched prompts are not recommended, as CPU memory may be exceeded.
prompts = [
    # "A photo of a puppy wearing a hat.",
    "A capybara holding a sign that reads Hello World.",
]

images = pipe(
    prompts,
    num_inference_steps=15,
    guidance_scale=4.5,
).images

for batch, image in enumerate(images):
    image.save(f'{batch}-flux-dev.png')

##### 2. Process and save attention maps #####
# Flux does not run an unconditional (negative-prompt) pass, so unconditional=False.
save_attention_maps(attn_maps, pipe.tokenizer, prompts, base_dir='attn_maps-flux-dev', unconditional=False)
###############################################
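To visualize a saved map, one option is to blend it over the generated image. The snippet below is only a sketch: it assumes save_attention_maps writes the per-token maps as PNG files somewhere under base_dir; the exact directory layout and file names may differ.

from pathlib import Path
from PIL import Image

generated = Image.open('0-flux-dev.png').convert('RGB')
# Assumption: the saved maps are image files somewhere under the base_dir used above.
map_files = sorted(Path('attn_maps-flux-dev').rglob('*.png'))
if map_files:
    attn = Image.open(map_files[0]).convert('L')
    # Upsample the low-resolution map to the image size and overlay it on the generation.
    heat = attn.resize(generated.size, Image.BILINEAR).convert('RGB')
    Image.blend(generated, heat, alpha=0.5).save('0-flux-dev-attn-overlay.png')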
FLUX.1-schnell:

import torch
from diffusers import FluxPipeline
from attention_map_diffusers import (
    attn_maps,
    init_pipeline,
    save_attention_maps
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=torch.bfloat16
)
# pipe.enable_model_cpu_offload()  # uncomment to save VRAM by offloading to the CPU (and skip pipe.to('cuda') below)
pipe.to('cuda')

##### 1. Replace modules and register hooks #####
pipe = init_pipeline(pipe)
#################################################

# Batched prompts are not recommended, as CPU memory may be exceeded.
prompts = [
    # "A photo of a puppy wearing a hat.",
    "A capybara holding a sign that reads Hello World.",
]

images = pipe(
    prompts,
    num_inference_steps=15,
    guidance_scale=4.5,
).images

for batch, image in enumerate(images):
    image.save(f'{batch}-flux-schnell.png')

##### 2. Process and save attention maps #####
save_attention_maps(attn_maps, pipe.tokenizer, prompts, base_dir='attn_maps-flux-schnell', unconditional=False)
###############################################
Stable Diffusion 3.5:

import torch
from diffusers import StableDiffusion3Pipeline
from attention_map_diffusers import (
    attn_maps,
    init_pipeline,
    save_attention_maps
)

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-medium",
    torch_dtype=torch.bfloat16
)
pipe = pipe.to("cuda")

##### 1. Replace modules and register hooks #####
pipe = init_pipeline(pipe)
#################################################

# Batched prompts are not recommended for SD3, as CPU memory may be exceeded.
prompts = [
    # "A photo of a puppy wearing a hat.",
    "A capybara holding a sign that reads Hello World.",
]

images = pipe(
    prompts,
    num_inference_steps=15,
    guidance_scale=4.5,
).images

for batch, image in enumerate(images):
    image.save(f'{batch}-sd3-5.png')

##### 2. Process and save attention maps #####
# Classifier-free guidance adds an unconditional pass, hence unconditional=True.
save_attention_maps(attn_maps, pipe.tokenizer, prompts, base_dir='attn_maps-sd3-5', unconditional=True)
###############################################
Stable Diffusion 3:

import torch
from diffusers import StableDiffusion3Pipeline
from attention_map_diffusers import (
    attn_maps,
    init_pipeline,
    save_attention_maps
)

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    torch_dtype=torch.bfloat16
)
pipe = pipe.to("cuda")

##### 1. Replace modules and register hooks #####
pipe = init_pipeline(pipe)
#################################################

# Batched prompts are not recommended for SD3, as CPU memory may be exceeded.
prompts = [
    # "A photo of a puppy wearing a hat.",
    "A capybara holding a sign that reads Hello World.",
]

images = pipe(
    prompts,
    num_inference_steps=15,
    guidance_scale=4.5,
).images

for batch, image in enumerate(images):
    image.save(f'{batch}-sd3.png')

##### 2. Process and save attention maps #####
save_attention_maps(attn_maps, pipe.tokenizer, prompts, base_dir='attn_maps', unconditional=True)
###############################################
Stable Diffusion XL:

import torch
from diffusers import DiffusionPipeline
from attention_map_diffusers import (
    attn_maps,
    init_pipeline,
    save_attention_maps
)

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

##### 1. Replace modules and register hooks #####
pipe = init_pipeline(pipe)
#################################################

prompts = [
    "A photo of a puppy wearing a hat.",
    "A capybara holding a sign that reads Hello World.",
]

images = pipe(
    prompts,
    num_inference_steps=15,
).images

for batch, image in enumerate(images):
    image.save(f'{batch}-sdxl.png')

##### 2. Process and save attention maps #####
save_attention_maps(attn_maps, pipe.tokenizer, prompts, base_dir='attn_maps', unconditional=True)
###############################################
Stable Diffusion 2.1:

import torch
from diffusers import DiffusionPipeline
from attention_map_diffusers import (
    attn_maps,
    init_pipeline,
    save_attention_maps
)

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

##### 1. Replace modules and register hooks #####
pipe = init_pipeline(pipe)
#################################################

prompts = [
    "A photo of a puppy wearing a hat.",
    "A capybara holding a sign that reads Hello World.",
]

images = pipe(
    prompts,
    num_inference_steps=15,
).images

for batch, image in enumerate(images):
    image.save(f'{batch}-sd2-1.png')

##### 2. Process and save attention maps #####
save_attention_maps(attn_maps, pipe.tokenizer, prompts, base_dir='attn_maps', unconditional=True)
###############################################
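Since the maps can be saved per timestep and per layer, a quick overall heatmap for a run can be obtained by averaging everything found under base_dir. This is only a sketch under the same assumption as above, namely that the saved maps are grayscale image files; adjust the paths and resolution to your setup.

from pathlib import Path
import numpy as np
from PIL import Image

base_dir = Path('attn_maps')   # the same base_dir passed to save_attention_maps above
size = (512, 512)              # target resolution for the averaged heatmap

# Assumption: every *.png under base_dir is a grayscale attention map from this run.
maps = [
    np.asarray(Image.open(p).convert('L').resize(size, Image.BILINEAR), dtype=np.float32)
    for p in sorted(base_dir.rglob('*.png'))
]
if maps:
    mean_map = np.mean(maps, axis=0)
    # Normalize to 0-255 and save as an 8-bit grayscale image.
    mean_map = 255.0 * (mean_map - mean_map.min()) / (mean_map.max() - mean_map.min() + 1e-8)
    Image.fromarray(mean_map.astype(np.uint8)).save('mean-attn-map.png')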