inference-sde.py
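"""Inference script for MotionEditor (the SDE variant, going by the file name).

Loads the pretrained VAE, 3D denoising UNet, estimator, pose guider, and CLIP
image encoder, then runs two passes: a reconstruction pass over the reference
video to collect its latents (save_kv=True), followed by an editing pass that
re-renders the frames under the target pose sequence, guided by the source and
target masks. Results are saved as GIF grids under `output/<date>/<run-name>/`.
"""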
import os
from datetime import datetime
from pathlib import Path
import argparse
from einops import repeat
from omegaconf import OmegaConf
from PIL import Image
from torchvision import transforms
from transformers import CLIPVisionModelWithProjection
from diffusers.utils.import_utils import is_xformers_available
from diffusers import AutoencoderKL, DDIMScheduler
import torch
import imageio
import numpy as np
import os.path as osp
from einops import rearrange
import torch.nn.functional as F
from src.models.attention_processor import AttnProcessor
from src.models.estimator import Estimator
from src.models.pose_guider import PoseGuider
from src.models.unet_3d import UNet3DConditionModel
from src.pipelines.motion_editor import MotionEditor
from src.pipelines.pipeline_pose2vid_collection import Pose2VideoCollectionPipeline
from src.utils.util import save_videos_grid, seed_everything, save_videos_as_frames
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config")
    parser.add_argument("--video_root", type=str)
    parser.add_argument("--pose_root", type=str)
    parser.add_argument("--ref_pose_root", type=str)
    parser.add_argument("--source_mask_root", type=str)
    parser.add_argument("--target_mask_root", type=str)
    parser.add_argument("-W", type=int, default=512)
    parser.add_argument("-H", type=int, default=512)
    parser.add_argument("-L", type=int, default=24)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--cfg", type=float, default=3.5)
    parser.add_argument("--steps", type=int, default=50)
    parser.add_argument("--fps", type=int, default=8)
    parser.add_argument("--accumulation_steps", type=int, default=1)
    # Note: --enable_xformers_memory_efficient_attention and --gradient_checkpointing
    # are plain strings, so any non-empty value (including "False") counts as enabled
    # in the truthiness checks below.
    parser.add_argument("--enable_xformers_memory_efficient_attention", type=str, default="True")
    parser.add_argument("--gradient_checkpointing", type=str, default="True")
    parser.add_argument("--num_inv_steps", type=int, default=50)
    parser.add_argument("--suffix", type=str, default="jpg")
    parser.add_argument("--camera", type=str, default="False")
    args = parser.parse_args()
    return args
def main():
    args = parse_args()
    config = OmegaConf.load(args.config)
    if args.seed is not None:
        seed_everything(args.seed)
    if config.weight_dtype == "fp16":
        weight_dtype = torch.float16
    else:
        weight_dtype = torch.float32

    # Load the pretrained components: VAE, 3D denoising UNet, estimator,
    # pose guider, and CLIP image encoder.
    vae = AutoencoderKL.from_pretrained(config.pretrained_vae_path).to("cuda", dtype=weight_dtype)
    inference_config_path = config.inference_config
    infer_config = OmegaConf.load(inference_config_path)
    denoising_unet = UNet3DConditionModel.from_pretrained_2d(
        config.pretrained_base_model_path,
        config.motion_module_path,
        subfolder="unet",
        unet_additional_kwargs=infer_config.unet_additional_kwargs,
    ).to(dtype=weight_dtype, device="cuda")
    estimator = Estimator.from_pretrained_2d(
        config.pretrained_base_model_path,
        config.motion_module_path,
        subfolder="unet",
        unet_additional_kwargs=infer_config.unet_additional_kwargs,
    ).to(dtype=weight_dtype, device="cuda")
    pose_guider = PoseGuider(320, block_out_channels=(16, 32, 96, 256)).to(dtype=weight_dtype, device="cuda")
    image_enc = CLIPVisionModelWithProjection.from_pretrained(config.image_encoder_path).to(
        dtype=weight_dtype, device="cuda"
    )
    sched_kwargs = OmegaConf.to_container(infer_config.noise_scheduler_kwargs)
    scheduler = DDIMScheduler(**sched_kwargs)
    ref_scheduler = DDIMScheduler(**sched_kwargs)
    vae.requires_grad_(False)
    image_enc.requires_grad_(False)
    denoising_unet.requires_grad_(False)
    pose_guider.requires_grad_(False)
    vae.eval()
    image_enc.eval()
    denoising_unet.eval()
    pose_guider.eval()
    generator = torch.manual_seed(args.seed)
    width, height = args.W, args.H

    # load pretrained weights
    denoising_unet.load_state_dict(
        torch.load(config.denoising_unet_path, map_location="cpu"),
        strict=False,
    )
    pose_guider.load_state_dict(
        torch.load(config.pose_guider_path, map_location="cpu"),
    )
    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            denoising_unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError(
                "xformers is not available. Make sure it is installed correctly"
            )
    if args.gradient_checkpointing:
        denoising_unet.enable_gradient_checkpointing()
    attention_processor = AttnProcessor(camera=args.camera)
    denoising_unet.set_attn_processor(attention_processor)
    # Editing pipeline (MotionEditor) and reconstruction pipeline for the reference video.
    pipe = MotionEditor(
        vae=vae,
        image_encoder=image_enc,
        denoising_unet=denoising_unet,
        pose_guider=pose_guider,
        scheduler=scheduler,
        estimator=estimator,
    )
    pipe = pipe.to("cuda", dtype=weight_dtype)
    date_str = datetime.now().strftime("%Y%m%d")
    time_str = datetime.now().strftime("%H%M")
    save_dir_name = f"{time_str}--seed_{args.seed}-{args.W}x{args.H}"
    save_dir = Path(f"output/{date_str}/{save_dir_name}")
    save_dir.mkdir(exist_ok=True, parents=True)
    ref_pipe = Pose2VideoCollectionPipeline(
        vae=vae,
        image_encoder=image_enc,
        denoising_unet=denoising_unet,
        pose_guider=pose_guider,
        scheduler=ref_scheduler,
    )
    ref_pipe = ref_pipe.to("cuda", dtype=weight_dtype)

    # Input roots for reference frames, pose maps, and masks.
    ref_images_path = args.video_root
    pose_video_path = args.pose_root
    ref_pose_video_path = args.ref_pose_root
    source_masks_path = args.source_mask_root
    target_masks_path = args.target_mask_root
    ref_suffix = "." + args.suffix
    # File names are expected to look like `<name>_<index>.<ext>`; sort by the frame index.
    ref_file_names = os.listdir(ref_images_path)
    ref_image_files = [file for file in ref_file_names if file.endswith(ref_suffix)]
    ref_image_files.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))
    pose_file_names = os.listdir(pose_video_path)
    pose_files = [file for file in pose_file_names if file.endswith('.png')]
    pose_files.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))
    ref_pose_file_names = os.listdir(ref_pose_video_path)
    ref_pose_files = [file for file in ref_pose_file_names if file.endswith('.png')]
    ref_pose_files.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))
    source_mask_file_names = os.listdir(source_masks_path)
    source_mask_files = [file for file in source_mask_file_names if file.endswith('.png')]
    source_mask_files.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))
    target_mask_file_names = os.listdir(target_masks_path)
    target_mask_files = [file for file in target_mask_file_names if file.endswith('.png')]
    target_mask_files.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))
    ref_images_list = []
    ref_images_tensor_list = []
    ref_transform = transforms.Compose(
        [transforms.Resize((height, width)), transforms.ToTensor()]
    )
    for ref_image_file in ref_image_files:
        ref_path = osp.join(ref_images_path, ref_image_file)
        ref_image_pil = Image.open(ref_path).convert("RGB")
        ref_images_tensor_list.append(ref_transform(ref_image_pil))
        ref_images_list.append(ref_image_pil)
    ref_images_tensor = torch.stack(ref_images_tensor_list, dim=0)  # (f, c, h, w)
    ref_images_tensor = ref_images_tensor.transpose(0, 1)

    pose_list = []
    pose_tensor_list = []
    pose_transform = transforms.Compose(
        [transforms.Resize((height, width)), transforms.ToTensor()]
    )
    for pose_file in pose_files:
        pose_path = osp.join(pose_video_path, pose_file)
        pose_image_pil = Image.open(pose_path).convert("RGB")
        pose_tensor_list.append(pose_transform(pose_image_pil))
        pose_list.append(pose_image_pil)
    pose_tensor = torch.stack(pose_tensor_list, dim=0)  # (f, c, h, w)
    pose_tensor = pose_tensor.transpose(0, 1)

    ref_pose_list = []
    ref_pose_tensor_list = []
    ref_pose_transform = transforms.Compose(
        [transforms.Resize((height, width)), transforms.ToTensor()]
    )
    for ref_pose_file in ref_pose_files:
        ref_pose_path = osp.join(ref_pose_video_path, ref_pose_file)
        ref_pose_image_pil = Image.open(ref_pose_path).convert("RGB")
        ref_pose_tensor_list.append(ref_pose_transform(ref_pose_image_pil))
        ref_pose_list.append(ref_pose_image_pil)
    ref_pose_tensor = torch.stack(ref_pose_tensor_list, dim=0)  # (f, c, h, w)
    ref_pose_tensor = ref_pose_tensor.transpose(0, 1)
    # Load binary masks (0/255), normalize to [0, 1], and resize to (height, width).
    source_mask_list = []
    target_mask_list = []
    for source_mask_file in source_mask_files:
        source_mask_path = osp.join(source_masks_path, source_mask_file)
        _source_mask = imageio.imread(source_mask_path).astype(np.float32)  # (h, w), values 0 and 255
        _source_mask /= 255
        source_mask_list.append(_source_mask)
    source_masks = torch.from_numpy(np.stack(source_mask_list, axis=0)).float()  # (f, h, w)
    source_masks = rearrange(source_masks[:, :, :, None], "f h w c -> f c h w")
    source_masks = F.interpolate(source_masks, size=(height, width), mode='nearest')
    for target_mask_file in target_mask_files:
        target_mask_path = osp.join(target_masks_path, target_mask_file)
        _target_mask = imageio.imread(target_mask_path).astype(np.float32)  # (h, w), values 0 and 255
        _target_mask /= 255
        target_mask_list.append(_target_mask)
    target_masks = torch.from_numpy(np.stack(target_mask_list, axis=0)).float()  # (f, h, w)
    target_masks = rearrange(target_masks[:, :, :, None], "f h w c -> f c h w")
    target_masks = F.interpolate(target_masks, size=(height, width), mode='nearest')
print("Attention! We are collecting the latents from reference frames in terms of reconstruction.")
print("You should confirm whether the initial latents of edited frames and reconstructed frames are identical")
ref_outputs = ref_pipe(
ref_images_list,
ref_pose_list,
width,
height,
args.L,
args.steps,
guidance_scale=0,
generator=generator,
save_kv=True,
)
ref_video = ref_outputs.videos
latents_collection = ref_outputs.collections
latents_org = ref_outputs.latents_org
video = pipe(
ref_images_list,
pose_list,
width,
height,
args.L,
args.steps,
args.cfg,
generator=generator,
source_masks=source_masks,
target_masks=target_masks,
ref_pose_images=ref_pose_list,
ref_latents_collections=latents_collection,
latents_org=latents_org,
save_kv=False,
).videos
    edited_video = video

    # Save grids of reference frames, pose frames, and the edited/reconstructed videos.
    pose_tensor = pose_tensor.unsqueeze(0)
    ref_images_tensor = ref_images_tensor.unsqueeze(0)
    video = torch.cat([ref_images_tensor, pose_tensor, video], dim=0)
    ref_name = os.path.basename(ref_images_path)
    pose_name = os.path.basename(pose_video_path)
    save_videos_grid(
        video,
        f"{save_dir}/{ref_name}_{pose_name}_{args.H}x{args.W}_{int(args.cfg)}_{time_str}.gif",
        n_rows=3,
        fps=args.fps,
    )
    ref_pose_tensor = ref_pose_tensor.unsqueeze(0)
    ref_video = torch.cat([ref_images_tensor, ref_pose_tensor, ref_video], dim=0)
    save_videos_grid(
        ref_video,
        f"{save_dir}/{ref_name}_{pose_name}_{args.H}x{args.W}_{int(args.cfg)}_{time_str}_ref.gif",
        n_rows=3,
        fps=args.fps,
    )
if __name__ == "__main__":
    main()
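# Example invocation (the paths and config name below are illustrative, not files
# shipped with the repo):
#
#   python inference-sde.py \
#       --config ./configs/inference.yaml \
#       --video_root ./data/case/frames \
#       --pose_root ./data/case/target_pose \
#       --ref_pose_root ./data/case/source_pose \
#       --source_mask_root ./data/case/source_mask \
#       --target_mask_root ./data/case/target_mask \
#       -W 512 -H 512 -L 24 --steps 50 --cfg 3.5 --suffix jpg
#
# Frames, poses, and masks are matched by index, so each directory should contain
# files named like `frame_0001.jpg` / `frame_0001.png`: the sort key parses the
# integer between the first underscore and the file extension.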