# run_editing_instructpix2pix.py
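"""Run the InstructPix2Pix editing baseline over a benchmark mapping file.

Summary (inferred from the code below): for every entry in
``mapping_file.json`` whose editing category is selected, the script loads
the source image, applies the entry's editing instruction with
InstructPix2Pix, and saves a horizontal strip of the instruction text, the
input image, a black placeholder image, and the edited result.
"""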
from __future__ import annotations

import argparse
import json
import math
import os
import random
import sys

import einops
import k_diffusion as K
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import autocast

from utils.utils import txt_draw

sys.path.append("models/instructpix2pix/stable_diffusion")
from ldm.util import instantiate_from_config  # noqa: E402  (needs the path appended above)
def setup_seed(seed=1234):
    """Make sampling reproducible across the torch, numpy, and python RNGs."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class CFGDenoiser(nn.Module):
    """Wraps a k-diffusion denoiser to apply InstructPix2Pix's dual
    (text + image) classifier-free guidance in a single batched call."""

    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_scale):
        # Triplicate latent and sigma so one forward pass covers the
        # fully-conditioned, image-only, and unconditioned branches.
        cfg_z = einops.repeat(z, "1 ... -> n ...", n=3)
        cfg_sigma = einops.repeat(sigma, "1 ... -> n ...", n=3)
        cfg_cond = {
            "c_crossattn": [torch.cat([cond["c_crossattn"][0], uncond["c_crossattn"][0], uncond["c_crossattn"][0]])],
            "c_concat": [torch.cat([cond["c_concat"][0], cond["c_concat"][0], uncond["c_concat"][0]])],
        }
        out_cond, out_img_cond, out_uncond = self.inner_model(cfg_z, cfg_sigma, cond=cfg_cond).chunk(3)
        return out_uncond + text_cfg_scale * (out_cond - out_img_cond) + image_cfg_scale * (out_img_cond - out_uncond)
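# The return expression above is the two-scale classifier-free guidance from
# the InstructPix2Pix paper (Brooks et al., 2023), rearranged algebraically:
#     e~(z, c_I, c_T) = e(z, 0, 0)
#                       + s_I * (e(z, c_I, 0) - e(z, 0, 0))
#                       + s_T * (e(z, c_I, c_T) - e(z, c_I, 0))
# where out_cond = e(z, c_I, c_T), out_img_cond = e(z, c_I, 0),
# out_uncond = e(z, 0, 0), s_T = text_cfg_scale, and s_I = image_cfg_scale.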
def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    if vae_ckpt is not None:
        # Optionally swap in a separately fine-tuned VAE by remapping its
        # weights onto the first_stage_model.* keys of the main checkpoint.
        print(f"Loading VAE from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")["state_dict"]
        sd = {
            k: vae_sd[k[len("first_stage_model.") :]] if k.startswith("first_stage_model.") else v
            for k, v in sd.items()
        }
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)
    return model
def mask_decode(encoded_mask, image_shape=(512, 512)):
    """Decode a run-length-encoded mask (alternating pairs of flat start
    index and run length) into a 2-D binary array."""
    length = image_shape[0] * image_shape[1]
    mask_array = np.zeros((length,))
    for i in range(0, len(encoded_mask), 2):
        splice_len = min(encoded_mask[i + 1], length - encoded_mask[i])
        for j in range(splice_len):
            mask_array[encoded_mask[i] + j] = 1
    mask_array = mask_array.reshape(image_shape[0], image_shape[1])
    # Force the one-pixel border on, to avoid annotation errors at the boundary.
    mask_array[0, :] = 1
    mask_array[-1, :] = 1
    mask_array[:, 0] = 1
    mask_array[:, -1] = 1
    return mask_array
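# Example (hypothetical values): encoded_mask = [1024, 3, 2048, 2] marks flat
# indices 1024-1026 and 2048-2049 of the flattened 512*512 grid as foreground,
# i.e. the encoding is a sequence of (start_index, run_length) pairs.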
def edit_instruct_pix2pix(
    edit_method,
    input_image_path,
    edit,
    resolution=512,
    steps=50,
    cfg_text=7.5,
    cfg_image=1.5,
):
    # Relies on the globally loaded `model` (see __main__ below).
    if edit_method == "instruct-pix2pix":
        model_wrap = K.external.CompVisDenoiser(model)
        model_wrap_cfg = CFGDenoiser(model_wrap)
        null_token = model.get_learned_conditioning([""])

        original_image = Image.open(input_image_path).convert("RGB")
        # Resize so the longer side is close to `resolution` and both sides
        # are multiples of 64, as the latent-space UNet requires.
        width, height = original_image.size
        factor = resolution / max(width, height)
        factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
        width = int((width * factor) // 64) * 64
        height = int((height * factor) // 64) * 64
        input_image = ImageOps.fit(original_image, (width, height), method=Image.Resampling.LANCZOS)

        with torch.no_grad(), autocast("cuda"), model.ema_scope():
            cond = {}
            cond["c_crossattn"] = [model.get_learned_conditioning([edit])]
            input_image = 2 * torch.tensor(np.array(input_image)).float() / 255 - 1
            input_image = rearrange(input_image, "h w c -> 1 c h w").to(model.device)
            cond["c_concat"] = [model.encode_first_stage(input_image).mode()]

            uncond = {}
            uncond["c_crossattn"] = [null_token]
            uncond["c_concat"] = [torch.zeros_like(cond["c_concat"][0])]

            sigmas = model_wrap.get_sigmas(steps)

            extra_args = {
                "cond": cond,
                "uncond": uncond,
                "text_cfg_scale": cfg_text,
                "image_cfg_scale": cfg_image,
            }

            z = torch.randn_like(cond["c_concat"][0]) * sigmas[0]
            z = K.sampling.sample_euler_ancestral(model_wrap_cfg, z, sigmas, extra_args=extra_args)
            x = model.decode_first_stage(z)
            x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)
            x = 255.0 * rearrange(x, "1 c h w -> h w c")
            edited_image = Image.fromarray(x.type(torch.uint8).cpu().numpy())

        image_instruct = txt_draw(f"edit prompt: {edit}")

        # Strip: instruction text | input image | black placeholder | edited result.
        return Image.fromarray(
            np.concatenate(
                (image_instruct, np.array(original_image), np.zeros_like(image_instruct), np.array(edited_image)),
                axis=1,
            )
        )
    else:
        raise NotImplementedError(f"No edit method named {edit_method}")
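# A minimal standalone call might look like this (hypothetical path; assumes
# `model` has already been loaded globally, as in __main__ below):
#
#   result = edit_instruct_pix2pix(
#       edit_method="instruct-pix2pix",
#       input_image_path="data/annotation_images/example.jpg",
#       edit="make it snowy",
#   )
#   result.save("edited_strip.jpg")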
image_save_paths = {
    "instruct-pix2pix": "instruct-pix2pix",
}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--rerun_exist_images', action="store_true")  # rerun images whose outputs already exist
    parser.add_argument('--data_path', type=str, default="data")  # root of the benchmark data
    parser.add_argument('--output_path', type=str, default="output")  # root for edited-image outputs
    parser.add_argument('--edit_category_list', nargs='+', type=str,
                        default=["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"])  # the editing categories to run
    parser.add_argument('--edit_method_list', nargs='+', type=str,
                        default=["instruct-pix2pix"])  # the editing methods to run
    parser.add_argument('--checkpoint', type=str,
                        default="data/checkpoints/instruct-pix2pix-00-22000.ckpt")  # InstructPix2Pix checkpoint to load
    args = parser.parse_args()

    rerun_exist_images = args.rerun_exist_images
    data_path = args.data_path
    output_path = args.output_path
    edit_category_list = args.edit_category_list
    edit_method_list = args.edit_method_list
    checkpoint = args.checkpoint

    with open(os.path.join(data_path, "mapping_file.json"), "r") as f:
        editing_instructions = json.load(f)

    config = OmegaConf.load("models/instructpix2pix/configs/generate.yaml")
    model = load_model_from_config(config, checkpoint, None)
    model.eval().cuda()

    for key, item in editing_instructions.items():
        if item["editing_type_id"] not in edit_category_list:
            continue

        # original_prompt, editing_prompt, blended_word, and mask are parsed
        # for parity with the other editing scripts but are unused below.
        original_prompt = item["original_prompt"].replace("[", "").replace("]", "")
        editing_prompt = item["editing_prompt"].replace("[", "").replace("]", "")
        image_path = os.path.join(f"{data_path}/annotation_images", item["image_path"])
        editing_instruction = item["editing_instruction"]
        blended_word = item["blended_word"].split(" ") if item["blended_word"] != "" else []
        mask = Image.fromarray(np.uint8(mask_decode(item["mask"])[:, :, np.newaxis].repeat(3, 2))).convert("L")

        for edit_method in edit_method_list:
            present_image_save_path = image_path.replace(data_path, os.path.join(output_path, image_save_paths[edit_method]))
            if (not os.path.exists(present_image_save_path)) or rerun_exist_images:
                print(f"editing image [{image_path}] with [{edit_method}]")
                setup_seed()
                torch.cuda.empty_cache()
                edited_image = edit_instruct_pix2pix(
                    edit_method=edit_method,
                    input_image_path=image_path,
                    edit=editing_instruction,
                )
                os.makedirs(os.path.dirname(present_image_save_path), exist_ok=True)
                edited_image.save(present_image_save_path)
                print("finish")
            else:
                print(f"skip image [{image_path}] with [{edit_method}]")