Update model.py
model.py CHANGED
@@ -122,8 +122,8 @@ class Model:
         elif preprocessor_name == 'No preprocess':
             condition_img = image
         print('get edge')
-        del self.preprocessor.model
-        torch.cuda.empty_cache()
+        # del self.preprocessor.model
+        # torch.cuda.empty_cache()
         condition_img = condition_img.resize((512,512))
         W, H = condition_img.size

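The commented-out teardown is the whole change in this hunk: in the 'No preprocess' branch no preprocessor model is ever loaded, so the unconditional `del self.preprocessor.model` can raise AttributeError, and the commit disables the teardown for every branch at the cost of keeping the preprocessor resident in VRAM. A minimal sketch of a guarded alternative; the helper name and the `model` attribute probe are assumptions, not this repo's API:

```python
import torch

def release_preprocessor(preprocessor) -> None:
    # Hypothetical helper: free the preprocessor's weights only when a model
    # was actually loaded, instead of deleting the attribute unconditionally.
    if getattr(preprocessor, "model", None) is not None:
        del preprocessor.model       # drop the last reference to the weights
        torch.cuda.empty_cache()     # return cached blocks to the CUDA allocator
```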
@@ -131,10 +131,10 @@ class Model:
         self.load_gpt_weight('edge')
         self.gpt_model.to('cuda').to(torch.bfloat16)
         self.vq_model.to('cuda')
-        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(
+        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(3,1,1,1)
         condition_img = condition_img.to(self.device)
         condition_img = 2*(condition_img/255 - 0.5)
-        prompts = [prompt] *
+        prompts = [prompt] * 3
         caption_embs, emb_masks = self.t5_model.get_text_embeddings(prompts)

         print(f"processing left-padding...")
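The two edited lines in this hunk have to agree: `repeat(3,1,1,1)` triples the condition image along the batch dimension, and `[prompt] * 3` hands the T5 encoder a matching number of prompts. A standalone sketch of that plumbing, assuming a 512x512 RGB condition image:

```python
import numpy as np
import torch
from PIL import Image

img = Image.new("RGB", (512, 512))          # stand-in for the condition image
x = torch.from_numpy(np.array(img))         # (512, 512, 3) uint8, HWC layout
x = x.unsqueeze(0).permute(0, 3, 1, 2)      # (1, 3, 512, 512), NCHW layout
x = x.repeat(3, 1, 1, 1)                    # (3, 3, 512, 512): batch of three
x = 2 * (x / 255 - 0.5)                     # uint8 [0, 255] -> float [-1, 1]
prompts = ["a prompt"] * 3                  # one prompt per batch item
assert x.shape[0] == len(prompts)           # batch sizes must agree
```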
@@ -181,7 +181,7 @@ class Model:
         samples = [
             Image.fromarray(
                 sample.permute(1, 2, 0).cpu().detach().numpy().clip(
-                    0, 255).astype(np.uint8)) for sample in samples
+                    0, 255).astype(np.uint8).resize((origin_W, origin_H))) for sample in samples
         ]
         del condition_img
         torch.cuda.empty_cache()
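One caution on the added `.resize((origin_W, origin_H))`: it is chained onto a NumPy array, and `np.ndarray.resize` mutates in place and returns None, so `Image.fromarray` receives None and raises at generation time (a plausible source of this Space's runtime error). PIL's `Image.resize` on the constructed image is presumably what was intended; a sketch of that ordering, with `origin_W`/`origin_H` standing for the input's pre-512x512 dimensions:

```python
import numpy as np
from PIL import Image

origin_W, origin_H = 640, 480                  # hypothetical original size
arr = np.zeros((512, 512, 3), dtype=np.uint8)  # one decoded sample, HWC uint8

# Convert to a PIL image first, then resample back to the original size;
# chaining .resize() on the ndarray itself returns None.
img = Image.fromarray(arr).resize((origin_W, origin_H))
```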
@@ -225,10 +225,10 @@ class Model:
         self.load_gpt_weight('depth')
         self.gpt_model.to('cuda').to(torch.bfloat16)
         self.vq_model.to(self.device)
-        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(
+        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(3,1,1,1)
         condition_img = condition_img.to(self.device)
         condition_img = 2*(condition_img/255 - 0.5)
-        prompts = [prompt] *
+        prompts = [prompt] * 3
         caption_embs, emb_masks = self.t5_model.get_text_embeddings(prompts)

         print(f"processing left-padding...")
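This depth hunk mirrors the edge hunk above, with the same batching fix. One difference worth noting: the edge path moves `vq_model` to the literal 'cuda' while this path uses `self.device`. A sketch of unifying the two on a single device (the stand-in modules are hypothetical):

```python
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
gpt_model = torch.nn.Identity()     # stand-in for the GPT backbone
vq_model = torch.nn.Identity()      # stand-in for the VQ decoder

gpt_model = gpt_model.to(device)
if device.type == "cuda":
    gpt_model = gpt_model.to(torch.bfloat16)  # bf16 on GPU, as both hunks do
vq_model = vq_model.to(device)                # same device in both branches
```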
@@ -276,7 +276,7 @@ class Model:
         samples = 255 * (samples * 0.5 + 0.5)
         samples = [
             Image.fromarray(
-                sample.permute(1, 2, 0).cpu().detach().numpy().clip(0, 255).astype(np.uint8))
+                sample.permute(1, 2, 0).cpu().detach().numpy().clip(0, 255).astype(np.uint8).resize((origin_W, origin_H)))
             for sample in samples
         ]
         del condition_img
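The NumPy-vs-PIL `resize` caution from the edge hunk applies verbatim here. For context on the unchanged line 276: the decoder emits samples in [-1, 1], and `255 * (samples * 0.5 + 0.5)` maps them back to pixel range before the uint8 cast. A one-line check of that mapping:

```python
import torch

x = torch.tensor([-1.0, 0.0, 1.0])   # decoder output endpoints and midpoint
y = 255 * (x * 0.5 + 0.5)            # tensor([  0.0000, 127.5000, 255.0000])
assert float(y.min()) == 0.0 and float(y.max()) == 255.0
```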