Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,6 +7,7 @@ from PIL import Image
|
|
| 7 |
from os import path
|
| 8 |
from torchvision import transforms
|
| 9 |
from dataclasses import dataclass
|
|
|
|
| 10 |
import math
|
| 11 |
from typing import Callable
|
| 12 |
import spaces
|
|
@@ -51,10 +52,12 @@ pipe = diffusers.ZImagePipeline.from_pretrained("dimitribarbot/Z-Image-Turbo-BF1
|
|
| 51 |
|
| 52 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 53 |
|
| 54 |
- pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
|
| 55 |
|
| 56 |
-
|
| 57 |
|
|
|
|
|
|
|
| 58 |
#pipe.enable_model_cpu_offload()
|
| 59 |
|
| 60 |
try: # A temp hack for some version diffusers lora loading problem
|
|
|
|
| 7 |
from os import path
|
| 8 |
from torchvision import transforms
|
| 9 |
from dataclasses import dataclass
|
| 10 |
+ from io import BytesIO
|
| 11 |
import math
|
| 12 |
from typing import Callable
|
| 13 |
import spaces
|
|
|
|
| 52 |
|
| 53 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 54 |
|
| 55 |
+ #pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
|
| 56 |
|
| 57 |
+ pipe.vae = AutoencoderKL.from_pretrained("REPA-E/e2e-flux-vae", torch_dtype=torch.float16).to("cuda")
|
| 58 |
|
| 59 |
+ #pipe.vae = DiffusersAutoencoderKL.from_pretrained("kaiyuyue/FLUX.2-dev-vae", torch_dtype=torch.float16, scaling_factor = 0.3611, shift_factor = 0.1159).to("cuda")
|
| 60 |
+ ## Alas, the model would need to be retrained to work with the Flux2 vae, with its doubled channel count of 32.
|
| 61 |
#pipe.enable_model_cpu_offload()
|
| 62 |
|
| 63 |
try: # A temp hack for some version diffusers lora loading problem
|