Spaces:
Running
on
Zero
Running
on
Zero
File size: 1,840 Bytes
39d9406 dc382c8 b71a3ad dc382c8 b71a3ad 5c395b2 b71a3ad 5bebd85 b71a3ad 1c1b97a 5bebd85 b71a3ad 39d9406 b71a3ad 5bebd85 b71a3ad 55d79e2 5bebd85 39d9406 b71a3ad 39d9406 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
from functools import partial
import gradio as gr
from PIL.Image import Image
from huggingface_hub import InferenceClient
def text_to_image(client: InferenceClient, model: str, prompt: str) -> Image:
"""Generate an image from a text prompt using Hugging Face Inference API.
This function uses the Hugging Face Inference API to generate images from text prompts.
This approach offloads the model loading and inference to Hugging Face's infrastructure,
which is more suitable for environments with limited GPU memory or time constraints
(like Hugging Face Spaces with Zero GPU).
Args:
client: Hugging Face InferenceClient instance for API calls.
model: Hugging Face model ID to use for text-to-image generation.
prompt: Text description of the desired image.
Returns:
PIL Image object representing the generated image.
"""
return client.text_to_image(prompt, model=model)
def create_text_to_image_tab(client: InferenceClient, model: str):
    """Build the text-to-image tab of the Gradio interface.

    Lays out the prompt textbox, the generate button, and the output image
    widget, then wires the button's click event to ``text_to_image`` with
    *client* and *model* pre-bound via ``functools.partial``.

    Args:
        client: InferenceClient forwarded to ``text_to_image``.
        model: Hugging Face model ID forwarded to ``text_to_image``.
    """
    gr.Markdown("Generate an image from a text prompt.")
    prompt_box = gr.Textbox(label="Prompt")
    generate_button = gr.Button("Generate")
    image_output = gr.Image(label="Image", type="pil")
    # Bind client/model up front so Gradio only has to supply the prompt.
    generate_button.click(
        fn=partial(text_to_image, client, model),
        inputs=prompt_box,
        outputs=image_output,
    )
|