@@ -1,19 +1,25 @@
 import gradio as gr
 from diffusers.utils import load_image
 import numpy as np
+from PIL import Image
+
+import openvino as ov
+import openvino_genai as ov_genai
 
 
 # TODO Consider reusing make_demo_segmind_vegart
 def make_demo_sd_xl_text2image(pipeline):
     def generate_from_text(text, seed, num_steps):
-        result = pipeline(
+        image_tensor = pipeline.generate(
             text,
             num_inference_steps=num_steps,
-            generator=np.random.RandomState(seed),
             height=512,
             width=512,
-        ).images[0]
-        return result
+            generator=ov_genai.TorchGenerator(seed),
+        )
+        image = Image.fromarray(image_tensor.data[0])
+
+        return image
 
     with gr.Blocks() as demo:
         with gr.Column():
@@ -59,13 +65,21 @@ def make_demo_sd_xl_image2image(pipeline):
     )
 
     def generate_from_image(text, image, seed, num_steps):
-        result = pipeline(
+        def image_to_tensor(image: Image) -> ov.Tensor:
+            pic = image.convert("RGB")
+            image_data = np.array(pic.getdata()).reshape(1, pic.size[1], pic.size[0], 3).astype(np.uint8)
+            return ov.Tensor(image_data)
+
+        init_image = image_to_tensor(image)
+        photo_image_tensor = pipeline.generate(
             text,
-            image=image,
+            image=init_image,
             num_inference_steps=num_steps,
-            generator=np.random.RandomState(seed),
-        ).images[0]
-        return result
+            generator=ov_genai.TorchGenerator(seed),
+        )
+        photo_image = Image.fromarray(photo_image_tensor.data[0])
+
+        return photo_image
 
     with gr.Blocks() as demo:
         with gr.Column():
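
A minimal usage sketch for the updated text-to-image helper, assuming the SDXL model has already been exported to an OpenVINO model directory and that make_demo_sd_xl_text2image returns the gr.Blocks object; the model directory and device below are placeholders:

import openvino_genai as ov_genai

# make_demo_sd_xl_text2image comes from the helper module shown in the diff above.
# Placeholder model directory and device; an exported OpenVINO SDXL model is assumed to live there.
pipeline = ov_genai.Text2ImagePipeline("openvino_sdxl_dir", "CPU")

# Build the Gradio demo with the updated helper and serve it locally.
demo = make_demo_sd_xl_text2image(pipeline)
demo.launch()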
|
|