Skip to content

Commit ec32ee6

Browse files
authored
fix style with black (#2769)
1 parent c31f8d0 commit ec32ee6

File tree

3 files changed

+28
-8
lines changed

3 files changed

+28
-8
lines changed

notebooks/image-to-image-genai/gradio_helper.py

+14-2
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,19 @@
1515

1616

1717
def make_demo(pipeline, generator_cls, image_to_tensor):
18-
def infer(input_image, prompt, negative_prompt, seed, strength, randomize_seed, num_inference_steps, use_custom_size, height, width, progress=gr.Progress(track_tqdm=True)):
18+
def infer(
19+
input_image,
20+
prompt,
21+
negative_prompt,
22+
seed,
23+
strength,
24+
randomize_seed,
25+
num_inference_steps,
26+
use_custom_size,
27+
height,
28+
width,
29+
progress=gr.Progress(track_tqdm=True),
30+
):
1931
if randomize_seed:
2032
seed = np.random.randint(0, MAX_SEED)
2133

@@ -30,12 +42,12 @@ def callback(step, num_steps, latent):
3042
pbar.update(1)
3143
sys.stdout.flush()
3244
return False
45+
3346
additional_args = {}
3447

3548
if use_custom_size:
3649
additional_args = {"height": height, "width": width}
3750

38-
3951
image_tensor = pipeline.generate(
4052
prompt,
4153
init_image_tensor,

notebooks/image-to-image-genai/image-to-image-genai.ipynb

+8-1
Original file line number | Diff line number | Diff line change
@@ -499,7 +499,14 @@
499499
"\n",
500500
"\n",
501501
"image_tensor = pipe.generate(\n",
502-
" prompt, init_image_tensor, negative_prompt=negative_prompt, strength=strength, num_inference_steps=20, num_images_per_prompt=1, generator=random_generator, callback=callback\n",
502+
" prompt,\n",
503+
" init_image_tensor,\n",
504+
" negative_prompt=negative_prompt,\n",
505+
" strength=strength,\n",
506+
" num_inference_steps=20,\n",
507+
" num_images_per_prompt=1,\n",
508+
" generator=random_generator,\n",
509+
" callback=callback,\n",
503510
")\n",
504511
"\n",
505512
"pbar.close()\n",

supplementary_materials/notebooks/fastdraft-deepseek/fastdraft_deepseek.ipynb

+6-5
Original file line number | Diff line number | Diff line change
@@ -102,7 +102,6 @@
102102
"metadata": {},
103103
"outputs": [],
104104
"source": [
105-
"\n",
106105
"if not model_dir.exists():\n",
107106
" ! optimum-cli export openvino --model $model_id --task text-generation-with-past --weight-format int4 $model_dir\n",
108107
"\n",
@@ -407,9 +406,10 @@
407406
"\n",
408407
"print(\"Loading prompts...\")\n",
409408
"import json\n",
410-
"f= open('prompts.json')\n",
409+
"\n",
410+
"f = open(\"prompts.json\")\n",
411411
"prompts = json.load(f)\n",
412-
"prompts = [[{\"role\": \"user\", \"content\": p }] for p in prompts]\n",
412+
"prompts = [[{\"role\": \"user\", \"content\": p}] for p in prompts]\n",
413413
"\n",
414414
"# We will first do a short warmup to the model so the time measurement will not include the warmup overhead.\n",
415415
"generation_config.max_new_tokens = 8\n",
@@ -470,9 +470,10 @@
470470
"\n",
471471
"print(\"Loading prompts...\")\n",
472472
"import json\n",
473-
"f= open('prompts.json')\n",
473+
"\n",
474+
"f = open(\"prompts.json\")\n",
474475
"prompts = json.load(f)\n",
475-
"prompts = [[{\"role\": \"user\", \"content\": p }] for p in prompts]\n",
476+
"prompts = [[{\"role\": \"user\", \"content\": p}] for p in prompts]\n",
476477
"\n",
477478
"# Define scheduler\n",
478479
"scheduler_config = ov_genai.SchedulerConfig()\n",

0 commit comments

Comments (0)