Skip to content

Commit 5203929

Browse files
authored
Fix genai helper typo and format (#2766)
Minor fix after #2765
1 parent 171c36d commit 5203929

File tree

2 files changed

+3
-4
lines changed

2 files changed

+3
-4
lines changed

notebooks/llm-chatbot/gradio_helper_genai.py

+1-2
Original file line number | Diff line number | Diff line change
@@ -2,7 +2,7 @@
22
import openvino_genai as ov_genai
33
from uuid import uuid4
44
from threading import Event, Thread
5-
from gena_helper import ChunkStreamer
5+
from genai_helper import ChunkStreamer
66

77
max_new_tokens = 256
88

@@ -64,7 +64,6 @@ def get_system_prompt(model_language, system_prompt=None):
6464
)
6565

6666

67-
6867
def make_demo(pipe, model_configuration, model_id, model_language, disable_advanced=False):
6968
import gradio as gr
7069

utils/genai_helper.py

+2-2
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,7 @@
22
import queue
33
import sys
44

5+
56
class IterableStreamer(ov_genai.StreamerBase):
67
"""
78
A custom streamer class for handling token streaming and detokenization with buffering.
@@ -130,7 +131,6 @@ def reset(self):
130131
super().reset()
131132

132133

133-
134134
class ChunkStreamer(IterableStreamer):
135135

136136
def __init__(self, tokenizer, tokens_len):
@@ -142,4 +142,4 @@ def put(self, token_id: int) -> bool:
142142
self.tokens_cache.append(token_id)
143143
self.decoded_lengths.append(-1)
144144
return False
145-
return super().put(token_id)
145+
return super().put(token_id)

0 commit comments

Comments (0)