Skip to content

Commit 05bf241

Browse files
authored
offline experience fixes (#2711)
1 parent 9f4fefd commit 05bf241

File tree

15 files changed

+41
-25
lines changed

15 files changed

+41
-25
lines changed

notebooks/bark-text-to-audio/bark-text-to-audio.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@
142142
"source": [
143143
"text_use_small = True\n",
144144
"\n",
145-
"text_encoder = load_model(model_type=\"text\", use_gpu=False, use_small=text_use_small, force_reload=False)\n",
145+
"text_encoder = load_model(model_type=\"text\", use_gpu=False, use_small=text_use_small, force_reload=False, weights_only=False)\n",
146146
"\n",
147147
"text_encoder_model = text_encoder[\"model\"]\n",
148148
"tokenizer = text_encoder[\"tokenizer\"]"

notebooks/blip-visual-language-processing/blip-visual-language-processing.ipynb

+1
Original file line numberDiff line numberDiff line change
@@ -1644,6 +1644,7 @@
16441644
"\n",
16451645
"comp_text_encoder = core.compile_model(TEXT_ENCODER_OV, device.value)\n",
16461646
"comp_text_decoder_with_past = core.compile_model(TEXT_DECODER_OV, device.value)\n",
1647+
"comp_vision_model = core.compile_model(VISION_MODEL_OV, device.value)\n",
16471648
"fp_text_decoder.forward = partial(text_decoder_forward, ov_text_decoder_with_past=comp_text_decoder_with_past)\n",
16481649
"fp16_model = OVBlipModel(model.config, model.decoder_start_token_id, comp_vision_model, comp_text_encoder, fp_text_decoder)"
16491650
]

notebooks/depth-anything/depth-anything.ipynb

+8-8
Original file line numberDiff line numberDiff line change
@@ -69,13 +69,7 @@
6969
" r = requests.get(\n",
7070
" url=\"https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/cmd_helper.py\",\n",
7171
" )\n",
72-
" open(\"cmd_helper.py\", \"w\").write(r.text)\n",
73-
"\n",
74-
"if not Path(\"notebook_utils.py\").exists():\n",
75-
" r = requests.get(\n",
76-
" url=\"https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py\",\n",
77-
" )\n",
78-
" open(\"notebook_utils.py\", \"w\").write(r.text)"
72+
" open(\"cmd_helper.py\", \"w\").write(r.text)"
7973
]
8074
},
8175
{
@@ -109,7 +103,13 @@
109103
"if platform.system() == \"Darwin\":\n",
110104
" %pip install -q \"numpy<2.0.0\"\n",
111105
"if platform.python_version_tuple()[1] in [\"8\", \"9\"]:\n",
112-
" %pip install -q \"gradio-imageslider<=0.0.17\" \"typing-extensions>=4.9.0\""
106+
" %pip install -q \"gradio-imageslider<=0.0.17\" \"typing-extensions>=4.9.0\"\n",
107+
"\n",
108+
"if not Path(\"notebook_utils.py\").exists():\n",
109+
" r = requests.get(\n",
110+
" url=\"https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py\",\n",
111+
" )\n",
112+
" open(\"notebook_utils.py\", \"w\").write(r.text)"
113113
]
114114
},
115115
{

notebooks/instruct-pix2pix-image-editing/instruct-pix2pix-image-editing.ipynb

+2-1
Original file line numberDiff line numberDiff line change
@@ -1080,12 +1080,13 @@
10801080
"source": [
10811081
"import io\n",
10821082
"import requests\n",
1083+
"from diffusers.utils import load_image\n",
10831084
"\n",
10841085
"default_image_path = Path(\"default_image.png\")\n",
10851086
"default_url = \"https://user-images.githubusercontent.com/29454499/223343459-4ac944f0-502e-4acf-9813-8e9f0abc8a16.jpg\"\n",
10861087
"\n",
10871088
"if not default_image_path.exists():\n",
1088-
" img = PIL.Image.open(io.BytesIO(requests.get(default_url, stream=True).raw))\n",
1089+
" img = load_image(default_url)\n",
10891090
" img.save(default_image_path)\n",
10901091
"\n",
10911092
"default_image = PIL.Image.open(default_image_path)\n",

notebooks/llm-agent-react/llm-agent-react-langchain.ipynb

+5
Original file line numberDiff line numberDiff line change
@@ -486,6 +486,11 @@
486486
"import openvino.properties.hint as hints\n",
487487
"import openvino.properties.streams as streams\n",
488488
"\n",
489+
"import torch\n",
490+
"\n",
491+
"if hasattr(torch, \"mps\") and torch.mps.is_available:\n",
492+
" torch.mps.is_available = lambda: False\n",
493+
"\n",
489494
"\n",
490495
"class StopSequenceCriteria(StoppingCriteria):\n",
491496
" \"\"\"\n",

notebooks/multimodal-rag/multimodal-rag-llamaindex.ipynb

+5
Original file line numberDiff line numberDiff line change
@@ -605,6 +605,11 @@
605605
"metadata": {},
606606
"outputs": [],
607607
"source": [
608+
"import torch\n",
609+
"\n",
610+
"if hasattr(torch, \"mps\") and torch.mps.is_available:\n",
611+
" torch.mps.is_available = lambda: False\n",
612+
"\n",
608613
"from llama_index.core.indices import MultiModalVectorStoreIndex\n",
609614
"from llama_index.vector_stores.qdrant import QdrantVectorStore\n",
610615
"from llama_index.core import StorageContext, Settings\n",

notebooks/optical-character-recognition/optical-character-recognition.ipynb

+6-6
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@
9494
" )\n",
9595
"\n",
9696
" open(\"notebook_utils.py\", \"w\").write(r.text)\n",
97-
"from notebook_utils import load_image, device_widget\n",
97+
"from notebook_utils import download_file, device_widget\n",
9898
"\n",
9999
"# Read more about telemetry collection at https://github.com/openvinotoolkit/openvino_notebooks?tab=readme-ov-file#-telemetry\n",
100100
"from notebook_utils import collect_telemetry\n",
@@ -290,14 +290,14 @@
290290
}
291291
],
292292
"source": [
293-
"# The `image_file` variable can point to a URL or a local image.\n",
294-
"image_url = \"https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/intel_rnb.jpg\"\n",
295-
"\n",
296293
"image_path = Path(\"intel_rnb.jpg\")\n",
297294
"\n",
298295
"if not image_path.exists():\n",
299-
" image = load_image(image_url)\n",
300-
" cv2.imwrite(str(image_path), image)\n",
296+
" download_file(\n",
297+
" url=\"https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/intel_rnb.jpg\",\n",
298+
" filename=image_path.name,\n",
299+
" directory=image_path.parent,\n",
300+
" )\n",
301301
"else:\n",
302302
" image = cv2.imread(str(image_path))\n",
303303
"\n",

notebooks/paddle-ocr-webcam/pre_post_processing.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -461,7 +461,7 @@ def draw_ocr_box_txt(image, boxes, txts, scores=None, drop_score=0.5, font_path=
461461
try:
462462
char_size = font.getbox(c)
463463
y_idx = -1
464-
except AttributeError:
464+
except Exception:
465465
char_size = font.getsize(c)
466466
y_idx = 1
467467

notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@
108108
"\n",
109109
"%pip install -q \"openvino>=2023.2.0\"\n",
110110
"clone_repo(\"https://github.com/svc-develop-team/so-vits-svc\", revision=\"4.1-Stable\", add_to_sys_path=False)\n",
111-
"%pip install -q --extra-index-url https://download.pytorch.org/whl/cpu tqdm librosa \"torch>=2.1.0\" \"torchaudio>=2.1.0\" faiss-cpu \"gradio>=4.19\" \"numpy>=1.23.5\" praat-parselmouth"
111+
"%pip install -q --extra-index-url https://download.pytorch.org/whl/cpu tqdm librosa \"torch>=2.1.0,<2.6.0\" \"torchaudio>=2.1.0,<2.6.0\" faiss-cpu \"gradio>=4.19\" \"numpy>=1.23.5\" praat-parselmouth"
112112
]
113113
},
114114
{

notebooks/whisper-asr-genai/whisper-asr-genai.ipynb

+5-2
Original file line numberDiff line numberDiff line change
@@ -243,6 +243,7 @@
243243
"source": [
244244
"from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq, pipeline\n",
245245
"from transformers.utils import logging\n",
246+
"import torch\n",
246247
"\n",
247248
"processor = AutoProcessor.from_pretrained(model_id.value)\n",
248249
"\n",
@@ -253,7 +254,7 @@
253254
" model=pt_model,\n",
254255
" tokenizer=processor.tokenizer,\n",
255256
" feature_extractor=processor.feature_extractor,\n",
256-
" device=\"cpu\",\n",
257+
" device=torch.device(\"cpu\"),\n",
257258
")"
258259
]
259260
},
@@ -1017,7 +1018,9 @@
10171018
" model=ov_model,\n",
10181019
" chunk_length_s=30,\n",
10191020
" tokenizer=ov_processor.tokenizer,\n",
1020-
" feature_extractor=ov_processor.feature_extractor)\n",
1021+
" feature_extractor=ov_processor.feature_extractor,\n",
1022+
" device=torch.device(\"cpu\")\n",
1023+
" )\n",
10211024
" try:\n",
10221025
" calibration_dataset = dataset = load_dataset(\"openslr/librispeech_asr\", \"clean\", split=\"validation\", streaming=True, trust_remote_code=True)\n",
10231026
" for sample in tqdm(islice(calibration_dataset, calibration_dataset_size), desc=\"Collecting calibration data\",\n",

notebooks/whisper-subtitles-generation/whisper-subtitles-generation.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -763,7 +763,7 @@
763763
" model=ov_model,\n",
764764
" chunk_length_s=30,\n",
765765
" tokenizer=processor.tokenizer,\n",
766-
" feature_extractor=processor.feature_extractor)\n",
766+
"                  feature_extractor=processor.feature_extractor, device=torch.device(\"cpu\"))\n",
767767
" try:\n",
768768
" calibration_dataset = dataset = load_dataset(\"openslr/librispeech_asr\", \"clean\", split=\"validation\", streaming=True, trust_remote_code=True)\n",
769769
" for sample in tqdm(islice(calibration_dataset, calibration_dataset_size), desc=\"Collecting calibration data\",\n",

notebooks/yolov10-optimization/yolov10-optimization.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@
8686
"%pip install -q \"nncf>=2.11.0\"\n",
8787
"%pip install -Uq \"openvino>=2024.3.0\"\n",
8888
"%pip install -q \"git+https://github.com/THU-MIG/yolov10.git\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
89-
"%pip install -q \"torch>=2.1\" \"torchvision>=0.16\" tqdm opencv-python \"gradio>=4.19\" \"matplotlib>=3.9\" --extra-index-url https://download.pytorch.org/whl/cpu"
89+
"%pip install -q \"torch>=2.1,<2.6\" \"torchvision>=0.16\" tqdm opencv-python \"gradio>=4.19\" \"matplotlib>=3.9\" --extra-index-url https://download.pytorch.org/whl/cpu"
9090
]
9191
},
9292
{

notebooks/yolov11-optimization/yolov11-keypoint-detection.ipynb

+2-1
Original file line numberDiff line numberDiff line change
@@ -117,12 +117,13 @@
117117
"# Fetch `notebook_utils` module\n",
118118
"import requests\n",
119119
"\n",
120-
"if Path(\"notebook_utils.py\").exists():\n",
120+
"if not Path(\"notebook_utils.py\").exists():\n",
121121
" r = requests.get(\n",
122122
" url=\"https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py\",\n",
123123
" )\n",
124124
"\n",
125125
" open(\"notebook_utils.py\", \"w\").write(r.text)\n",
126+
"\n",
126127
"from notebook_utils import download_file, VideoPlayer, device_widget\n",
127128
"\n",
128129
"# Read more about telemetry collection at https://github.com/openvinotoolkit/openvino_notebooks?tab=readme-ov-file#-telemetry\n",

notebooks/yolov8-optimization/yolov8-object-detection.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@
132132
"# Fetch `notebook_utils` module\n",
133133
"import requests\n",
134134
"\n",
135-
"if not Path(\"notebook_utils.py\").exists()\n",
135+
"if not Path(\"notebook_utils.py\").exists():\n",
136136
" r = requests.get(\n",
137137
" url=\"https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py\",\n",
138138
" )\n",

notebooks/yolov9-optimization/yolov9-optimization.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@
6363
"metadata": {},
6464
"outputs": [],
6565
"source": [
66-
"%pip install -q \"openvino>=2023.3.0\" \"nncf>=2.8.1\" \"opencv-python\" \"matplotlib>=3.4\" \"seaborn\" \"pandas\" \"scikit-learn\" \"torch\" \"torchvision\" \"tqdm\" --extra-index-url https://download.pytorch.org/whl/cpu"
66+
"%pip install -q \"openvino>=2023.3.0\" \"nncf>=2.8.1\" \"opencv-python\" \"matplotlib>=3.4\" \"seaborn\" \"pandas\" \"scikit-learn\" \"torch<2.6.0\" \"torchvision\" \"tqdm\" --extra-index-url https://download.pytorch.org/whl/cpu"
6767
]
6868
},
6969
{

0 commit comments

Comments
 (0)