|
85 | 85 | "id": "f95c904716cd"
|
86 | 86 | },
|
87 | 87 | "source": [
|
88 |
| - "| | |\n", |
89 |
| - "|-|-|\n", |
90 |
| - "|Author(s) | [Eric Dong](https://github.com/gericdong) |" |
| 88 | + "| Authors |\n", |
| 89 | + "| --- |\n", |
| 90 | + "| [Eric Dong](https://github.com/gericdong), [Holt Skinner](https://github.com/holtskinner) |" |
91 | 91 | ]
|
92 | 92 | },
|
93 | 93 | {
|
|
104 | 104 | " <img src=\"https://img.youtube.com/vi/YfiLUpNejpE/maxresdefault.jpg\" alt=\"Introduction to Gemini on Vertex AI\" width=\"500\">\n",
|
105 | 105 | "</a>\n",
|
106 | 106 | "\n",
|
107 |
| - "This notebook demonstrates how to send chat prompts to the Gemini model by using the Vertex AI SDK for Python and LangChain. Gemini supports prompts with text-only input, including natural language tasks, multi-turn text and code chat, and code generation. It can output text and code.\n", |
| 107 | + "This notebook demonstrates how to send chat prompts to the Gemini model. Gemini supports prompts with multimodal input, including natural language tasks, multi-turn text, images, video, audio, and code generation. It can output text and code.\n", |
108 | 108 | "\n",
|
109 | 109 | "Learn more about [Sending chat prompt requests (Gemini)](https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/send-chat-prompts-gemini)."
|
110 | 110 | ]
|
|
117 | 117 | "source": [
|
118 | 118 | "### Objectives\n",
|
119 | 119 | "\n",
|
120 |
| - "In this tutorial, you learn how to send chat prompts to the Gemini model using the Vertex AI SDK for Python and LangChain.\n", |
| 120 | + "In this tutorial, you learn how to send chat prompts to the Gemini model.\n", |
121 | 121 | "\n",
|
122 | 122 | "You will complete the following tasks:\n",
|
123 | 123 | "\n",
|
124 |
| - "- Sending chat prompts using Vertex AI SDK for Python\n", |
| 124 | + "- Sending chat prompts using Google Gen AI SDK for Python\n", |
125 | 125 | "- Sending chat prompts using LangChain"
|
126 | 126 | ]
|
127 | 127 | },
|
|
165 | 165 | },
|
166 | 166 | "outputs": [],
|
167 | 167 | "source": [
|
168 |
| - "%pip install --upgrade --quiet google-cloud-aiplatform langchain-google-vertexai langchain" |
| 168 | + "%pip install --upgrade --quiet google-genai langchain-google-vertexai langchain" |
169 | 169 | ]
|
170 | 170 | },
|
171 | 171 | {
|
|
223 | 223 | "# Use the environment variable if the user doesn't provide Project ID.\n",
|
224 | 224 | "import os\n",
|
225 | 225 | "\n",
|
226 |
| - "import vertexai\n", |
| 226 | + "from google import genai\n", |
227 | 227 | "\n",
|
228 |
| - "PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\" isTemplate: true}\n", |
| 228 | + "PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n", |
229 | 229 | "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
|
230 | 230 | " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
|
231 | 231 | "\n",
|
232 | 232 | "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
|
233 | 233 | "\n",
|
234 |
| - "vertexai.init(project=PROJECT_ID, location=LOCATION)" |
| 234 | + "client = genai.Client(vertexai=True, project=PROJECT_ID, location=LOCATION)" |
235 | 235 | ]
|
236 | 236 | },
|
237 | 237 | {
|
|
251 | 251 | },
|
252 | 252 | "outputs": [],
|
253 | 253 | "source": [
|
254 |
| - "from IPython.display import Markdown\n", |
| 254 | + "from IPython.display import Markdown, display\n", |
| 255 | + "from google.genai.types import GenerateContentConfig, ModelContent, UserContent\n", |
255 | 256 | "from langchain.chains import ConversationChain\n",
|
256 | 257 | "from langchain.memory import ConversationBufferMemory\n",
|
257 | 258 | "from langchain.prompts import (\n",
|
|
261 | 262 | " SystemMessagePromptTemplate,\n",
|
262 | 263 | ")\n",
|
263 | 264 | "from langchain_core.messages import HumanMessage, SystemMessage\n",
|
264 |
| - "from langchain_google_vertexai import ChatVertexAI, HarmBlockThreshold, HarmCategory\n", |
265 |
| - "from vertexai.generative_models import Content, GenerativeModel, Part" |
| 265 | + "from langchain_google_vertexai import ChatVertexAI, HarmBlockThreshold, HarmCategory" |
266 | 266 | ]
|
267 | 267 | },
|
268 | 268 | {
|
|
271 | 271 | "id": "4437b7608c8e"
|
272 | 272 | },
|
273 | 273 | "source": [
|
274 |
| - "## Sending chat prompts using Vertex AI SDK for Python\n", |
| 274 | + "## Sending chat prompts using Google Gen AI SDK for Python\n", |
275 | 275 | "\n",
|
276 | 276 | "### Load the Gemini model"
|
277 | 277 | ]
|
|
284 | 284 | },
|
285 | 285 | "outputs": [],
|
286 | 286 | "source": [
|
287 |
| - "model = GenerativeModel(\"gemini-2.0-flash\")" |
| 287 | + "MODEL_ID = \"gemini-2.0-flash\"" |
288 | 288 | ]
|
289 | 289 | },
|
290 | 290 | {
|
|
306 | 306 | },
|
307 | 307 | "outputs": [],
|
308 | 308 | "source": [
|
309 |
| - "chat = model.start_chat()\n", |
| 309 | + "chat = client.chats.create(\n", |
| 310 | + " model=MODEL_ID,\n", |
| 311 | + " config=GenerateContentConfig(\n", |
| 312 | + "        system_instruction=\"You are an astronomer, knowledgeable about the solar system.\"\n", |
| 313 | + " ),\n", |
| 314 | + ")\n", |
310 | 315 | "\n",
|
311 | 316 | "response = chat.send_message(\n",
|
312 |
| - " \"\"\"You are an astronomer, knowledgeable about the solar system.\n", |
313 |
| - "How many moons does Mars have? Tell me some fun facts about them.\n", |
314 |
| - "\"\"\"\n", |
| 317 | + " \"\"\"How many moons does Mars have? Tell me some fun facts about them.\"\"\"\n", |
315 | 318 | ")\n",
|
316 | 319 | "\n",
|
317 |
| - "print(response.text)" |
318 |
| - ] |
319 |
| - }, |
320 |
| - { |
321 |
| - "cell_type": "markdown", |
322 |
| - "metadata": { |
323 |
| - "id": "e45ReUIxvTxX" |
324 |
| - }, |
325 |
| - "source": [ |
326 |
| - "You can use `Markdown` to display the generated text." |
327 |
| - ] |
328 |
| - }, |
329 |
| - { |
330 |
| - "cell_type": "code", |
331 |
| - "execution_count": null, |
332 |
| - "metadata": { |
333 |
| - "id": "8QU6brtOuyAx" |
334 |
| - }, |
335 |
| - "outputs": [], |
336 |
| - "source": [ |
337 |
| - "Markdown(response.text)" |
| 320 | + "display(Markdown(response.text))" |
338 | 321 | ]
|
339 | 322 | },
|
340 | 323 | {
|
|
374 | 357 | },
|
375 | 358 | "outputs": [],
|
376 | 359 | "source": [
|
377 |
| - "print(chat.history)" |
| 360 | + "print(chat.get_history())" |
378 | 361 | ]
|
379 | 362 | },
|
380 | 363 | {
|
|
396 | 379 | },
|
397 | 380 | "outputs": [],
|
398 | 381 | "source": [
|
399 |
| - "code_chat = model.start_chat()\n", |
| 382 | + "code_chat = client.chats.create(\n", |
| 383 | + " model=MODEL_ID,\n", |
| 384 | + " config=GenerateContentConfig(\n", |
| 385 | + " system_instruction=\"You are an expert software engineer, proficient in Python.\"\n", |
| 386 | + " ),\n", |
| 387 | + ")\n", |
| 388 | + "\n", |
400 | 389 | "\n",
|
401 | 390 | "response = code_chat.send_message(\n",
|
402 | 391 | " \"Write a function that checks if a year is a leap year\"\n",
|
403 | 392 | ")\n",
|
404 | 393 | "\n",
|
405 |
| - "print(response.text)" |
| 394 | + "display(Markdown(response.text))" |
406 | 395 | ]
|
407 | 396 | },
|
408 | 397 | {
|
|
424 | 413 | "source": [
|
425 | 414 | "response = code_chat.send_message(\"Write a unit test of the generated function\")\n",
|
426 | 415 | "\n",
|
427 |
| - "print(response.text)" |
| 416 | + "display(Markdown(response.text))" |
428 | 417 | ]
|
429 | 418 | },
|
430 | 419 | {
|
|
446 | 435 | },
|
447 | 436 | "outputs": [],
|
448 | 437 | "source": [
|
449 |
| - "chat2 = model.start_chat(\n", |
| 438 | + "chat2 = client.chats.create(\n", |
| 439 | + " model=MODEL_ID,\n", |
450 | 440 | " history=[\n",
|
451 |
| - " Content(\n", |
452 |
| - " role=\"user\",\n", |
453 |
| - " parts=[\n", |
454 |
| - " Part.from_text(\n", |
455 |
| - " \"\"\"\n", |
456 |
| - " My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.\n", |
| 441 | + " UserContent(\n", |
| 442 | + " \"\"\"My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.\n", |
457 | 443 | " Who do you work for?\n",
|
458 | 444 | " \"\"\"\n",
|
459 |
| - " )\n", |
460 |
| - " ],\n", |
461 | 445 | " ),\n",
|
462 |
| - " Content(role=\"model\", parts=[Part.from_text(\"I work for Ned.\")]),\n", |
463 |
| - " Content(role=\"user\", parts=[Part.from_text(\"What do I like?\")]),\n", |
464 |
| - " Content(role=\"model\", parts=[Part.from_text(\"Ned likes watching movies.\")]),\n", |
465 |
| - " ]\n", |
| 446 | + " ModelContent(\"I work for Ned.\"),\n", |
| 447 | + " UserContent(\"What do I like?\"),\n", |
| 448 | + " ModelContent(\"Ned likes watching movies.\"),\n", |
| 449 | + " ],\n", |
466 | 450 | ")\n",
|
467 | 451 | "\n",
|
468 | 452 | "response = chat2.send_message(\"Are my favorite movies based on a book series?\")\n",
|
469 |
| - "Markdown(response.text)" |
| 453 | + "display(Markdown(response.text))" |
470 | 454 | ]
|
471 | 455 | },
|
472 | 456 | {
|
|
478 | 462 | "outputs": [],
|
479 | 463 | "source": [
|
480 | 464 | "response = chat2.send_message(\"When were these books published?\")\n",
|
481 |
| - "Markdown(response.text)" |
| 465 | + "display(Markdown(response.text))" |
| 466 | + ] |
| 467 | + }, |
| 468 | + { |
| 469 | + "cell_type": "markdown", |
| 470 | + "metadata": { |
| 471 | + "id": "c4e8deb52116" |
| 472 | + }, |
| 473 | + "source": [ |
| 474 | + "### Multimodal" |
482 | 475 | ]
|
483 | 476 | },
|
484 | 477 | {
|
|
517 | 510 | "messages = [SystemMessage(content=system_message), HumanMessage(content=human_message)]\n",
|
518 | 511 | "\n",
|
519 | 512 | "chat = ChatVertexAI(\n",
|
520 |
| - " model_name=\"gemini-2.0-flash\",\n", |
| 513 | + " model_name=MODEL_ID,\n", |
521 | 514 | " convert_system_message_to_human=True,\n",
|
522 | 515 | " safety_settings={\n",
|
523 | 516 | " HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE\n",
|
|
572 | 565 | "prompt = ChatPromptTemplate.from_messages(messages)\n",
|
573 | 566 | "\n",
|
574 | 567 | "chat = ChatVertexAI(\n",
|
575 |
| - " model_name=\"gemini-2.0-flash\",\n", |
| 568 | + " model_name=MODEL_ID,\n", |
576 | 569 | " convert_system_message_to_human=True,\n",
|
577 | 570 | " safety_settings={\n",
|
578 | 571 | " HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE\n",
|
|
603 | 596 | "outputs": [],
|
604 | 597 | "source": [
|
605 | 598 | "model = ChatVertexAI(\n",
|
606 |
| - " model_name=\"gemini-2.0-flash\",\n", |
| 599 | + " model_name=MODEL_ID,\n", |
607 | 600 | " convert_system_message_to_human=True,\n",
|
608 | 601 | " safety_settings={\n",
|
609 | 602 | " HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE\n",
|
|
0 commit comments