Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 9a5fa50

Browse files
committed Mar 13, 2025
chore: Update Intro Gemini Chat to Google Gen AI SDK
1 parent 8121ef7 commit 9a5fa50

File tree

1 file changed

+55
-62
lines changed

1 file changed

+55
-62
lines changed
 

‎gemini/getting-started/intro_gemini_chat.ipynb

+55-62
Original file line numberDiff line numberDiff line change
@@ -85,9 +85,9 @@
8585
"id": "f95c904716cd"
8686
},
8787
"source": [
88-
"| | |\n",
89-
"|-|-|\n",
90-
"|Author(s) | [Eric Dong](https://github.com/gericdong) |"
88+
"| Authors |\n",
89+
"| --- |\n",
90+
"| [Eric Dong](https://github.com/gericdong), [Holt Skinner](https://github.com/holtskinner) |"
9191
]
9292
},
9393
{
@@ -104,7 +104,7 @@
104104
" <img src=\"https://img.youtube.com/vi/YfiLUpNejpE/maxresdefault.jpg\" alt=\"Introduction to Gemini on Vertex AI\" width=\"500\">\n",
105105
"</a>\n",
106106
"\n",
107-
"This notebook demonstrates how to send chat prompts to the Gemini model by using the Vertex AI SDK for Python and LangChain. Gemini supports prompts with text-only input, including natural language tasks, multi-turn text and code chat, and code generation. It can output text and code.\n",
107+
"This notebook demonstrates how to send chat prompts to the Gemini model. Gemini supports prompts with multimodal input, including natural language tasks, multi-turn text, images, video, audio, and code generation. It can output text and code.\n",
108108
"\n",
109109
"Learn more about [Sending chat prompt requests (Gemini)](https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/send-chat-prompts-gemini)."
110110
]
@@ -117,11 +117,11 @@
117117
"source": [
118118
"### Objectives\n",
119119
"\n",
120-
"In this tutorial, you learn how to send chat prompts to the Gemini model using the Vertex AI SDK for Python and LangChain.\n",
120+
"In this tutorial, you learn how to send chat prompts to the Gemini model.\n",
121121
"\n",
122122
"You will complete the following tasks:\n",
123123
"\n",
124-
"- Sending chat prompts using Vertex AI SDK for Python\n",
124+
"- Sending chat prompts using Google Gen AI SDK for Python\n",
125125
"- Sending chat prompts using LangChain"
126126
]
127127
},
@@ -165,7 +165,7 @@
165165
},
166166
"outputs": [],
167167
"source": [
168-
"%pip install --upgrade --quiet google-cloud-aiplatform langchain-google-vertexai langchain"
168+
"%pip install --upgrade --quiet google-genai langchain-google-vertexai langchain"
169169
]
170170
},
171171
{
@@ -223,15 +223,15 @@
223223
"# Use the environment variable if the user doesn't provide Project ID.\n",
224224
"import os\n",
225225
"\n",
226-
"import vertexai\n",
226+
"from google import genai\n",
227227
"\n",
228-
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\" isTemplate: true}\n",
228+
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
229229
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
230230
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
231231
"\n",
232232
"LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
233233
"\n",
234-
"vertexai.init(project=PROJECT_ID, location=LOCATION)"
234+
"client = genai.Client(vertexai=True, project=PROJECT_ID, location=LOCATION)"
235235
]
236236
},
237237
{
@@ -251,7 +251,8 @@
251251
},
252252
"outputs": [],
253253
"source": [
254-
"from IPython.display import Markdown\n",
254+
"from IPython.display import Markdown, display\n",
255+
"from google.genai.types import GenerateContentConfig, ModelContent, UserContent\n",
255256
"from langchain.chains import ConversationChain\n",
256257
"from langchain.memory import ConversationBufferMemory\n",
257258
"from langchain.prompts import (\n",
@@ -261,8 +262,7 @@
261262
" SystemMessagePromptTemplate,\n",
262263
")\n",
263264
"from langchain_core.messages import HumanMessage, SystemMessage\n",
264-
"from langchain_google_vertexai import ChatVertexAI, HarmBlockThreshold, HarmCategory\n",
265-
"from vertexai.generative_models import Content, GenerativeModel, Part"
265+
"from langchain_google_vertexai import ChatVertexAI, HarmBlockThreshold, HarmCategory"
266266
]
267267
},
268268
{
@@ -271,7 +271,7 @@
271271
"id": "4437b7608c8e"
272272
},
273273
"source": [
274-
"## Sending chat prompts using Vertex AI SDK for Python\n",
274+
"## Sending chat prompts using Gen AI SDK for Python\n",
275275
"\n",
276276
"### Load the Gemini model"
277277
]
@@ -284,7 +284,7 @@
284284
},
285285
"outputs": [],
286286
"source": [
287-
"model = GenerativeModel(\"gemini-2.0-flash\")"
287+
"MODEL_ID = \"gemini-2.0-flash\""
288288
]
289289
},
290290
{
@@ -306,35 +306,18 @@
306306
},
307307
"outputs": [],
308308
"source": [
309-
"chat = model.start_chat()\n",
309+
"chat = client.chats.create(\n",
310+
" model=MODEL_ID,\n",
311+
" config=GenerateContentConfig(\n",
312+
" system_instruction=\"You are an astronomer, knowledgeable about the solar system..\"\n",
313+
" ),\n",
314+
")\n",
310315
"\n",
311316
"response = chat.send_message(\n",
312-
" \"\"\"You are an astronomer, knowledgeable about the solar system.\n",
313-
"How many moons does Mars have? Tell me some fun facts about them.\n",
314-
"\"\"\"\n",
317+
" \"\"\"How many moons does Mars have? Tell me some fun facts about them.\"\"\"\n",
315318
")\n",
316319
"\n",
317-
"print(response.text)"
318-
]
319-
},
320-
{
321-
"cell_type": "markdown",
322-
"metadata": {
323-
"id": "e45ReUIxvTxX"
324-
},
325-
"source": [
326-
"You can use `Markdown` to display the generated text."
327-
]
328-
},
329-
{
330-
"cell_type": "code",
331-
"execution_count": null,
332-
"metadata": {
333-
"id": "8QU6brtOuyAx"
334-
},
335-
"outputs": [],
336-
"source": [
337-
"Markdown(response.text)"
320+
"display(Markdown(response.text))"
338321
]
339322
},
340323
{
@@ -374,7 +357,7 @@
374357
},
375358
"outputs": [],
376359
"source": [
377-
"print(chat.history)"
360+
"print(chat.get_history())"
378361
]
379362
},
380363
{
@@ -396,13 +379,19 @@
396379
},
397380
"outputs": [],
398381
"source": [
399-
"code_chat = model.start_chat()\n",
382+
"code_chat = client.chats.create(\n",
383+
" model=MODEL_ID,\n",
384+
" config=GenerateContentConfig(\n",
385+
" system_instruction=\"You are an expert software engineer, proficient in Python.\"\n",
386+
" ),\n",
387+
")\n",
388+
"\n",
400389
"\n",
401390
"response = code_chat.send_message(\n",
402391
" \"Write a function that checks if a year is a leap year\"\n",
403392
")\n",
404393
"\n",
405-
"print(response.text)"
394+
"display(Markdown(response.text))"
406395
]
407396
},
408397
{
@@ -424,7 +413,7 @@
424413
"source": [
425414
"response = code_chat.send_message(\"Write a unit test of the generated function\")\n",
426415
"\n",
427-
"print(response.text)"
416+
"display(Markdown(response.text))"
428417
]
429418
},
430419
{
@@ -446,27 +435,22 @@
446435
},
447436
"outputs": [],
448437
"source": [
449-
"chat2 = model.start_chat(\n",
438+
"chat2 = client.chats.create(\n",
439+
" model=MODEL_ID,\n",
450440
" history=[\n",
451-
" Content(\n",
452-
" role=\"user\",\n",
453-
" parts=[\n",
454-
" Part.from_text(\n",
455-
" \"\"\"\n",
456-
" My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.\n",
441+
" UserContent(\n",
442+
" \"\"\"My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.\n",
457443
" Who do you work for?\n",
458444
" \"\"\"\n",
459-
" )\n",
460-
" ],\n",
461445
" ),\n",
462-
" Content(role=\"model\", parts=[Part.from_text(\"I work for Ned.\")]),\n",
463-
" Content(role=\"user\", parts=[Part.from_text(\"What do I like?\")]),\n",
464-
" Content(role=\"model\", parts=[Part.from_text(\"Ned likes watching movies.\")]),\n",
465-
" ]\n",
446+
" ModelContent(\"I work for Ned.\"),\n",
447+
" UserContent(\"What do I like?\"),\n",
448+
" ModelContent(\"Ned likes watching movies.\"),\n",
449+
" ],\n",
466450
")\n",
467451
"\n",
468452
"response = chat2.send_message(\"Are my favorite movies based on a book series?\")\n",
469-
"Markdown(response.text)"
453+
"display(Markdown(response.text))"
470454
]
471455
},
472456
{
@@ -478,7 +462,16 @@
478462
"outputs": [],
479463
"source": [
480464
"response = chat2.send_message(\"When were these books published?\")\n",
481-
"Markdown(response.text)"
465+
"display(Markdown(response.text))"
466+
]
467+
},
468+
{
469+
"cell_type": "markdown",
470+
"metadata": {
471+
"id": "c4e8deb52116"
472+
},
473+
"source": [
474+
"### Multimodal "
482475
]
483476
},
484477
{
@@ -517,7 +510,7 @@
517510
"messages = [SystemMessage(content=system_message), HumanMessage(content=human_message)]\n",
518511
"\n",
519512
"chat = ChatVertexAI(\n",
520-
" model_name=\"gemini-2.0-flash\",\n",
513+
" model_name=MODEL_ID,\n",
521514
" convert_system_message_to_human=True,\n",
522515
" safety_settings={\n",
523516
" HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE\n",
@@ -572,7 +565,7 @@
572565
"prompt = ChatPromptTemplate.from_messages(messages)\n",
573566
"\n",
574567
"chat = ChatVertexAI(\n",
575-
" model_name=\"gemini-2.0-flash\",\n",
568+
" model_name=MODEL_ID,\n",
576569
" convert_system_message_to_human=True,\n",
577570
" safety_settings={\n",
578571
" HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE\n",
@@ -603,7 +596,7 @@
603596
"outputs": [],
604597
"source": [
605598
"model = ChatVertexAI(\n",
606-
" model_name=\"gemini-2.0-flash\",\n",
599+
" model_name=MODEL_ID,\n",
607600
" convert_system_message_to_human=True,\n",
608601
" safety_settings={\n",
609602
" HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE\n",

0 commit comments

Comments (0)
Please sign in to comment.