#191: Add simple ollama chat example
1 parent c48e62e · commit 36493ac

Showing 4 changed files with 98 additions and 0 deletions.
@@ -0,0 +1,10 @@
```toml
[package]
name = "ollama-chat"
version = "0.1.0"
edition = "2021"
publish = false

[dependencies]
async-openai = { path = "../../async-openai" }
serde_json = "1.0.135"
tokio = { version = "1.43.0", features = ["full"] }
```
@@ -0,0 +1,18 @@
## Prerequisites

- [Ollama](https://github.com/ollama/ollama) should be installed and running.
- Pull a model to use with the library: `ollama pull <model>`, e.g. `ollama pull llama3.2`.
- See the [Ollama.com model search](https://ollama.com/search) for more information on the available models.
- **You will need to pass the `<model>` name in the API call** (see the sketch after this list).
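For example, the pulled model's name is the string handed to the request builder. A minimal sketch, mirroring the full example later in this commit; `build_request` is a hypothetical helper, and `"llama3.2"` is just whichever model you pulled:

```rust
use async_openai::{
    error::OpenAIError,
    types::{
        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequest,
        CreateChatCompletionRequestArgs,
    },
};

// Hypothetical helper: the `<model>` name you pulled is the value
// passed to `.model(...)` on the request builder.
fn build_request(model: &str) -> Result<CreateChatCompletionRequest, OpenAIError> {
    CreateChatCompletionRequestArgs::default()
        .model(model)
        .max_tokens(512u32)
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("Hello!")
            .build()?
            .into()])
        .build()
}
```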
## Ollama OpenAI Compatibility

**NOTE: an API key parameter is required for compatibility with OpenAI's API spec, but Ollama ignores it (any value works).**

See the [Ollama OpenAI Compatibility docs](https://github.com/ollama/ollama/blob/main/docs/openai.md) for more details on what Ollama supports.
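A minimal configuration sketch, using the same `OpenAIConfig` calls as the full example in this commit; the key string is an arbitrary placeholder, and only the base URL matters:

```rust
use async_openai::{config::OpenAIConfig, Client};

fn main() {
    // Any non-empty key works; Ollama ignores it.
    let _client = Client::with_config(
        OpenAIConfig::new()
            .with_api_key("ollama")
            .with_api_base("http://localhost:11434/v1"),
    );
}
```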
## Response

> Response:
>
> 0: Role: assistant Content: Some("The 2020 World Series was held at Globe Life Field in Arlington, Texas, which is home of the Texas Rangers. Due to COVID-19 pandemic protocols and stadium capacity restrictions, the series featured a neutral-site venue rather than the traditional host-field advantage. The Dodgers defeated the Tampa Bay Rays 4 games to 2.")
@@ -0,0 +1,65 @@
```rust
use std::error::Error;

use async_openai::{
    config::OpenAIConfig,
    types::{
        ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestSystemMessageArgs,
        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
    },
    Client,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    // NOTE: Ollama needs to be running
    let api_base = "http://localhost:11434/v1"; // Default host:port for Ollama's OpenAI-compatible endpoint

    // Required for the OpenAI API spec, but ignored by Ollama
    let api_key = "ollama";

    let client = Client::with_config(
        OpenAIConfig::new()
            .with_api_key(api_key)
            .with_api_base(api_base),
    );

    // The model needs to be pulled first (`ollama pull llama3.1`) before you can use it
    let model = "llama3.1";

    let request = CreateChatCompletionRequestArgs::default()
        .max_tokens(512u32)
        .model(model)
        .messages([
            ChatCompletionRequestSystemMessageArgs::default()
                .content("You are a helpful assistant.")
                .build()?
                .into(),
            ChatCompletionRequestUserMessageArgs::default()
                .content("Who won the world series in 2020?")
                .build()?
                .into(),
            ChatCompletionRequestAssistantMessageArgs::default()
                .content("The Los Angeles Dodgers won the World Series in 2020.")
                .build()?
                .into(),
            ChatCompletionRequestUserMessageArgs::default()
                .content("Where was it played?")
                .build()?
                .into(),
        ])
        .build()?;

    // Print the outgoing request as JSON; `?` propagates any serialization error
    println!("{}", serde_json::to_string(&request)?);

    let response = client.chat().create(request).await?;

    println!("\nResponse:\n");
    for choice in response.choices {
        println!(
            "{}: Role: {} Content: {:?}",
            choice.index, choice.message.role, choice.message.content
        );
    }

    Ok(())
}
```
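To try the example (assuming the layout implied by the `../../async-openai` path dependency above): pull the model with `ollama pull llama3.1`, make sure Ollama is serving on its default port, then run `cargo run` from the example's directory.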