Removed usage of azure_core::Result in examples
jpalvarezl committed Sep 30, 2024
1 parent f15c6c6 commit 688aab1
Showing 6 changed files with 35 additions and 31 deletions.
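
The change is the same across all five examples: because these are standalone example binaries, `main` no longer returns `azure_core::Result<()>`, each fallible call switches from `?` to `.unwrap()` or `.expect(...)`, and the trailing `Ok(())` goes away. A minimal, self-contained sketch of the idiom using only `std` (illustrative only, no SDK types):

```rust
use std::env;

// Before: `fn main() -> Result<(), env::VarError>`, propagating with `?`
// and ending in `Ok(())`. After: an infallible `main` that panics with a
// descriptive message, which reads more directly in example code.
fn main() {
    let endpoint = env::var("AZURE_OPENAI_ENDPOINT")
        .expect("Set AZURE_OPENAI_ENDPOINT env variable");
    println!("Using endpoint: {endpoint}");
}
```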
12 changes: 7 additions & 5 deletions sdk/openai/inference/examples/azure_chat_completions.rs
@@ -1,4 +1,3 @@
-use azure_core::Result;
 use azure_openai_inference::{
     clients::{AzureOpenAIClient, AzureOpenAIClientMethods, ChatCompletionsClientMethods},
     request::CreateChatCompletionsRequest,
@@ -7,7 +6,7 @@ use azure_openai_inference::{
 
 // This example illustrates how to use Azure OpenAI with key credential authentication to generate a chat completion.
 #[tokio::main]
-pub async fn main() -> Result<()> {
+pub async fn main() {
     let endpoint =
         std::env::var("AZURE_OPENAI_ENDPOINT").expect("Set AZURE_OPENAI_ENDPOINT env variable");
     let secret = std::env::var("AZURE_OPENAI_KEY").expect("Set AZURE_OPENAI_KEY env variable");
@@ -20,7 +19,8 @@ pub async fn main() -> Result<()> {
             .with_api_version(AzureServiceVersion::V2023_12_01Preview)
             .build(),
         ),
-    )?
+    )
+    .unwrap()
     .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message(
@@ -34,12 +34,14 @@ pub async fn main() -> Result<()> {
 
     match response {
         Ok(chat_completions_response) => {
-            let chat_completions = chat_completions_response.deserialize_body().await?;
+            let chat_completions = chat_completions_response
+                .deserialize_body()
+                .await
+                .expect("Failed to deserialize response");
             println!("{:#?}", &chat_completions);
         }
         Err(e) => {
             println!("Error: {}", e);
         }
     };
-    Ok(())
 }
15 changes: 8 additions & 7 deletions sdk/openai/inference/examples/azure_chat_completions_aad.rs
@@ -1,6 +1,5 @@
 use std::sync::Arc;
 
-use azure_core::Result;
 use azure_identity::DefaultAzureCredentialBuilder;
 use azure_openai_inference::{
     clients::{AzureOpenAIClient, AzureOpenAIClientMethods, ChatCompletionsClientMethods},
@@ -10,19 +9,20 @@ use azure_openai_inference::{
 
 /// This example illustrates how to use Azure OpenAI Chat Completions with Azure Active Directory authentication.
 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() {
     let endpoint =
         std::env::var("AZURE_OPENAI_ENDPOINT").expect("Set AZURE_OPENAI_ENDPOINT env variable");
 
     let chat_completions_client = AzureOpenAIClient::new(
         endpoint,
-        Arc::new(DefaultAzureCredentialBuilder::new().build()?),
+        Arc::new(DefaultAzureCredentialBuilder::new().build().unwrap()),
         Some(
             AzureOpenAIClientOptions::builder()
                 .with_api_version(AzureServiceVersion::V2023_12_01Preview)
                 .build(),
         ),
-    )?
+    )
+    .unwrap()
     .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message(
@@ -36,13 +36,14 @@ async fn main() -> Result<()> {
 
     match response {
         Ok(chat_completions_response) => {
-            let chat_completions = chat_completions_response.deserialize_body().await?;
+            let chat_completions = chat_completions_response
+                .deserialize_body()
+                .await
+                .expect("Failed to deserialize response");
             println!("{:#?}", &chat_completions);
         }
         Err(e) => {
             println!("Error: {}", e);
         }
     };
 
-    Ok(())
 }
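
The AAD variant has one extra fallible step: building the credential. A hedged sketch of just that step under the new style (types as in the diff above; the `expect` message is illustrative):

```rust
use std::sync::Arc;

use azure_identity::DefaultAzureCredentialBuilder;

fn main() {
    // Credential construction is fallible; instead of propagating the error
    // with `?`, the example now panics with a readable message.
    let _credential = Arc::new(
        DefaultAzureCredentialBuilder::new()
            .build()
            .expect("Failed to build DefaultAzureCredential"),
    );
}
```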
11 changes: 5 additions & 6 deletions sdk/openai/inference/examples/azure_chat_completions_stream.rs
@@ -1,4 +1,3 @@
-use azure_core::Result;
 use azure_openai_inference::{
     clients::{AzureOpenAIClient, AzureOpenAIClientMethods, ChatCompletionsClientMethods},
     request::CreateChatCompletionsRequest,
@@ -9,7 +8,7 @@ use std::io::{self, Write};
 
 /// This example illustrates how to use Azure OpenAI with key credential authentication to stream chat completions.
 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() {
     let endpoint =
         std::env::var("AZURE_OPENAI_ENDPOINT").expect("Set AZURE_OPENAI_ENDPOINT env variable");
     let secret = std::env::var("AZURE_OPENAI_KEY").expect("Set AZURE_OPENAI_KEY env variable");
@@ -22,7 +21,8 @@ async fn main() -> Result<()> {
             .with_api_version(AzureServiceVersion::V2023_12_01Preview)
             .build(),
         ),
-    )?
+    )
+    .unwrap()
     .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message_and_stream(
@@ -32,7 +32,8 @@ async fn main() -> Result<()> {
 
     let response = chat_completions_client
         .stream_chat_completions(&chat_completions_request.model, &chat_completions_request)
-        .await?;
+        .await
+        .unwrap();
 
     // this pins the stream to the stack so it is safe to poll it (namely, it won't be deallocated or moved)
     futures::pin_mut!(response);
@@ -52,6 +53,4 @@ async fn main() -> Result<()> {
             Err(e) => println!("Error: {:?}", e),
         }
     }
-
-    Ok(())
 }
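
The hunks above stop short of the consumption loop. A sketch of how the pinned stream is typically drained, printing deltas as they arrive (assumes `futures::StreamExt` is in scope and that `response` is the pinned stream from above; the chunk field names `choices`, `delta`, and `content` are assumptions for illustration, not taken from the diff):

```rust
// Assumed shape: the stream yields `Result<chunk, error>` items, where each
// chunk carries incremental message content. Field names are illustrative.
while let Some(result) = response.next().await {
    match result {
        Ok(chunk) => {
            for choice in &chunk.choices {
                // Stream deltas may omit content (e.g. role-only frames).
                if let Some(content) = choice.delta.as_ref().and_then(|d| d.content.as_ref()) {
                    print!("{content}");
                    io::stdout().flush().expect("Failed to flush stdout");
                }
            }
        }
        Err(e) => println!("Error: {:?}", e),
    }
}
```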
13 changes: 8 additions & 5 deletions sdk/openai/inference/examples/chat_completions.rs
@@ -5,11 +5,12 @@ use azure_openai_inference::{
 
 /// This example illustrates how to use OpenAI to generate a chat completion.
 #[tokio::main]
-pub async fn main() -> azure_core::Result<()> {
+pub async fn main() {
     let secret = std::env::var("OPENAI_KEY").expect("Set OPENAI_KEY env variable");
 
-    let chat_completions_client =
-        OpenAIClient::with_key_credential(secret, None)?.chat_completions_client();
+    let chat_completions_client = OpenAIClient::with_key_credential(secret, None)
+        .unwrap()
+        .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message(
         "gpt-3.5-turbo-1106",
@@ -22,12 +23,14 @@ pub async fn main() -> azure_core::Result<()> {
 
     match response {
         Ok(chat_completions_response) => {
-            let chat_completions = chat_completions_response.deserialize_body().await?;
+            let chat_completions = chat_completions_response
+                .deserialize_body()
+                .await
+                .expect("Failed to deserialize response");
             println!("{:#?}", &chat_completions);
         }
         Err(e) => {
             println!("Error: {}", e);
         }
     };
-    Ok(())
 }
13 changes: 6 additions & 7 deletions sdk/openai/inference/examples/chat_completions_stream.rs
@@ -1,4 +1,3 @@
-use azure_core::Result;
 use azure_openai_inference::{
     clients::{ChatCompletionsClientMethods, OpenAIClient, OpenAIClientMethods},
     request::CreateChatCompletionsRequest,
@@ -8,11 +7,12 @@ use std::io::{self, Write};
 
 /// This example illustrates how to use OpenAI to stream chat completions.
 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() {
     let secret = std::env::var("OPENAI_KEY").expect("Set OPENAI_KEY env variable");
 
-    let chat_completions_client =
-        OpenAIClient::with_key_credential(secret, None)?.chat_completions_client();
+    let chat_completions_client = OpenAIClient::with_key_credential(secret, None)
+        .unwrap()
+        .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message_and_stream(
         "gpt-3.5-turbo-1106",
@@ -21,7 +21,8 @@ async fn main() -> Result<()> {
 
     let response = chat_completions_client
         .stream_chat_completions(&chat_completions_request.model, &chat_completions_request)
-        .await?;
+        .await
+        .unwrap();
 
     // this pins the stream to the stack so it is safe to poll it (namely, it won't be deallocated or moved)
     futures::pin_mut!(response);
@@ -41,6 +42,4 @@ async fn main() -> Result<()> {
             Err(e) => println!("Error: {:?}", e),
         }
     }
-
-    Ok(())
 }
2 changes: 1 addition & 1 deletion sdk/openai/inference/src/lib.rs
@@ -1,5 +1,5 @@
-mod credentials;
 pub mod clients;
+mod credentials;
 mod helpers;
 mod models;
 mod options;
