From 688aab1b895a69f8fbe656ea33b1067ec0a169dd Mon Sep 17 00:00:00 2001
From: Jose Alvarez
Date: Mon, 30 Sep 2024 11:40:59 +0200
Subject: [PATCH] Removed usage of azure_core::Result in examples

---
 .../inference/examples/azure_chat_completions.rs  | 12 +++++++-----
 .../examples/azure_chat_completions_aad.rs        | 15 ++++++++-------
 .../examples/azure_chat_completions_stream.rs     | 11 +++++------
 sdk/openai/inference/examples/chat_completions.rs | 13 ++++++++-----
 .../inference/examples/chat_completions_stream.rs | 13 ++++++-------
 sdk/openai/inference/src/lib.rs                   |  2 +-
 6 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/sdk/openai/inference/examples/azure_chat_completions.rs b/sdk/openai/inference/examples/azure_chat_completions.rs
index b262fb08b4..736f852a1a 100644
--- a/sdk/openai/inference/examples/azure_chat_completions.rs
+++ b/sdk/openai/inference/examples/azure_chat_completions.rs
@@ -1,4 +1,3 @@
-use azure_core::Result;
 use azure_openai_inference::{
     clients::{AzureOpenAIClient, AzureOpenAIClientMethods, ChatCompletionsClientMethods},
     request::CreateChatCompletionsRequest,
@@ -7,7 +6,7 @@ use azure_openai_inference::{
 
 // This example illustrates how to use Azure OpenAI with key credential authentication to generate a chat completion.
 #[tokio::main]
-pub async fn main() -> Result<()> {
+pub async fn main() {
     let endpoint =
         std::env::var("AZURE_OPENAI_ENDPOINT").expect("Set AZURE_OPENAI_ENDPOINT env variable");
     let secret = std::env::var("AZURE_OPENAI_KEY").expect("Set AZURE_OPENAI_KEY env variable");
@@ -20,7 +19,8 @@ pub async fn main() -> Result<()> {
                 .with_api_version(AzureServiceVersion::V2023_12_01Preview)
                 .build(),
         ),
-    )?
+    )
+    .unwrap()
     .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message(
@@ -34,12 +34,14 @@ pub async fn main() -> Result<()> {
 
     match response {
         Ok(chat_completions_response) => {
-            let chat_completions = chat_completions_response.deserialize_body().await?;
+            let chat_completions = chat_completions_response
+                .deserialize_body()
+                .await
+                .expect("Failed to deserialize response");
             println!("{:#?}", &chat_completions);
         }
         Err(e) => {
             println!("Error: {}", e);
         }
     };
-    Ok(())
 }
diff --git a/sdk/openai/inference/examples/azure_chat_completions_aad.rs b/sdk/openai/inference/examples/azure_chat_completions_aad.rs
index d2140cc519..09310b03ab 100644
--- a/sdk/openai/inference/examples/azure_chat_completions_aad.rs
+++ b/sdk/openai/inference/examples/azure_chat_completions_aad.rs
@@ -1,6 +1,5 @@
 use std::sync::Arc;
 
-use azure_core::Result;
 use azure_identity::DefaultAzureCredentialBuilder;
 use azure_openai_inference::{
     clients::{AzureOpenAIClient, AzureOpenAIClientMethods, ChatCompletionsClientMethods},
@@ -10,19 +9,20 @@ use azure_openai_inference::{
 
 /// This example illustrates how to use Azure OpenAI Chat Completions with Azure Active Directory authentication.
 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() {
     let endpoint =
         std::env::var("AZURE_OPENAI_ENDPOINT").expect("Set AZURE_OPENAI_ENDPOINT env variable");
 
     let chat_completions_client = AzureOpenAIClient::new(
         endpoint,
-        Arc::new(DefaultAzureCredentialBuilder::new().build()?),
+        Arc::new(DefaultAzureCredentialBuilder::new().build().unwrap()),
         Some(
             AzureOpenAIClientOptions::builder()
                 .with_api_version(AzureServiceVersion::V2023_12_01Preview)
                 .build(),
         ),
-    )?
+    )
+    .unwrap()
     .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message(
@@ -36,13 +36,14 @@ async fn main() -> Result<()> {
 
     match response {
         Ok(chat_completions_response) => {
-            let chat_completions = chat_completions_response.deserialize_body().await?;
+            let chat_completions = chat_completions_response
+                .deserialize_body()
+                .await
+                .expect("Failed to deserialize response");
             println!("{:#?}", &chat_completions);
         }
         Err(e) => {
             println!("Error: {}", e);
         }
     };
-
-    Ok(())
 }
diff --git a/sdk/openai/inference/examples/azure_chat_completions_stream.rs b/sdk/openai/inference/examples/azure_chat_completions_stream.rs
index 9352b0e677..4fdf66547b 100644
--- a/sdk/openai/inference/examples/azure_chat_completions_stream.rs
+++ b/sdk/openai/inference/examples/azure_chat_completions_stream.rs
@@ -1,4 +1,3 @@
-use azure_core::Result;
 use azure_openai_inference::{
     clients::{AzureOpenAIClient, AzureOpenAIClientMethods, ChatCompletionsClientMethods},
     request::CreateChatCompletionsRequest,
@@ -9,7 +8,7 @@ use std::io::{self, Write};
 
 /// This example illustrates how to use Azure OpenAI with key credential authentication to stream chat completions.
 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() {
     let endpoint =
         std::env::var("AZURE_OPENAI_ENDPOINT").expect("Set AZURE_OPENAI_ENDPOINT env variable");
     let secret = std::env::var("AZURE_OPENAI_KEY").expect("Set AZURE_OPENAI_KEY env variable");
@@ -22,7 +21,8 @@ async fn main() -> Result<()> {
                 .with_api_version(AzureServiceVersion::V2023_12_01Preview)
                 .build(),
         ),
-    )?
+    )
+    .unwrap()
     .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message_and_stream(
@@ -32,7 +32,8 @@ async fn main() -> Result<()> {
 
     let response = chat_completions_client
         .stream_chat_completions(&chat_completions_request.model, &chat_completions_request)
-        .await?;
+        .await
+        .unwrap();
 
     // this pins the stream to the stack so it is safe to poll it (namely, it won't be deallocated or moved)
     futures::pin_mut!(response);
@@ -52,6 +53,4 @@ async fn main() -> Result<()> {
             Err(e) => println!("Error: {:?}", e),
         }
     }
-
-    Ok(())
 }
diff --git a/sdk/openai/inference/examples/chat_completions.rs b/sdk/openai/inference/examples/chat_completions.rs
index 069b12c800..25fbc08477 100644
--- a/sdk/openai/inference/examples/chat_completions.rs
+++ b/sdk/openai/inference/examples/chat_completions.rs
@@ -5,11 +5,12 @@ use azure_openai_inference::{
 
 /// This example illustrates how to use OpenAI to generate a chat completion.
 #[tokio::main]
-pub async fn main() -> azure_core::Result<()> {
+pub async fn main() {
     let secret = std::env::var("OPENAI_KEY").expect("Set OPENAI_KEY env variable");
 
-    let chat_completions_client =
-        OpenAIClient::with_key_credential(secret, None)?.chat_completions_client();
+    let chat_completions_client = OpenAIClient::with_key_credential(secret, None)
+        .unwrap()
+        .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message(
         "gpt-3.5-turbo-1106",
@@ -22,12 +23,14 @@ pub async fn main() -> azure_core::Result<()> {
 
     match response {
         Ok(chat_completions_response) => {
-            let chat_completions = chat_completions_response.deserialize_body().await?;
+            let chat_completions = chat_completions_response
+                .deserialize_body()
+                .await
+                .expect("Failed to deserialize response");
             println!("{:#?}", &chat_completions);
         }
         Err(e) => {
             println!("Error: {}", e);
         }
     };
-    Ok(())
 }
diff --git a/sdk/openai/inference/examples/chat_completions_stream.rs b/sdk/openai/inference/examples/chat_completions_stream.rs
index cf85577a36..8f2492e574 100644
--- a/sdk/openai/inference/examples/chat_completions_stream.rs
+++ b/sdk/openai/inference/examples/chat_completions_stream.rs
@@ -1,4 +1,3 @@
-use azure_core::Result;
 use azure_openai_inference::{
     clients::{ChatCompletionsClientMethods, OpenAIClient, OpenAIClientMethods},
     request::CreateChatCompletionsRequest,
@@ -8,11 +7,12 @@ use std::io::{self, Write};
 
 /// This example illustrates how to use OpenAI to stream chat completions.
 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() {
     let secret = std::env::var("OPENAI_KEY").expect("Set OPENAI_KEY env variable");
 
-    let chat_completions_client =
-        OpenAIClient::with_key_credential(secret, None)?.chat_completions_client();
+    let chat_completions_client = OpenAIClient::with_key_credential(secret, None)
+        .unwrap()
+        .chat_completions_client();
 
     let chat_completions_request = CreateChatCompletionsRequest::with_user_message_and_stream(
         "gpt-3.5-turbo-1106",
@@ -21,7 +21,8 @@ async fn main() -> Result<()> {
 
     let response = chat_completions_client
         .stream_chat_completions(&chat_completions_request.model, &chat_completions_request)
-        .await?;
+        .await
+        .unwrap();
 
     // this pins the stream to the stack so it is safe to poll it (namely, it won't be deallocated or moved)
     futures::pin_mut!(response);
@@ -41,6 +42,4 @@ async fn main() -> Result<()> {
             Err(e) => println!("Error: {:?}", e),
         }
     }
-
-    Ok(())
 }
diff --git a/sdk/openai/inference/src/lib.rs b/sdk/openai/inference/src/lib.rs
index 28387bd322..8251ecf5f0 100644
--- a/sdk/openai/inference/src/lib.rs
+++ b/sdk/openai/inference/src/lib.rs
@@ -1,5 +1,5 @@
-mod credentials;
 pub mod clients;
+mod credentials;
 mod helpers;
 mod models;
 mod options;