From 445153440179593d9fba453f158a0788e586896c Mon Sep 17 00:00:00 2001
From: Jackson Chen <541898146chen@gmail.com>
Date: Sun, 3 Nov 2024 18:11:29 -0600
Subject: [PATCH] feat: Refactor OpenAIModelProvider to improve streaming response

This commit refactors the OpenAIModelProvider class to improve the streaming
response when generating chat completions with OpenAI. It updates the messages
array so that the system prompt and the user message are always sent together
(previously only one of the two was included), and removes unnecessary
comments. This change aims to improve the reliability of the streaming
response feature.

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
---
 llm-server/src/model/openai-model-provider.ts | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llm-server/src/model/openai-model-provider.ts b/llm-server/src/model/openai-model-provider.ts
index b3e8c17e..93c990c2 100644
--- a/llm-server/src/model/openai-model-provider.ts
+++ b/llm-server/src/model/openai-model-provider.ts
@@ -35,10 +35,10 @@ export class OpenAIModelProvider extends ModelProvider {
     // Get the system prompt based on the model
     const systemPrompt = systemPrompts['codefox-basic']?.systemPrompt || '';
 
-    // Prepare the messages array, including system prompt if available
-    const messages: ChatCompletionMessageParam[] = systemPrompt
-      ? [{ role: 'system', content: systemPrompt }]
-      : [{ role: role as 'user' | 'system' | 'assistant', content: message }];
+    const messages: ChatCompletionMessageParam[] = [
+      { role: 'system', content: systemPrompt },
+      { role: role as 'user' | 'system' | 'assistant', content: message },
+    ];
 
     try {
       const stream = await this.openai.chat.completions.create({
@@ -46,6 +46,7 @@
         messages,
         stream: true,
       });
+
       let chunkCount = 0;
       for await (const chunk of stream) {
         const content = chunk.choices[0]?.delta?.content || '';
@@ -55,6 +56,7 @@
           res.write(`data: ${JSON.stringify(chunk)}\n\n`);
         }
       }
+
       const endTime = Date.now();
       this.logger.log(
         `Response generation completed. Total chunks: ${chunkCount}`,
@@ -73,20 +75,18 @@
   async getModelTagsResponse(res: Response): Promise<void> {
     this.logger.log('Fetching available models from OpenAI...');
 
-    // Set SSE headers
     res.writeHead(200, {
       'Content-Type': 'text/event-stream',
       'Cache-Control': 'no-cache',
       Connection: 'keep-alive',
     });
+
     try {
       const startTime = Date.now();
      const models = await this.openai.models.list();
-
      const response = {
-        models: models, // Wrap the models in the required structure
+        models: models,
      };
-
      const endTime = Date.now();
      this.logger.log(
        `Model fetching completed. Total models: ${models.data.length}`,
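
Note (not part of the patch): a minimal sketch of the streaming flow this change produces, assuming the OpenAI Node SDK v4 client and an Express Response. The function name, model name, and SSE framing details are assumptions for illustration, not taken from the repository.

import OpenAI from 'openai';
import { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
import { Response } from 'express';

// Illustrative sketch: the messages array now always carries both the system
// prompt and the user message, mirroring the refactored code in the patch.
async function streamChatCompletion(
  openai: OpenAI,
  res: Response,
  message: string,
  role: 'user' | 'system' | 'assistant',
  systemPrompt: string,
): Promise<void> {
  const messages: ChatCompletionMessageParam[] = [
    { role: 'system', content: systemPrompt },
    { role: role, content: message },
  ];

  const stream = await openai.chat.completions.create({
    model: 'gpt-4o-mini', // model name is an assumption, not from the patch
    messages,
    stream: true,
  });

  // Relay each chunk to the client as a Server-Sent Event.
  for await (const chunk of stream) {
    const content = chunk.choices[0]?.delta?.content || '';
    if (content) {
      res.write(`data: ${JSON.stringify(chunk)}\n\n`);
    }
  }

  res.write('data: [DONE]\n\n');
  res.end();
}

In the actual provider, the SSE headers set via res.writeHead and the chunk-count and timing logs shown in the diff would wrap this loop.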