feat: Refactor OpenAIModelProvider to improve streaming response
This commit refactors the OpenAIModelProvider class to improve the streaming response when generating chat completions with OpenAI. The messages array now always includes both the system prompt and the user message, and unnecessary comments have been removed. The change aims to improve the performance and reliability of the streaming response feature.

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Sma1lboy and autofix-ci[bot] committed Nov 4, 2024
1 parent c24518f commit 4451534
Showing 1 changed file with 8 additions and 8 deletions.
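
Before the diff itself, a minimal sketch of what the messages change amounts to may help. This assumes the openai v4 Node SDK's type exports; the literal inputs below are hypothetical stand-ins for the request values the provider receives.

import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

// Hypothetical stand-ins for the values taken from the incoming request
// and the systemPrompts registry.
const systemPrompt = 'You are CodeFox, a helpful coding assistant.';
const role = 'user';
const message = 'Explain how streaming responses work.';

// After this commit the system prompt is always the first message and the
// caller's message always follows it.
const messages: ChatCompletionMessageParam[] = [
  { role: 'system', content: systemPrompt },
  { role: role as 'user' | 'system' | 'assistant', content: message },
];

Judging from the removed ternary below, the old code sent either the system prompt or the user's message, never both; the new array always sends both, which is presumably what "improve the streaming response" refers to.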
llm-server/src/model/openai-model-provider.ts (16 changes: 8 additions & 8 deletions)
@@ -35,17 +35,18 @@ export class OpenAIModelProvider extends ModelProvider {
     // Get the system prompt based on the model
     const systemPrompt = systemPrompts['codefox-basic']?.systemPrompt || '';
 
-    // Prepare the messages array, including system prompt if available
-    const messages: ChatCompletionMessageParam[] = systemPrompt
-      ? [{ role: 'system', content: systemPrompt }]
-      : [{ role: role as 'user' | 'system' | 'assistant', content: message }];
+    const messages: ChatCompletionMessageParam[] = [
+      { role: 'system', content: systemPrompt },
+      { role: role as 'user' | 'system' | 'assistant', content: message },
+    ];
 
     try {
       const stream = await this.openai.chat.completions.create({
         model,
         messages,
         stream: true,
       });
+
       let chunkCount = 0;
       for await (const chunk of stream) {
         const content = chunk.choices[0]?.delta?.content || '';
@@ -55,6 +56,7 @@ export class OpenAIModelProvider extends ModelProvider {
           res.write(`data: ${JSON.stringify(chunk)}\n\n`);
         }
       }
+
       const endTime = Date.now();
       this.logger.log(
         `Response generation completed. Total chunks: ${chunkCount}`,
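
Taken together, the two hunks above show the streaming path: the provider requests a streamed completion and forwards each chunk to the HTTP client as a server-sent event. A minimal, self-contained sketch of that pattern, assuming Express and the openai v4 Node SDK; the standalone function name, the chunkCount increment, and the final res.end() are illustrative, since the diff does not show those lines.

import OpenAI from 'openai';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
import type { Response } from 'express';

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Stream a chat completion and forward each non-empty chunk as an SSE frame.
async function relayChatStream(
  res: Response,
  model: string,
  messages: ChatCompletionMessageParam[],
): Promise<void> {
  const stream = await openai.chat.completions.create({
    model,
    messages,
    stream: true,
  });

  let chunkCount = 0;
  for await (const chunk of stream) {
    const content = chunk.choices[0]?.delta?.content || '';
    if (content) {
      chunkCount++;
      // As in the provider: the raw chunk object is the SSE payload.
      res.write(`data: ${JSON.stringify(chunk)}\n\n`);
    }
  }
  console.log(`Response generation completed. Total chunks: ${chunkCount}`);
  res.end(); // illustrative; the diff truncates before the stream is closed
}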
@@ -73,20 +75,18 @@
 
   async getModelTagsResponse(res: Response): Promise<void> {
     this.logger.log('Fetching available models from OpenAI...');
-    // Set SSE headers
     res.writeHead(200, {
       'Content-Type': 'text/event-stream',
       'Cache-Control': 'no-cache',
       Connection: 'keep-alive',
     });
 
     try {
       const startTime = Date.now();
       const models = await this.openai.models.list();
 
       const response = {
-        models: models, // Wrap the models in the required structure
+        models: models,
       };
 
       const endTime = Date.now();
       this.logger.log(
         `Model fetching completed. Total models: ${models.data.length}`,
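
The final hunk only strips comments from getModelTagsResponse, which uses the same SSE headers to return the model list in a { models } envelope. A standalone sketch of that shape, again assuming Express and the openai v4 SDK; the function name, error frame, and res.end() are illustrative, since the diff is truncated before the catch block.

import OpenAI from 'openai';
import type { Response } from 'express';

const openai = new OpenAI();

// Hypothetical standalone version of getModelTagsResponse.
async function getModelTags(res: Response): Promise<void> {
  // Same SSE headers the provider sets before writing any frames.
  res.writeHead(200, {
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache',
    Connection: 'keep-alive',
  });

  try {
    const models = await openai.models.list();
    const response = { models: models }; // the { models } envelope from the diff
    console.log(`Model fetching completed. Total models: ${models.data.length}`);
    res.write(`data: ${JSON.stringify(response)}\n\n`);
  } catch (error) {
    // Illustrative error frame; the original error handling is not shown.
    res.write(`data: ${JSON.stringify({ error: 'Failed to fetch models' })}\n\n`);
  } finally {
    res.end();
  }
}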
