
Commit

fix: remove deprecated stuff
Signed-off-by: Jan Pokorný <[email protected]>
JanPokorny committed Jan 14, 2025
1 parent 5ce9395 commit ececb2b
Showing 5 changed files with 0 additions and 55 deletions.
2 changes: 0 additions & 2 deletions .env.example
@@ -63,8 +63,6 @@ WATSONX_API_KEY=
 WATSONX_PROJECT_ID=
 WATSONX_REGION=
 
-BAM_API_KEY=
-
 # Must contain port, can contain {model_id} placeholder, e.g. "{model_id}.inference.example.com:443"
 IBM_VLLM_URL=
 IBM_VLLM_ROOT_CERT=
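Note on the IBM_VLLM_URL comment retained above: the {model_id} placeholder implies a per-model endpoint. A minimal TypeScript sketch of such a substitution, using a hypothetical helper name (the actual resolution logic lives outside this diff):

// Hypothetical sketch only; resolveVllmUrl is not a function in this repository.
function resolveVllmUrl(template: string, modelId: string): string {
  // "{model_id}.inference.example.com:443" -> "my-model.inference.example.com:443"
  return template.replace('{model_id}', modelId);
}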
2 changes: 0 additions & 2 deletions src/config.ts
@@ -92,8 +92,6 @@ export const IBM_VLLM_PRIVATE_KEY = getEnv('IBM_VLLM_PRIVATE_KEY', null);
 
 export const OPENAI_API_KEY = getEnv('OPENAI_API_KEY', null);
 
-export const BAM_API_KEY = getEnv('BAM_API_KEY', null);
-
 export const WATSONX_API_KEY = getEnv('WATSONX_API_KEY', null);
 export const WATSONX_PROJECT_ID = getEnv('WATSONX_PROJECT_ID', null);
 export const WATSONX_REGION = getEnv('WATSONX_REGION', null);
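The surviving getEnv calls all follow the same pattern: read an environment variable, fall back to null. The helper itself is defined elsewhere in src/config.ts; a plausible minimal shape, inferred from the call sites and not shown in this diff, would be:

// Assumed shape of getEnv, inferred from the usage above; the real implementation may differ.
function getEnv(name: string, fallback: string | null = null): string | null {
  const value = process.env[name];
  return value !== undefined && value !== '' ? value : fallback;
}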
1 change: 0 additions & 1 deletion src/runs/execution/constants.ts
@@ -27,7 +27,6 @@ export const AIBackend = {
   OLLAMA: 'ollama',
   IBM_VLLM: 'ibm-vllm',
   OPENAI: 'openai',
-  BAM: 'bam',
   WATSONX: 'watsonx'
 } as const;
 
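With the 'bam' member removed, the AIBackend object declared "as const" enumerates four backends. Such a const object is commonly paired with a derived union type; a sketch of that companion type (an assumption, not shown in this diff):

// Assumed companion type: yields 'ollama' | 'ibm-vllm' | 'openai' | 'watsonx'.
type AIBackend = (typeof AIBackend)[keyof typeof AIBackend];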
49 changes: 0 additions & 49 deletions src/runs/execution/provider.ts
@@ -1,10 +1,6 @@
 import { ChatLLM, ChatLLMOutput } from 'bee-agent-framework/llms/chat';
 import { LLM, LLMInput } from 'bee-agent-framework/llms/llm';
 import { BaseLLMOutput, EmbeddingOptions, EmbeddingOutput } from 'bee-agent-framework/llms/base';
-import { BAMChatLLM } from 'bee-agent-framework/adapters/bam/chat';
-import { BAMLLM } from 'bee-agent-framework/adapters/bam/llm';
-import { Client as BAMClient } from '@ibm-generative-ai/node-sdk';
-import { BAMChatLLMPresetModel } from 'bee-agent-framework/adapters/bam/chatPreset';
 import { OllamaChatLLM } from 'bee-agent-framework/adapters/ollama/chat';
 import { OllamaLLM } from 'bee-agent-framework/adapters/ollama/llm';
 import { Ollama } from 'ollama';
@@ -24,7 +20,6 @@ import { WatsonXLLM } from 'bee-agent-framework/adapters/watsonx/llm';
 
 import {
   AI_BACKEND,
-  BAM_API_KEY,
   IBM_VLLM_CERT_CHAIN,
   IBM_VLLM_PRIVATE_KEY,
   IBM_VLLM_ROOT_CERT,
@@ -56,49 +51,6 @@ interface AIProvider<
   createEmbeddingBackend?: (params?: { model?: string }) => EmbeddingModel;
 }
 
-export class BamAIProvider implements AIProvider<BAMChatLLM, BAMLLM> {
-  static client: BAMClient;
-
-  constructor() {
-    BamAIProvider.client ??= new BAMClient({ apiKey: BAM_API_KEY ?? undefined });
-  }
-
-  createChatBackend({
-    model = 'meta-llama/llama-3-1-70b-instruct',
-    ...params
-  }: ChatLLMParams = {}) {
-    return BAMChatLLM.fromPreset(model as BAMChatLLMPresetModel, {
-      client: BamAIProvider.client,
-      parameters: (parameters) => ({
-        ...parameters,
-        top_p: params.topP ?? parameters.top_p,
-        temperature: params.temperature ?? parameters.temperature,
-        max_new_tokens: MAX_NEW_TOKENS
-      })
-    });
-  }
-
-  createAssistantBackend(params?: ChatLLMParams) {
-    return this.createChatBackend(params);
-  }
-
-  createCodeBackend({ model = 'meta-llama/llama-3-1-70b-instruct' } = {}) {
-    return new BAMLLM({
-      client: BamAIProvider.client,
-      modelId: model,
-      parameters: {
-        decoding_method: 'greedy',
-        include_stop_sequence: false,
-        max_new_tokens: MAX_NEW_TOKENS
-      }
-    });
-  }
-
-  createEmbeddingBackend({ model = 'baai/bge-large-en-v1.5' } = {}) {
-    return new BAMLLM({ client: BamAIProvider.client, modelId: model });
-  }
-}
-
 export class OllamaAIProvider implements AIProvider<OllamaChatLLM, OllamaLLM> {
   static client: Ollama;
 
@@ -278,7 +230,6 @@ export const aiProviderByBackend = {
   [AIBackend.OLLAMA]: OllamaAIProvider,
   [AIBackend.IBM_VLLM]: IBMvLLMAIProvider,
   [AIBackend.OPENAI]: OpenAIProvider,
-  [AIBackend.BAM]: BamAIProvider,
   [AIBackend.WATSONX]: WatsonxAIProvider
 };
 
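The aiProviderByBackend map is the extension point this commit shrinks: dropping the BAM entry is what forces the class and import deletions above. A hedged sketch of how such a registry is typically consumed (the actual call site is outside this diff, so these lines are assumptions):

// Assumed consumption pattern; AI_BACKEND comes from src/config.ts.
const ProviderClass = aiProviderByBackend[AI_BACKEND as keyof typeof aiProviderByBackend];
const provider = new ProviderClass();
const chat = provider.createChatBackend({ temperature: 0 });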
1 change: 0 additions & 1 deletion src/runs/execution/tools/helpers.ts
@@ -190,7 +190,6 @@ export async function getTools(run: LoadedRun, context: AgentContext): Promise<F
   tools.push(
     new PythonTool({
       codeInterpreter,
-      executorId,
       storage: createPythonStorage(files, run),
       preprocess: codeLLM
         ? {

0 comments on commit ececb2b
