diff --git a/docSite/content/zh-cn/docs/development/configuration.md b/docSite/content/zh-cn/docs/development/configuration.md index 4663895a2585..f9ee74cbd356 100644 --- a/docSite/content/zh-cn/docs/development/configuration.md +++ b/docSite/content/zh-cn/docs/development/configuration.md @@ -199,8 +199,8 @@ weight: 707 - OpenAI - Claude - Gemini -- Meta - MistralAI +- Groq - AliCloud - 阿里云 - Qwen - 通义千问 - Doubao - 豆包 diff --git a/packages/service/core/ai/config/embedding/text-embedding-004.json b/packages/service/core/ai/config/embedding/text-embedding-004.json new file mode 100644 index 000000000000..9b63190b2652 --- /dev/null +++ b/packages/service/core/ai/config/embedding/text-embedding-004.json @@ -0,0 +1,10 @@ +{ + "provider": "Gemini", + "model": "text-embedding-004", + "name": "text-embedding-004", + + "defaultToken": 512, + "maxToken": 2000, + + "charsPointsPrice": 0 +} diff --git a/packages/service/core/ai/config/embedding/text-embedding-3-large.json b/packages/service/core/ai/config/embedding/text-embedding-3-large.json new file mode 100644 index 000000000000..cfc87467cdd5 --- /dev/null +++ b/packages/service/core/ai/config/embedding/text-embedding-3-large.json @@ -0,0 +1,14 @@ +{ + "provider": "OpenAI", + "model": "text-embedding-3-large", + "name": "text-embedding-3-large", + + "defaultToken": 512, + "maxToken": 3000, + + "charsPointsPrice": 0, + + "defaultConfig": { + "dimensions": 1024 + } +} diff --git a/packages/service/core/ai/config/llm/Doubao-lite-128k.json b/packages/service/core/ai/config/llm/Doubao-lite-128k.json new file mode 100644 index 000000000000..85b2863c88f5 --- /dev/null +++ b/packages/service/core/ai/config/llm/Doubao-lite-128k.json @@ -0,0 +1,29 @@ +{ + "provider": "Doubao", + "model": "Doubao-lite-128k", + "name": "Doubao-lite-128k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/Doubao-lite-32k.json b/packages/service/core/ai/config/llm/Doubao-lite-32k.json new file mode 100644 index 000000000000..24a0e1316de1 --- /dev/null +++ b/packages/service/core/ai/config/llm/Doubao-lite-32k.json @@ -0,0 +1,29 @@ +{ + "provider": "Doubao", + "model": "Doubao-lite-32k", + "name": "Doubao-lite-32k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 30000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/Doubao-pro-128k.json b/packages/service/core/ai/config/llm/Doubao-pro-128k.json new file mode 100644 index 000000000000..dfa95808d543 --- /dev/null +++ b/packages/service/core/ai/config/llm/Doubao-pro-128k.json @@ -0,0 +1,29 @@ +{ + "provider": "Doubao", + "model": "Doubao-pro-128k", + "name": "Doubao-pro-128k", + + "censor": false, + "charsPointsPrice": 0, + + 
"maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/Doubao-pro-32k.json b/packages/service/core/ai/config/llm/Doubao-pro-32k.json new file mode 100644 index 000000000000..eab9bd21d41a --- /dev/null +++ b/packages/service/core/ai/config/llm/Doubao-pro-32k.json @@ -0,0 +1,29 @@ +{ + "provider": "Doubao", + "model": "Doubao-pro-32k", + "name": "Doubao-pro-32k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 30000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/Doubao-vision-lite-32k.json b/packages/service/core/ai/config/llm/Doubao-vision-lite-32k.json new file mode 100644 index 000000000000..9a3c89c82ac8 --- /dev/null +++ b/packages/service/core/ai/config/llm/Doubao-vision-lite-32k.json @@ -0,0 +1,29 @@ +{ + "provider": "Doubao", + "model": "Doubao-vision-lite-32k", + "name": "Doubao-vision-lite-32k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 30000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/Doubao-vision-pro-32k.json b/packages/service/core/ai/config/llm/Doubao-vision-pro-32k.json new file mode 100644 index 000000000000..e3d6f8939c2c --- /dev/null +++ b/packages/service/core/ai/config/llm/Doubao-vision-pro-32k.json @@ -0,0 +1,29 @@ +{ + "provider": "Doubao", + "model": "Doubao-vision-pro-32k", + "name": "Doubao-vision-pro-32k", + + "censor": false, + "charsPointsPrice": 2, + + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 30000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/ERNIE-4.0-8K.json b/packages/service/core/ai/config/llm/ERNIE-4.0-8K.json new file mode 100644 index 000000000000..8beef5a02e85 --- /dev/null +++ b/packages/service/core/ai/config/llm/ERNIE-4.0-8K.json @@ -0,0 +1,29 @@ +{ + "provider": "Ernie", + "model": "ERNIE-4.0-8K", + "name": "ERNIE-4.0-8K", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 2048, + "quoteMaxToken": 5000, + "maxTemperature": 1, + + "vision": 
false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/ERNIE-4.0-Turbo-8K.json b/packages/service/core/ai/config/llm/ERNIE-4.0-Turbo-8K.json new file mode 100644 index 000000000000..dd81db52ea9c --- /dev/null +++ b/packages/service/core/ai/config/llm/ERNIE-4.0-Turbo-8K.json @@ -0,0 +1,29 @@ +{ + "provider": "Ernie", + "model": "ERNIE-4.0-Turbo-8K", + "name": "ERNIE-4.0-Turbo-8K", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 2048, + "quoteMaxToken": 5000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/ERNIE-Lite-8K.json b/packages/service/core/ai/config/llm/ERNIE-Lite-8K.json new file mode 100644 index 000000000000..5288e690e6da --- /dev/null +++ b/packages/service/core/ai/config/llm/ERNIE-Lite-8K.json @@ -0,0 +1,29 @@ +{ + "provider": "Ernie", + "model": "ERNIE-Lite-8K", + "name": "ERNIE-lite-8k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 2048, + "quoteMaxToken": 6000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/ERNIE-Speed-128K.json b/packages/service/core/ai/config/llm/ERNIE-Speed-128K.json new file mode 100644 index 000000000000..6765e5acc0cb --- /dev/null +++ b/packages/service/core/ai/config/llm/ERNIE-Speed-128K.json @@ -0,0 +1,29 @@ +{ + "provider": "Ernie", + "model": "ERNIE-Speed-128K", + "name": "ERNIE-Speed-128K(测试)", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 4096, + "quoteMaxToken": 120000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/MiniMax-Text-01.json b/packages/service/core/ai/config/llm/MiniMax-Text-01.json new file mode 100644 index 000000000000..9d5ae20b42bb --- /dev/null +++ b/packages/service/core/ai/config/llm/MiniMax-Text-01.json @@ -0,0 +1,29 @@ +{ + "provider": "MiniMax", + "model": "MiniMax-Text-01", + "name": "MiniMax-Text-01", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 1000000, + "maxResponse": 1000000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": 
true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/SparkDesk-v3.5.json b/packages/service/core/ai/config/llm/SparkDesk-v3.5.json new file mode 100644 index 000000000000..f4fec9f0f047 --- /dev/null +++ b/packages/service/core/ai/config/llm/SparkDesk-v3.5.json @@ -0,0 +1,22 @@ +{ + "provider": "SparkDesk", + "model": "SparkDesk-v3.5", + "name": "SparkDesk-v3.5", + "maxContext": 8000, + "maxResponse": 8000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + "charsPointsPrice": 0.3, + "censor": false, + "vision": false, + "datasetProcess": false, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "" +} diff --git a/packages/service/core/ai/config/llm/SparkDesk-v4.0.json b/packages/service/core/ai/config/llm/SparkDesk-v4.0.json new file mode 100644 index 000000000000..49416ee417a6 --- /dev/null +++ b/packages/service/core/ai/config/llm/SparkDesk-v4.0.json @@ -0,0 +1,29 @@ +{ + "provider": "SparkDesk", + "model": "SparkDesk-v4.0", + "name": "SparkDesk-v4.0", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 8000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/abab6.5s-chat.json b/packages/service/core/ai/config/llm/abab6.5s-chat.json new file mode 100644 index 000000000000..d9decfc0f2fe --- /dev/null +++ b/packages/service/core/ai/config/llm/abab6.5s-chat.json @@ -0,0 +1,29 @@ +{ + "provider": "MiniMax", + "model": "abab6.5s-chat", + "name": "MiniMax-abab6.5s", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 245000, + "maxResponse": 10000, + "quoteMaxToken": 240000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/claude-3-5-haiku-20241022.json b/packages/service/core/ai/config/llm/claude-3-5-haiku-20241022.json new file mode 100644 index 000000000000..a775495dae6f --- /dev/null +++ b/packages/service/core/ai/config/llm/claude-3-5-haiku-20241022.json @@ -0,0 +1,29 @@ +{ + "provider": "Claude", + "model": "claude-3-5-haiku-20241022", + "name": "claude-3-5-haiku-20241022", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 200000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + 
"defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/claude-3-5-sonnet-20240620.json b/packages/service/core/ai/config/llm/claude-3-5-sonnet-20240620.json new file mode 100644 index 000000000000..dc4e6f002f69 --- /dev/null +++ b/packages/service/core/ai/config/llm/claude-3-5-sonnet-20240620.json @@ -0,0 +1,29 @@ +{ + "provider": "Claude", + "model": "claude-3-5-sonnet-20240620", + "name": "Claude-3-5-sonnet-20240620", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 200000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/claude-3-5-sonnet-20241022.json b/packages/service/core/ai/config/llm/claude-3-5-sonnet-20241022.json new file mode 100644 index 000000000000..5b0dca6d6cf3 --- /dev/null +++ b/packages/service/core/ai/config/llm/claude-3-5-sonnet-20241022.json @@ -0,0 +1,29 @@ +{ + "provider": "Claude", + "model": "claude-3-5-sonnet-20241022", + "name": "Claude-3-5-sonnet-20241022", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 200000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/claude-3-opus-20240229.json b/packages/service/core/ai/config/llm/claude-3-opus-20240229.json new file mode 100644 index 000000000000..e088b63020c5 --- /dev/null +++ b/packages/service/core/ai/config/llm/claude-3-opus-20240229.json @@ -0,0 +1,29 @@ +{ + "provider": "Claude", + "model": "claude-3-opus-20240229", + "name": "claude-3-opus-20240229", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 200000, + "maxResponse": 4096, + "quoteMaxToken": 100000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/deepseek-chat.json b/packages/service/core/ai/config/llm/deepseek-chat.json new file mode 100644 index 000000000000..729387e16dba --- /dev/null +++ b/packages/service/core/ai/config/llm/deepseek-chat.json @@ -0,0 +1,26 @@ +{ + "provider": "DeepSeek", + "model": "deepseek-chat", + "name": "Deepseek-chat", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 64000, + "maxResponse": 4096, + "quoteMaxToken": 60000, + "maxTemperature": 1.5, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true +} diff --git 
a/packages/service/core/ai/config/llm/deepseek-reasoner.json b/packages/service/core/ai/config/llm/deepseek-reasoner.json new file mode 100644 index 000000000000..3971afff8d59 --- /dev/null +++ b/packages/service/core/ai/config/llm/deepseek-reasoner.json @@ -0,0 +1,31 @@ +{ + "provider": "DeepSeek", + "model": "deepseek-reasoner", + "name": "Deepseek-reasoner", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 64000, + "maxResponse": 4096, + "quoteMaxToken": 60000, + "maxTemperature": 1.5, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": { + "temperature": null + }, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/gemini-1.5-flash.json b/packages/service/core/ai/config/llm/gemini-1.5-flash.json new file mode 100644 index 000000000000..597e625e693f --- /dev/null +++ b/packages/service/core/ai/config/llm/gemini-1.5-flash.json @@ -0,0 +1,29 @@ +{ + "provider": "Gemini", + "model": "gemini-1.5-flash", + "name": "Gemini-1.5-flash", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 1000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/gemini-1.5-pro.json b/packages/service/core/ai/config/llm/gemini-1.5-pro.json new file mode 100644 index 000000000000..ed9489d595df --- /dev/null +++ b/packages/service/core/ai/config/llm/gemini-1.5-pro.json @@ -0,0 +1,29 @@ +{ + "provider": "Gemini", + "model": "gemini-1.5-pro", + "name": "Gemini-1.5-pro", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 2000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/gemini-2.0-flash-exp.json b/packages/service/core/ai/config/llm/gemini-2.0-flash-exp.json new file mode 100644 index 000000000000..a55f83b4ad0e --- /dev/null +++ b/packages/service/core/ai/config/llm/gemini-2.0-flash-exp.json @@ -0,0 +1,29 @@ +{ + "provider": "Gemini", + "model": "gemini-2.0-flash-exp", + "name": "Gemini-2.0-flash-exp", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 1000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/gemini-2.0-flash-thinking-exp.json 
b/packages/service/core/ai/config/llm/gemini-2.0-flash-thinking-exp.json new file mode 100644 index 000000000000..13e563852fe3 --- /dev/null +++ b/packages/service/core/ai/config/llm/gemini-2.0-flash-thinking-exp.json @@ -0,0 +1,29 @@ +{ + "provider": "Gemini", + "model": "gemini-2.0-flash-thinking-exp-01-21", + "name": "Gemini-2.0-flash-thinking-exp", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 1000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/glm-4-air.json b/packages/service/core/ai/config/llm/glm-4-air.json new file mode 100644 index 000000000000..d6dde05af9de --- /dev/null +++ b/packages/service/core/ai/config/llm/glm-4-air.json @@ -0,0 +1,29 @@ +{ + "provider": "ChatGLM", + "model": "glm-4-air", + "name": "glm-4-air", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 0.99, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/glm-4-flash.json b/packages/service/core/ai/config/llm/glm-4-flash.json new file mode 100644 index 000000000000..f1fd0a61095d --- /dev/null +++ b/packages/service/core/ai/config/llm/glm-4-flash.json @@ -0,0 +1,29 @@ +{ + "provider": "ChatGLM", + "model": "glm-4-flash", + "name": "glm-4-flash", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 0.99, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/glm-4-long.json b/packages/service/core/ai/config/llm/glm-4-long.json new file mode 100644 index 000000000000..623cde0f987c --- /dev/null +++ b/packages/service/core/ai/config/llm/glm-4-long.json @@ -0,0 +1,29 @@ +{ + "provider": "ChatGLM", + "model": "glm-4-long", + "name": "glm-4-long", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 1000000, + "maxResponse": 4000, + "quoteMaxToken": 900000, + "maxTemperature": 0.99, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/glm-4-plus.json b/packages/service/core/ai/config/llm/glm-4-plus.json new file mode 100644 index 000000000000..a040cd9dbca8 --- /dev/null +++ 
b/packages/service/core/ai/config/llm/glm-4-plus.json @@ -0,0 +1,29 @@ +{ + "provider": "ChatGLM", + "model": "glm-4-plus", + "name": "GLM-4-plus", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 0.99, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/glm-4v-flash.json b/packages/service/core/ai/config/llm/glm-4v-flash.json new file mode 100644 index 000000000000..262d39eb288b --- /dev/null +++ b/packages/service/core/ai/config/llm/glm-4v-flash.json @@ -0,0 +1,29 @@ +{ + "provider": "ChatGLM", + "model": "glm-4v-flash", + "name": "glm-4v-flash", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 1000, + "quoteMaxToken": 6000, + "maxTemperature": 0.99, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/glm-4v-plus.json b/packages/service/core/ai/config/llm/glm-4v-plus.json new file mode 100644 index 000000000000..b4139bd72679 --- /dev/null +++ b/packages/service/core/ai/config/llm/glm-4v-plus.json @@ -0,0 +1,29 @@ +{ + "provider": "ChatGLM", + "model": "glm-4v-plus", + "name": "GLM-4v-plus", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 1000, + "quoteMaxToken": 6000, + "maxTemperature": 0.99, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/gpt-3.5-turbo.json b/packages/service/core/ai/config/llm/gpt-3.5-turbo.json new file mode 100644 index 000000000000..09ad7296799e --- /dev/null +++ b/packages/service/core/ai/config/llm/gpt-3.5-turbo.json @@ -0,0 +1,30 @@ +{ + "provider": "OpenAI", + "model": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo (deprecated)", + + "censor": true, + "charsPointsPrice": 1, + + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 13000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": { + "max_tokens": null + } +} diff --git a/packages/service/core/ai/config/llm/gpt-4-turbo.json b/packages/service/core/ai/config/llm/gpt-4-turbo.json new file mode 100644 index 000000000000..316eb21a7a16 --- /dev/null +++ b/packages/service/core/ai/config/llm/gpt-4-turbo.json @@ -0,0 +1,30 @@ +{ + "provider": "OpenAI", + "model": "gpt-4-turbo", + "name": "gpt-4-turbo (deprecated)", + + "censor": true, + "charsPointsPrice": 
15, + + "maxContext": 125000, + "maxResponse": 4000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": { + "max_tokens": null + } +} diff --git a/packages/service/core/ai/config/llm/gpt-4o.json b/packages/service/core/ai/config/llm/gpt-4o.json new file mode 100644 index 000000000000..fc9ede2c8943 --- /dev/null +++ b/packages/service/core/ai/config/llm/gpt-4o.json @@ -0,0 +1,29 @@ +{ + "provider": "OpenAI", + "model": "gpt-4o", + "name": "GPT-4o", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 125000, + "maxResponse": 4000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + + "vision": true, + "toolChoice": true, + "functionCall": true, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/groq-llama-3.1-8b-instant.json b/packages/service/core/ai/config/llm/groq-llama-3.1-8b-instant.json new file mode 100644 index 000000000000..752c76df0b52 --- /dev/null +++ b/packages/service/core/ai/config/llm/groq-llama-3.1-8b-instant.json @@ -0,0 +1,28 @@ +{ + "provider": "Groq", + "model": "llama-3.1-8b-instant", + "name": "Groq-llama-3.1-8b-instant", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 125000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {} +} diff --git a/packages/service/core/ai/config/llm/groq-llama-3.3-70b-versatile copy.json b/packages/service/core/ai/config/llm/groq-llama-3.3-70b-versatile copy.json new file mode 100644 index 000000000000..034b0792478e --- /dev/null +++ b/packages/service/core/ai/config/llm/groq-llama-3.3-70b-versatile copy.json @@ -0,0 +1,28 @@ +{ + "provider": "Groq", + "model": "llama-3.3-70b-versatile", + "name": "Groq-llama-3.3-70b-versatile", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 125000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {} +} diff --git a/packages/service/core/ai/config/llm/hunyuan-large.json b/packages/service/core/ai/config/llm/hunyuan-large.json new file mode 100644 index 000000000000..4dd4b512644c --- /dev/null +++ b/packages/service/core/ai/config/llm/hunyuan-large.json @@ -0,0 +1,29 @@ +{ + "provider": "Hunyuan", + "model": "hunyuan-large", + "name": "hunyuan-large", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 28000, + "maxResponse": 4000, + "quoteMaxToken": 20000, + "maxTemperature": 1, + + "vision": 
false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/hunyuan-lite.json b/packages/service/core/ai/config/llm/hunyuan-lite.json new file mode 100644 index 000000000000..b690783d455a --- /dev/null +++ b/packages/service/core/ai/config/llm/hunyuan-lite.json @@ -0,0 +1,29 @@ +{ + "provider": "Hunyuan", + "model": "hunyuan-lite", + "name": "hunyuan-lite", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 250000, + "maxResponse": 6000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/hunyuan-standard.json b/packages/service/core/ai/config/llm/hunyuan-standard.json new file mode 100644 index 000000000000..d26b8a75ba46 --- /dev/null +++ b/packages/service/core/ai/config/llm/hunyuan-standard.json @@ -0,0 +1,29 @@ +{ + "provider": "Hunyuan", + "model": "hunyuan-standard", + "name": "hunyuan-standard", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 30000, + "maxResponse": 2000, + "quoteMaxToken": 20000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/hunyuan-turbo.json b/packages/service/core/ai/config/llm/hunyuan-turbo.json new file mode 100644 index 000000000000..b3988923b03b --- /dev/null +++ b/packages/service/core/ai/config/llm/hunyuan-turbo.json @@ -0,0 +1,29 @@ +{ + "provider": "Hunyuan", + "model": "hunyuan-turbo", + "name": "hunyuan-turbo", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 28000, + "maxResponse": 4000, + "quoteMaxToken": 20000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/hunyuan-vision.json b/packages/service/core/ai/config/llm/hunyuan-vision.json new file mode 100644 index 000000000000..6d40cb493873 --- /dev/null +++ b/packages/service/core/ai/config/llm/hunyuan-vision.json @@ -0,0 +1,29 @@ +{ + "provider": "Hunyuan", + "model": "hunyuan-vision", + "name": "hunyuan-vision", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 6000, + "maxResponse": 2000, + "quoteMaxToken": 4000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + 
"usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/ministral-3b-latest.json b/packages/service/core/ai/config/llm/ministral-3b-latest.json new file mode 100644 index 000000000000..1df50de9ab8b --- /dev/null +++ b/packages/service/core/ai/config/llm/ministral-3b-latest.json @@ -0,0 +1,29 @@ +{ + "provider": "MistralAI", + "model": "ministral-3b-latest", + "name": "Ministral-3b-latest", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 130000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/ministral-8b-latest.json b/packages/service/core/ai/config/llm/ministral-8b-latest.json new file mode 100644 index 000000000000..3186d2f7806a --- /dev/null +++ b/packages/service/core/ai/config/llm/ministral-8b-latest.json @@ -0,0 +1,29 @@ +{ + "provider": "MistralAI", + "model": "ministral-8b-latest", + "name": "Ministral-8b-latest", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 130000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/mistral-large-latest.json b/packages/service/core/ai/config/llm/mistral-large-latest.json new file mode 100644 index 000000000000..3ae60d5260a0 --- /dev/null +++ b/packages/service/core/ai/config/llm/mistral-large-latest.json @@ -0,0 +1,29 @@ +{ + "provider": "MistralAI", + "model": "mistral-large-latest", + "name": "Mistral-large-latest", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 130000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/mistral-small-latest.json b/packages/service/core/ai/config/llm/mistral-small-latest.json new file mode 100644 index 000000000000..238d406954ad --- /dev/null +++ b/packages/service/core/ai/config/llm/mistral-small-latest.json @@ -0,0 +1,29 @@ +{ + "provider": "MistralAI", + "model": "mistral-small-latest", + "name": "Mistral-small-latest", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 30000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + 
"usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/moonshot-v1-128k.json b/packages/service/core/ai/config/llm/moonshot-v1-128k.json new file mode 100644 index 000000000000..80236fb4625a --- /dev/null +++ b/packages/service/core/ai/config/llm/moonshot-v1-128k.json @@ -0,0 +1,29 @@ +{ + "provider": "Moonshot", + "model": "moonshot-v1-128k", + "name": "月之暗面-128k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/moonshot-v1-32k.json b/packages/service/core/ai/config/llm/moonshot-v1-32k.json new file mode 100644 index 000000000000..18ca06da3dde --- /dev/null +++ b/packages/service/core/ai/config/llm/moonshot-v1-32k.json @@ -0,0 +1,29 @@ +{ + "provider": "Moonshot", + "model": "moonshot-v1-32k", + "name": "月之暗面-32k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 30000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/moonshot-v1-8k.json b/packages/service/core/ai/config/llm/moonshot-v1-8k.json new file mode 100644 index 000000000000..ebd92f45fda1 --- /dev/null +++ b/packages/service/core/ai/config/llm/moonshot-v1-8k.json @@ -0,0 +1,29 @@ +{ + "provider": "Moonshot", + "model": "moonshot-v1-8k", + "name": "月之暗面-8k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 4000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/o1-mini.json b/packages/service/core/ai/config/llm/o1-mini.json new file mode 100644 index 000000000000..c859f90768a2 --- /dev/null +++ b/packages/service/core/ai/config/llm/o1-mini.json @@ -0,0 +1,31 @@ +{ + "provider": "OpenAI", + "model": "o1-mini", + "name": "o1-mini", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 125000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": { + "temperature": 1, + "max_tokens": null + } +} diff --git 
a/packages/service/core/ai/config/llm/o1-preview.json b/packages/service/core/ai/config/llm/o1-preview.json new file mode 100644 index 000000000000..9b65d18ecd33 --- /dev/null +++ b/packages/service/core/ai/config/llm/o1-preview.json @@ -0,0 +1,32 @@ +{ + "provider": "OpenAI", + "model": "o1-preview", + "name": "o1-preview", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 125000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": { + "temperature": 1, + "max_tokens": null, + "stream": false + } +} diff --git a/packages/service/core/ai/config/llm/o1.json b/packages/service/core/ai/config/llm/o1.json new file mode 100644 index 000000000000..6fbcfe227d22 --- /dev/null +++ b/packages/service/core/ai/config/llm/o1.json @@ -0,0 +1,32 @@ +{ + "provider": "OpenAI", + "model": "o1", + "name": "o1", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 195000, + "maxResponse": 8000, + "quoteMaxToken": 120000, + "maxTemperature": 1.2, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": { + "temperature": 1, + "max_tokens": null, + "stream": false + } +} diff --git a/packages/service/core/ai/config/llm/qwen-max.json b/packages/service/core/ai/config/llm/qwen-max.json new file mode 100644 index 000000000000..d9894c06fcf0 --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen-max.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen-max", + "name": "Qwen-max", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 8000, + "maxResponse": 4000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/qwen-plus.json b/packages/service/core/ai/config/llm/qwen-plus.json new file mode 100644 index 000000000000..43461ab11e09 --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen-plus.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen-plus", + "name": "Qwen-plus", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 64000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/qwen-turbo.json b/packages/service/core/ai/config/llm/qwen-turbo.json new file mode 100644 index 000000000000..a7fcef65c344 --- /dev/null +++ 
b/packages/service/core/ai/config/llm/qwen-turbo.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen-turbo", + "name": "Qwen-turbo", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/qwen-vl-max.json b/packages/service/core/ai/config/llm/qwen-vl-max.json new file mode 100644 index 000000000000..f040f18f9b5b --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen-vl-max.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen-vl-max", + "name": "qwen-vl-max", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 32000, + "maxResponse": 2000, + "quoteMaxToken": 20000, + "maxTemperature": 1.2, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": false, + "customCQPrompt": "", + "usedInExtractFields": false, + "usedInQueryExtension": false, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/qwen-vl-plus.json b/packages/service/core/ai/config/llm/qwen-vl-plus.json new file mode 100644 index 000000000000..7796d0d8bff8 --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen-vl-plus.json @@ -0,0 +1,26 @@ +{ + "provider": "Qwen", + "model": "qwen-vl-plus", + "name": "qwen-vl-plus", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 32000, + "maxResponse": 2000, + "quoteMaxToken": 20000, + "maxTemperature": 1.2, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": false, + "customCQPrompt": "", + "usedInExtractFields": false, + "usedInQueryExtension": false, + "customExtractPrompt": "", + "usedInToolCall": false +} diff --git a/packages/service/core/ai/config/llm/qwen2.5-14b-instruct.json b/packages/service/core/ai/config/llm/qwen2.5-14b-instruct.json new file mode 100644 index 000000000000..e1825c030a24 --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen2.5-14b-instruct.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen2.5-14b-instruct", + "name": "qwen2.5-14b-128k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/qwen2.5-32b-instruct.json b/packages/service/core/ai/config/llm/qwen2.5-32b-instruct.json new file mode 100644 index 000000000000..b0c8a049874e --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen2.5-32b-instruct.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen2.5-32b-instruct", + "name": "qwen2.5-32b-128k", + + "censor": false, + "charsPointsPrice": 
0, + + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/qwen2.5-72b-instruct.json b/packages/service/core/ai/config/llm/qwen2.5-72b-instruct.json new file mode 100644 index 000000000000..9b9e0e40b635 --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen2.5-72b-instruct.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen2.5-72b-instruct", + "name": "Qwen2.5-72B-128k", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/yi-lightning.json b/packages/service/core/ai/config/llm/yi-lightning.json new file mode 100644 index 000000000000..4584adc009e7 --- /dev/null +++ b/packages/service/core/ai/config/llm/yi-lightning.json @@ -0,0 +1,29 @@ +{ + "provider": "Yi", + "model": "yi-lightning", + "name": "yi-lightning", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 12000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/yi-vision-v2.json b/packages/service/core/ai/config/llm/yi-vision-v2.json new file mode 100644 index 000000000000..f338388cd881 --- /dev/null +++ b/packages/service/core/ai/config/llm/yi-vision-v2.json @@ -0,0 +1,29 @@ +{ + "provider": "Yi", + "model": "yi-vision-v2", + "name": "yi-vision-v2", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 12000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +}