From 3af28ecf3405574c76c249c58d7a90be48abd952 Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Sun, 15 Dec 2024 22:05:20 +0100
Subject: [PATCH 1/5] :sparkles: feat: Add MessageBuilder for code completion
---
CMakeLists.txt | 1 +
CodeHandler.cpp | 76 +++++++++++++++++
CodeHandler.hpp | 41 ++++++++++
LLMClientInterface.cpp | 39 ++++++---
llmcore/CMakeLists.txt | 1 +
llmcore/MessageBuilder.cpp | 3 +
llmcore/MessageBuilder.hpp | 127 +++++++++++++++++++++++++++++
llmcore/PromptTemplateManager.hpp | 5 +-
llmcore/RequestHandler.cpp | 4 +-
llmcore/RequestType.hpp | 2 +-
providers/LMStudioProvider.cpp | 3 +-
providers/OllamaProvider.cpp | 2 +-
providers/OpenAICompatProvider.cpp | 2 +-
providers/OpenRouterAIProvider.cpp | 2 +-
14 files changed, 285 insertions(+), 23 deletions(-)
create mode 100644 CodeHandler.cpp
create mode 100644 CodeHandler.hpp
create mode 100644 llmcore/MessageBuilder.cpp
create mode 100644 llmcore/MessageBuilder.hpp
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 252a05c..324c9c0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -67,4 +67,5 @@ add_qtc_plugin(QodeAssist
chat/ChatOutputPane.h chat/ChatOutputPane.cpp
chat/NavigationPanel.hpp chat/NavigationPanel.cpp
ConfigurationManager.hpp ConfigurationManager.cpp
+ CodeHandler.hpp CodeHandler.cpp
)
diff --git a/CodeHandler.cpp b/CodeHandler.cpp
new file mode 100644
index 0000000..d3ce1bb
--- /dev/null
+++ b/CodeHandler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2024 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "CodeHandler.hpp"
+
+namespace QodeAssist {
+
+QString CodeHandler::processText(QString text)
+{
+ return removeCodeBlockWrappers(text);
+}
+
+QString CodeHandler::removeCodeBlockWrappers(QString text)
+{
+ QRegularExpressionMatchIterator matchIterator = getFullCodeBlockRegex().globalMatch(text);
+ while (matchIterator.hasNext()) {
+ QRegularExpressionMatch match = matchIterator.next();
+ QString codeBlock = match.captured(0);
+ QString codeContent = match.captured(1).trimmed();
+ text.replace(codeBlock, codeContent);
+ }
+
+ QRegularExpressionMatch startMatch = getPartialStartBlockRegex().match(text);
+ if (startMatch.hasMatch()) {
+ QString partialBlock = startMatch.captured(0);
+ QString codeContent = startMatch.captured(1).trimmed();
+ text.replace(partialBlock, codeContent);
+ }
+
+ QRegularExpressionMatch endMatch = getPartialEndBlockRegex().match(text);
+ if (endMatch.hasMatch()) {
+ QString partialBlock = endMatch.captured(0);
+ QString codeContent = endMatch.captured(1).trimmed();
+ text.replace(partialBlock, codeContent);
+ }
+
+ return text;
+}
+
+const QRegularExpression &CodeHandler::getFullCodeBlockRegex()
+{
+ static const QRegularExpression
+ regex(R"(```[\w\s]*\n([\s\S]*?)```)", QRegularExpression::MultilineOption);
+ return regex;
+}
+
+const QRegularExpression &CodeHandler::getPartialStartBlockRegex()
+{
+ static const QRegularExpression
+ regex(R"(```[\w\s]*\n([\s\S]*?)$)", QRegularExpression::MultilineOption);
+ return regex;
+}
+
+const QRegularExpression &CodeHandler::getPartialEndBlockRegex()
+{
+ static const QRegularExpression regex(R"(^([\s\S]*?)```)", QRegularExpression::MultilineOption);
+ return regex;
+}
+
+} // namespace QodeAssist
diff --git a/CodeHandler.hpp b/CodeHandler.hpp
new file mode 100644
index 0000000..73d5d98
--- /dev/null
+++ b/CodeHandler.hpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2024 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QRegularExpression>
+#include <QString>
+#include <QStringList>
+
+namespace QodeAssist {
+
+class CodeHandler
+{
+public:
+ static QString processText(QString text);
+
+private:
+ static QString removeCodeBlockWrappers(QString text);
+
+ static const QRegularExpression &getFullCodeBlockRegex();
+ static const QRegularExpression &getPartialStartBlockRegex();
+ static const QRegularExpression &getPartialEndBlockRegex();
+};
+
+} // namespace QodeAssist
diff --git a/LLMClientInterface.cpp b/LLMClientInterface.cpp
index 9684c3e..db7cade 100644
--- a/LLMClientInterface.cpp
+++ b/LLMClientInterface.cpp
@@ -26,7 +26,9 @@
#include
#include
+#include "CodeHandler.hpp"
#include "DocumentContextReader.hpp"
+#include "llmcore/MessageBuilder.hpp"
#include "llmcore/PromptTemplateManager.hpp"
#include "llmcore/ProvidersManager.hpp"
#include "logger/Logger.hpp"
@@ -167,11 +169,13 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
}
LLMCore::LLMConfig config;
- config.requestType = LLMCore::RequestType::Fim;
+ config.requestType = LLMCore::RequestType::CodeCompletion;
config.provider = provider;
config.promptTemplate = promptTemplate;
- config.url = QUrl(
- QString("%1%2").arg(Settings::generalSettings().ccUrl(), provider->completionEndpoint()));
+ config.url = QUrl(QString("%1%2").arg(
+ Settings::generalSettings().ccUrl(),
+ promptTemplate->type() == LLMCore::TemplateType::Fim ? provider->completionEndpoint()
+ : provider->chatEndpoint()));
config.apiKey = Settings::codeCompletionSettings().apiKey();
config.providerRequest
@@ -180,20 +184,27 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
config.multiLineCompletion = completeSettings.multiLineCompletion();
+ const auto stopWords = QJsonArray::fromStringList(config.promptTemplate->stopWords());
+ if (!stopWords.isEmpty())
+ config.providerRequest["stop"] = stopWords;
+
QString systemPrompt;
if (completeSettings.useSystemPrompt())
systemPrompt.append(completeSettings.systemPrompt());
if (!updatedContext.fileContext.isEmpty())
systemPrompt.append(updatedContext.fileContext);
- if (!systemPrompt.isEmpty())
- config.providerRequest["system"] = systemPrompt;
- const auto stopWords = QJsonArray::fromStringList(config.promptTemplate->stopWords());
- if (!stopWords.isEmpty())
- config.providerRequest["stop"] = stopWords;
+ auto message = LLMCore::MessageBuilder()
+ .addSystemMessage(systemPrompt)
+ .addUserMessage(updatedContext.prefix)
+ .addSuffix(updatedContext.suffix)
+ .addtTokenizer(promptTemplate);
+
+ message.saveTo(
+ config.providerRequest,
+ providerName == "Ollama" ? LLMCore::ProvidersApi::Ollama : LLMCore::ProvidersApi::OpenAI);
- config.promptTemplate->prepareRequest(config.providerRequest, updatedContext);
- config.provider->prepareRequest(config.providerRequest, LLMCore::RequestType::Fim);
+ config.provider->prepareRequest(config.providerRequest, LLMCore::RequestType::CodeCompletion);
auto errors = config.provider->validateRequest(config.providerRequest, promptTemplate->type());
if (!errors.isEmpty()) {
@@ -237,14 +248,18 @@ void LLMClientInterface::sendCompletionToClient(const QString &completion,
QJsonObject response;
response["jsonrpc"] = "2.0";
response[LanguageServerProtocol::idKey] = request["id"];
+
QJsonObject result;
QJsonArray completions;
QJsonObject completionItem;
- completionItem[LanguageServerProtocol::textKey] = completion;
+
+ QString processedCompletion = CodeHandler::processText(completion);
+
+ completionItem[LanguageServerProtocol::textKey] = processedCompletion;
QJsonObject range;
range["start"] = position;
QJsonObject end = position;
- end["character"] = position["character"].toInt() + completion.length();
+ end["character"] = position["character"].toInt() + processedCompletion.length();
range["end"] = end;
completionItem[LanguageServerProtocol::rangeKey] = range;
completionItem[LanguageServerProtocol::positionKey] = position;
diff --git a/llmcore/CMakeLists.txt b/llmcore/CMakeLists.txt
index 4a8e0d9..93d7bcf 100644
--- a/llmcore/CMakeLists.txt
+++ b/llmcore/CMakeLists.txt
@@ -10,6 +10,7 @@ add_library(LLMCore STATIC
OllamaMessage.hpp OllamaMessage.cpp
OpenAIMessage.hpp OpenAIMessage.cpp
ValidationUtils.hpp ValidationUtils.cpp
+ MessageBuilder.hpp MessageBuilder.cpp
)
target_link_libraries(LLMCore
diff --git a/llmcore/MessageBuilder.cpp b/llmcore/MessageBuilder.cpp
new file mode 100644
index 0000000..076e225
--- /dev/null
+++ b/llmcore/MessageBuilder.cpp
@@ -0,0 +1,3 @@
+#include "MessageBuilder.hpp"
+
+namespace QodeAssist::LLMCore {} // namespace QodeAssist::LLMCore
diff --git a/llmcore/MessageBuilder.hpp b/llmcore/MessageBuilder.hpp
new file mode 100644
index 0000000..ec42806
--- /dev/null
+++ b/llmcore/MessageBuilder.hpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2024 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QJsonArray>
+#include <QJsonObject>
+#include <QString>
+#include <QVector>
+
+#include "PromptTemplate.hpp"
+
+namespace QodeAssist::LLMCore {
+
+enum class MessageRole { System, User, Assistant };
+
+enum class OllamaFormat { Messages, Completions };
+
+enum class ProvidersApi { Ollama, OpenAI };
+
+static const QString ROLE_SYSTEM = "system";
+static const QString ROLE_USER = "user";
+static const QString ROLE_ASSISTANT = "assistant";
+
+struct Message
+{
+ MessageRole role;
+ QString content;
+};
+
+class MessageBuilder
+{
+public:
+ MessageBuilder &addSystemMessage(const QString &content)
+ {
+ m_systemMessage = content;
+ return *this;
+ }
+
+ MessageBuilder &addUserMessage(const QString &content)
+ {
+ m_messages.append({MessageRole::User, content});
+ return *this;
+ }
+
+ MessageBuilder &addSuffix(const QString &content)
+ {
+ m_suffix = content;
+ return *this;
+ }
+
+ MessageBuilder &addtTokenizer(PromptTemplate *promptTemplate)
+ {
+ m_promptTemplate = promptTemplate;
+ return *this;
+ }
+
+ QString roleToString(MessageRole role) const
+ {
+ switch (role) {
+ case MessageRole::System:
+ return ROLE_SYSTEM;
+ case MessageRole::User:
+ return ROLE_USER;
+ case MessageRole::Assistant:
+ return ROLE_ASSISTANT;
+ default:
+ return ROLE_USER;
+ }
+ }
+
+ void saveTo(QJsonObject &request, ProvidersApi api)
+ {
+ if (!m_promptTemplate) {
+ return;
+ }
+
+ if (api == ProvidersApi::Ollama) {
+ ContextData context{
+ m_messages.isEmpty() ? QString() : m_messages.last().content,
+ m_suffix,
+ m_systemMessage};
+
+ if (m_promptTemplate->type() == TemplateType::Fim) {
+ m_promptTemplate->prepareRequest(request, context);
+ } else {
+ QJsonArray messages;
+
+ messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
+ messages.append(
+ QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
+ request["messages"] = messages;
+ m_promptTemplate->prepareRequest(request, {});
+ }
+ } else if (api == ProvidersApi::OpenAI) {
+ QJsonArray messages;
+
+ messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
+ messages.append(QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
+ request["messages"] = messages;
+ m_promptTemplate->prepareRequest(request, {});
+ }
+ }
+
+private:
+ QString m_systemMessage;
+ QString m_suffix;
+ QVector<Message> m_messages;
+ PromptTemplate *m_promptTemplate;
+};
+} // namespace QodeAssist::LLMCore
diff --git a/llmcore/PromptTemplateManager.hpp b/llmcore/PromptTemplateManager.hpp
index 4774a22..1cae0b0 100644
--- a/llmcore/PromptTemplateManager.hpp
+++ b/llmcore/PromptTemplateManager.hpp
@@ -39,9 +39,8 @@ class PromptTemplateManager
"T must inherit from PromptTemplate");
T *template_ptr = new T();
QString name = template_ptr->name();
- if (template_ptr->type() == TemplateType::Fim) {
- m_fimTemplates[name] = template_ptr;
- } else if (template_ptr->type() == TemplateType::Chat) {
+ m_fimTemplates[name] = template_ptr;
+ if (template_ptr->type() == TemplateType::Chat) {
m_chatTemplates[name] = template_ptr;
}
}
diff --git a/llmcore/RequestHandler.cpp b/llmcore/RequestHandler.cpp
index b51f6c8..3855e6d 100644
--- a/llmcore/RequestHandler.cpp
+++ b/llmcore/RequestHandler.cpp
@@ -75,7 +75,7 @@ void RequestHandler::handleLLMResponse(QNetworkReply *reply,
bool isComplete = config.provider->handleResponse(reply, accumulatedResponse);
- if (config.requestType == RequestType::Fim) {
+ if (config.requestType == RequestType::CodeCompletion) {
if (!config.multiLineCompletion
&& processSingleLineCompletion(reply, request, accumulatedResponse, config)) {
return;
@@ -84,7 +84,6 @@ void RequestHandler::handleLLMResponse(QNetworkReply *reply,
if (isComplete) {
auto cleanedCompletion = removeStopWords(accumulatedResponse,
config.promptTemplate->stopWords());
- removeCodeBlockWrappers(cleanedCompletion);
emit completionReceived(cleanedCompletion, request, true);
}
@@ -126,7 +125,6 @@ bool RequestHandler::processSingleLineCompletion(
const LLMConfig &config)
{
QString cleanedResponse = accumulatedResponse;
- removeCodeBlockWrappers(cleanedResponse);
int newlinePos = cleanedResponse.indexOf('\n');
if (newlinePos != -1) {
diff --git a/llmcore/RequestType.hpp b/llmcore/RequestType.hpp
index 763e35e..fe9de64 100644
--- a/llmcore/RequestType.hpp
+++ b/llmcore/RequestType.hpp
@@ -21,5 +21,5 @@
namespace QodeAssist::LLMCore {
-enum RequestType { Fim, Chat };
+enum RequestType { CodeCompletion, Chat };
}
diff --git a/providers/LMStudioProvider.cpp b/providers/LMStudioProvider.cpp
index f16ad8d..9097ed7 100644
--- a/providers/LMStudioProvider.cpp
+++ b/providers/LMStudioProvider.cpp
@@ -94,7 +94,7 @@ void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType
request["messages"] = std::move(messages);
}
- if (type == LLMCore::RequestType::Fim) {
+ if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
@@ -104,6 +104,7 @@ void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType
bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
QByteArray data = reply->readAll();
+ qDebug() << "handleResponse" << data;
if (data.isEmpty()) {
return false;
}
diff --git a/providers/OllamaProvider.cpp b/providers/OllamaProvider.cpp
index 58a0c97..fa5f39f 100644
--- a/providers/OllamaProvider.cpp
+++ b/providers/OllamaProvider.cpp
@@ -80,7 +80,7 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
request["keep_alive"] = settings.ollamaLivetime();
};
- if (type == LLMCore::RequestType::Fim) {
+ if (type == LLMCore::RequestType::CodeCompletion) {
applySettings(Settings::codeCompletionSettings());
} else {
applySettings(Settings::chatAssistantSettings());
diff --git a/providers/OpenAICompatProvider.cpp b/providers/OpenAICompatProvider.cpp
index 45ceda9..785d986 100644
--- a/providers/OpenAICompatProvider.cpp
+++ b/providers/OpenAICompatProvider.cpp
@@ -93,7 +93,7 @@ void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::Request
request["messages"] = std::move(messages);
}
- if (type == LLMCore::RequestType::Fim) {
+ if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
diff --git a/providers/OpenRouterAIProvider.cpp b/providers/OpenRouterAIProvider.cpp
index 0a4e501..0435cb8 100644
--- a/providers/OpenRouterAIProvider.cpp
+++ b/providers/OpenRouterAIProvider.cpp
@@ -77,7 +77,7 @@ void OpenRouterProvider::prepareRequest(QJsonObject &request, LLMCore::RequestTy
request["messages"] = std::move(messages);
}
- if (type == LLMCore::RequestType::Fim) {
+ if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
From 6bacead727aaf6da1d75afa7a3e0121698dcc488 Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Sun, 15 Dec 2024 22:24:27 +0100
Subject: [PATCH 2/5] :sparkles: feat: Move non-code response text into code comments
---
CodeHandler.cpp | 81 +++++++++++++++++++++++++---------
CodeHandler.hpp | 3 +-
providers/LMStudioProvider.cpp | 1 -
3 files changed, 62 insertions(+), 23 deletions(-)
diff --git a/CodeHandler.cpp b/CodeHandler.cpp
index d3ce1bb..29cca2f 100644
--- a/CodeHandler.cpp
+++ b/CodeHandler.cpp
@@ -18,39 +18,78 @@
*/
#include "CodeHandler.hpp"
+#include <QHash>
namespace QodeAssist {
QString CodeHandler::processText(QString text)
{
- return removeCodeBlockWrappers(text);
+ QString result;
+ QStringList lines = text.split('\n');
+ bool inCodeBlock = false;
+ QString pendingComments;
+ QString currentLanguage;
+
+ for (const QString &line : lines) {
+ if (line.trimmed().startsWith("```")) {
+ if (!inCodeBlock) {
+ currentLanguage = detectLanguage(line);
+ }
+ inCodeBlock = !inCodeBlock;
+ continue;
+ }
+
+ if (inCodeBlock) {
+ if (!pendingComments.isEmpty()) {
+ QStringList commentLines = pendingComments.split('\n');
+ QString commentPrefix = getCommentPrefix(currentLanguage);
+
+ for (const QString &commentLine : commentLines) {
+ if (!commentLine.trimmed().isEmpty()) {
+ result += commentPrefix + " " + commentLine.trimmed() + "\n";
+ } else {
+ result += "\n";
+ }
+ }
+ pendingComments.clear();
+ }
+ result += line + "\n";
+ } else {
+ QString trimmed = line.trimmed();
+ if (!trimmed.isEmpty()) {
+ pendingComments += trimmed + "\n";
+ } else {
+ pendingComments += "\n";
+ }
+ }
+ }
+
+ return result;
}
-QString CodeHandler::removeCodeBlockWrappers(QString text)
+QString CodeHandler::getCommentPrefix(const QString &language)
{
- QRegularExpressionMatchIterator matchIterator = getFullCodeBlockRegex().globalMatch(text);
- while (matchIterator.hasNext()) {
- QRegularExpressionMatch match = matchIterator.next();
- QString codeBlock = match.captured(0);
- QString codeContent = match.captured(1).trimmed();
- text.replace(codeBlock, codeContent);
- }
+ static const QHash<QString, QString> commentPrefixes
+ = {{"python", "#"}, {"py", "#"}, {"lua", "--"}, {"javascript", "//"},
+ {"js", "//"}, {"typescript", "//"}, {"ts", "//"}, {"cpp", "//"},
+ {"c++", "//"}, {"c", "//"}, {"java", "//"}, {"csharp", "//"},
+ {"cs", "//"}, {"php", "//"}, {"ruby", "#"}, {"rb", "#"},
+ {"rust", "//"}, {"rs", "//"}, {"go", "//"}, {"swift", "//"},
+ {"kotlin", "//"}, {"kt", "//"}, {"scala", "//"}, {"r", "#"},
+ {"shell", "#"}, {"bash", "#"}, {"sh", "#"}, {"perl", "#"},
+ {"pl", "#"}, {"haskell", "--"}, {"hs", "--"}};
- QRegularExpressionMatch startMatch = getPartialStartBlockRegex().match(text);
- if (startMatch.hasMatch()) {
- QString partialBlock = startMatch.captured(0);
- QString codeContent = startMatch.captured(1).trimmed();
- text.replace(partialBlock, codeContent);
- }
+ return commentPrefixes.value(language.toLower(), "//");
+}
- QRegularExpressionMatch endMatch = getPartialEndBlockRegex().match(text);
- if (endMatch.hasMatch()) {
- QString partialBlock = endMatch.captured(0);
- QString codeContent = endMatch.captured(1).trimmed();
- text.replace(partialBlock, codeContent);
+QString CodeHandler::detectLanguage(const QString &line)
+{
+ QString trimmed = line.trimmed();
+ if (trimmed.length() <= 3) { // just a bare ``` fence with no language tag
+ return QString();
}
- return text;
+ return trimmed.mid(3).trimmed();
}
const QRegularExpression &CodeHandler::getFullCodeBlockRegex()
diff --git a/CodeHandler.hpp b/CodeHandler.hpp
index 73d5d98..9b704b4 100644
--- a/CodeHandler.hpp
+++ b/CodeHandler.hpp
@@ -31,7 +31,8 @@ class CodeHandler
static QString processText(QString text);
private:
- static QString removeCodeBlockWrappers(QString text);
+ static QString getCommentPrefix(const QString &language);
+ static QString detectLanguage(const QString &line);
static const QRegularExpression &getFullCodeBlockRegex();
static const QRegularExpression &getPartialStartBlockRegex();
diff --git a/providers/LMStudioProvider.cpp b/providers/LMStudioProvider.cpp
index 9097ed7..8150583 100644
--- a/providers/LMStudioProvider.cpp
+++ b/providers/LMStudioProvider.cpp
@@ -104,7 +104,6 @@ void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType
bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
QByteArray data = reply->readAll();
- qDebug() << "handleResponse" << data;
if (data.isEmpty()) {
return false;
}
From 9b921bc6582f4c4d9e56f7d81417d366dc0235b3 Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Tue, 17 Dec 2024 00:04:53 +0100
Subject: [PATCH 3/5] :bug: fix: Add stop to ollama request validator
---
providers/OllamaProvider.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/providers/OllamaProvider.cpp b/providers/OllamaProvider.cpp
index fa5f39f..49f693b 100644
--- a/providers/OllamaProvider.cpp
+++ b/providers/OllamaProvider.cpp
@@ -66,6 +66,7 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
QJsonObject options;
options["num_predict"] = settings.maxTokens();
options["temperature"] = settings.temperature();
+ options["stop"] = request.take("stop");
if (settings.useTopP())
options["top_p"] = settings.topP();
@@ -150,6 +151,7 @@ QList<QString> OllamaProvider::validateRequest(const QJsonObject &request, LLMCo
{"options",
QJsonObject{
{"temperature", {}},
+ {"stop", {}},
{"top_p", {}},
{"top_k", {}},
{"num_predict", {}},
@@ -164,6 +166,7 @@ QList<QString> OllamaProvider::validateRequest(const QJsonObject &request, LLMCo
{"options",
QJsonObject{
{"temperature", {}},
+ {"stop", {}},
{"top_p", {}},
{"top_k", {}},
{"num_predict", {}},
From 498caf9b4f3a002520817d531a10c52cb30f5afc Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Tue, 17 Dec 2024 00:18:00 +0100
Subject: [PATCH 4/5] :bug: fix: Template double delete
---
llmcore/PromptTemplateManager.cpp | 1 -
1 file changed, 1 deletion(-)
diff --git a/llmcore/PromptTemplateManager.cpp b/llmcore/PromptTemplateManager.cpp
index c0b08fc..644e934 100644
--- a/llmcore/PromptTemplateManager.cpp
+++ b/llmcore/PromptTemplateManager.cpp
@@ -40,7 +40,6 @@ QStringList PromptTemplateManager::chatTemplatesNames() const
PromptTemplateManager::~PromptTemplateManager()
{
qDeleteAll(m_fimTemplates);
- qDeleteAll(m_chatTemplates);
}
PromptTemplate *PromptTemplateManager::getFimTemplateByName(const QString &templateName)
From 83fcb8fae34c2dee1c7f8f89081141873a6caf61 Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Tue, 17 Dec 2024 00:25:13 +0100
Subject: [PATCH 5/5] :sparkles: feat: Add setting to process text from
 instruct models
---
LLMClientInterface.cpp | 10 +++++++++-
settings/CodeCompletionSettings.cpp | 13 +++++++++----
settings/CodeCompletionSettings.hpp | 1 +
settings/SettingsConstants.hpp | 1 +
4 files changed, 20 insertions(+), 5 deletions(-)
diff --git a/LLMClientInterface.cpp b/LLMClientInterface.cpp
index db7cade..d660ab2 100644
--- a/LLMClientInterface.cpp
+++ b/LLMClientInterface.cpp
@@ -243,6 +243,10 @@ void LLMClientInterface::sendCompletionToClient(const QString &completion,
const QJsonObject &request,
bool isComplete)
{
+ auto templateName = Settings::generalSettings().ccTemplate();
+ auto promptTemplate = LLMCore::PromptTemplateManager::instance().getFimTemplateByName(
+ templateName);
+
QJsonObject position = request["params"].toObject()["doc"].toObject()["position"].toObject();
QJsonObject response;
@@ -253,7 +257,11 @@ void LLMClientInterface::sendCompletionToClient(const QString &completion,
QJsonArray completions;
QJsonObject completionItem;
- QString processedCompletion = CodeHandler::processText(completion);
+ QString processedCompletion
+ = promptTemplate->type() == LLMCore::TemplateType::Chat
+ && Settings::codeCompletionSettings().smartProcessInstuctText()
+ ? CodeHandler::processText(completion)
+ : completion;
completionItem[LanguageServerProtocol::textKey] = processedCompletion;
QJsonObject range;
diff --git a/settings/CodeCompletionSettings.cpp b/settings/CodeCompletionSettings.cpp
index 927ebc9..9f26214 100644
--- a/settings/CodeCompletionSettings.cpp
+++ b/settings/CodeCompletionSettings.cpp
@@ -48,13 +48,17 @@ CodeCompletionSettings::CodeCompletionSettings()
autoCompletion.setDefaultValue(true);
multiLineCompletion.setSettingsKey(Constants::CC_MULTILINE_COMPLETION);
- multiLineCompletion.setDefaultValue(false);
- multiLineCompletion.setLabelText(Tr::tr("Enable Multiline Completion(experimental)"));
+ multiLineCompletion.setDefaultValue(true);
+ multiLineCompletion.setLabelText(Tr::tr("Enable Multiline Completion"));
stream.setSettingsKey(Constants::CC_STREAM);
stream.setDefaultValue(true);
stream.setLabelText(Tr::tr("Enable stream option"));
+ smartProcessInstuctText.setSettingsKey(Constants::CC_SMART_PROCESS_INSTRUCT_TEXT);
+ smartProcessInstuctText.setDefaultValue(true);
+ smartProcessInstuctText.setLabelText(Tr::tr("Enable smart process text from instruct model"));
+
startSuggestionTimer.setSettingsKey(Constants::СС_START_SUGGESTION_TIMER);
startSuggestionTimer.setLabelText(Tr::tr("with delay(ms)"));
startSuggestionTimer.setRange(10, 10000);
@@ -147,8 +151,8 @@ CodeCompletionSettings::CodeCompletionSettings()
systemPrompt.setSettingsKey(Constants::CC_SYSTEM_PROMPT);
systemPrompt.setDisplayStyle(Utils::StringAspect::TextEditDisplay);
- systemPrompt.setDefaultValue("You are an expert C++, Qt, and QML code completion AI. ANSWER "
- "should be SHORT and in CODE");
+ systemPrompt.setDefaultValue("You are an expert C++, Qt, and QML code completion AI. Answer "
+ "should be ONLY in CODE and without repeating current.");
useFilePathInContext.setSettingsKey(Constants::CC_USE_FILE_PATH_IN_CONTEXT);
useFilePathInContext.setDefaultValue(true);
@@ -227,6 +231,7 @@ CodeCompletionSettings::CodeCompletionSettings()
Space{8},
multiLineCompletion,
stream,
+ smartProcessInstuctText,
Row{autoCompletionCharThreshold,
autoCompletionTypingInterval,
startSuggestionTimer,
diff --git a/settings/CodeCompletionSettings.hpp b/settings/CodeCompletionSettings.hpp
index 310b1a3..5ca671d 100644
--- a/settings/CodeCompletionSettings.hpp
+++ b/settings/CodeCompletionSettings.hpp
@@ -36,6 +36,7 @@ class CodeCompletionSettings : public Utils::AspectContainer
Utils::BoolAspect autoCompletion{this};
Utils::BoolAspect multiLineCompletion{this};
Utils::BoolAspect stream{this};
+ Utils::BoolAspect smartProcessInstuctText{this};
Utils::IntegerAspect startSuggestionTimer{this};
Utils::IntegerAspect autoCompletionCharThreshold{this};
diff --git a/settings/SettingsConstants.hpp b/settings/SettingsConstants.hpp
index 3e33ccc..8eaf21a 100644
--- a/settings/SettingsConstants.hpp
+++ b/settings/SettingsConstants.hpp
@@ -50,6 +50,7 @@ const char СС_AUTO_COMPLETION_TYPING_INTERVAL[] = "QodeAssist.autoCompletionTy
const char MAX_FILE_THRESHOLD[] = "QodeAssist.maxFileThreshold";
const char CC_MULTILINE_COMPLETION[] = "QodeAssist.ccMultilineCompletion";
const char CC_STREAM[] = "QodeAssist.ccStream";
+const char CC_SMART_PROCESS_INSTRUCT_TEXT[] = "QodeAssist.ccSmartProcessInstructText";
const char CUSTOM_JSON_TEMPLATE[] = "QodeAssist.customJsonTemplate";
const char CA_TOKENS_THRESHOLD[] = "QodeAssist.caTokensThreshold";
const char CA_SHARING_CURRENT_FILE[] = "QodeAssist.caSharingCurrentFile";