chatGptTurbo.py
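"""Per-user chat wrapper around OpenAI's ChatCompletion API for a Flask app.

Keeps one conversation history per user, resets the history when the token
budget is exhausted, and can swap in FakeChat so debugging spends no API credit.
"""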
import openai
from flask import current_app as app
from apps.chat.fakeChat import FakeChat
from apps.chat.chatUtil import ChatUtil

#read the API key from a local config file (closed automatically, trailing newline stripped):
with open('apps/configs/openai_api_key.txt', 'r') as f:
    openai.api_key = f.read().strip()

#used for debugging, so no API credit is spent:
FAKE_CHAT = False
fakeChat = FakeChat()

#error message shown to the front-end user when something goes wrong
#(roughly: "Something went wrong on my side, could you try again?"):
ErrMsg = "我出错了,您再重新试试?"

#Model definition:
ModelName = "gpt-3.5-turbo-0613"
MaxToken = 16*1024  #16k budget; note the base gpt-3.5-turbo-0613 model has a 4k window, 16k matches the -16k variant
TokenMargin = 100
chatUtil = ChatUtil()

#reset user's conversation:
def resetConversation(userConversations, currentUser):
    userConversations[currentUser] = [{"role": "system", "content": "You are a helpful assistant."}]
    return

def parseFinishReason(userConversations, currentUser, finishReason):
    if(finishReason == "stop"):
        return
    #conversation has exceeded the maximum allowed tokens, need to re-init:
    if(finishReason == "length"):
        #resetConversation(userConversations, currentUser)
        pass
    app.logger.warning("%s's chat returned with reason: %s" % (currentUser, finishReason))
    return

def parseTotalToken(userConversations, currentUser, totalToken):
    if(totalToken > (MaxToken - TokenMargin)):
        app.logger.warning("%s's total token reached %d, reset conversation" % (currentUser, totalToken))
        resetConversation(userConversations, currentUser)
    return

userConversations = {}

def chatWithTurbo3(currentUser, chatConversation):
    global userConversations
    try:
        response = openai.ChatCompletion.create(
            model=ModelName,
            messages=chatConversation,
            temperature=0.2,
            max_tokens=1024,
            top_p=1,
            frequency_penalty=0.0,
            presence_penalty=0.6,
            stop=None
        )
        message = response['choices'][0]['message']['content']
        message = message.strip()
        message = chatUtil.processText(message)
    except Exception as e:
        app.logger.warning("User {} hit error: {}".format(currentUser, e))
        resetConversation(userConversations, currentUser)
        return ErrMsg, 0, "response_error"
    try:
        totalToken = response['usage']['total_tokens']
        finishReason = response['choices'][0]['finish_reason']
    except Exception as e:
        app.logger.warning(e)
        resetConversation(userConversations, currentUser)
        return ErrMsg, 0, "parse_result_error"
    return message, totalToken, finishReason

def chatResponseFromTurbo(currentUser, prompt):
    global userConversations
    #app.logger.info("User %s Encrypted prompt %s" % (currentUser, prompt))
    if currentUser not in userConversations:
        resetConversation(userConversations, currentUser)
    userConversations[currentUser].append({"role": "user", "content": prompt})
    if(FAKE_CHAT == False):
        botAnswer, totalToken, finishReason = chatWithTurbo3(currentUser, userConversations[currentUser])
    else:
        botAnswer = fakeChat.giveAnswer()
        totalToken = 0
        finishReason = "stop"
    app.logger.info("Response to %s with total token %d" % (currentUser, totalToken))
    #we got a good response:
    if(finishReason == "stop"):
        userConversations[currentUser].append({"role": "assistant", "content": botAnswer})
    parseFinishReason(userConversations, currentUser, finishReason)
    parseTotalToken(userConversations, currentUser, totalToken)
    return botAnswer
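
#Minimal usage sketch (not part of this module): how chatResponseFromTurbo might
#be called from a Flask view. The blueprint name, route, request fields, and the
#apps.chat.chatGptTurbo import path below are assumptions, not code from this repo.
#
#   from flask import Blueprint, request, jsonify
#   from apps.chat.chatGptTurbo import chatResponseFromTurbo
#
#   chat_bp = Blueprint("chat", __name__)
#
#   @chat_bp.route("/chat", methods=["POST"])
#   def chat():
#       currentUser = request.json.get("user", "anonymous")
#       prompt = request.json.get("prompt", "")
#       return jsonify({"answer": chatResponseFromTurbo(currentUser, prompt)})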