# chat_engine.py
import llm  # only used by the earlier draft kept in the string block below
import openai
import chat
import decision_center
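
# Two-stage chat engine: each turn, a decision-center (DMC) model first picks
# instructions from a fixed list based on the conversation so far, then a
# natural-language (NL) model executes those instructions to produce Magnus's
# reply. Both stages share the same rolling conversation_context string.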
'''def initialize_conversation():
    # create the bot conversation
    model = llm.get_model("nous-hermes-llama2-13b")
    model.key = ''
    conversation = model.conversation()
    return conversation

conversation = initialize_conversation()

# get settings
nl_settings = chat.load_settings()
dmc_settings = decision_center.load_settings()
dmc_system_prompt = dmc_settings['system_prompt'].join(str(x) for x in dmc_settings['available_instructions']) + "You are named Magnus. You are speaking to " + dmc_settings["current_user"] + "\n"
nl_system_prompt = nl_settings['system_prompt'] + nl_settings['personality'] + "You are named Magnus. You are speaking to " + nl_settings["current_user"] + "\n"

while True:
    print("You: ", end='')
    user_input = input()
    #print("Magnus: ", end='')
    if user_input.lower() == "exit":
        break
    dmc_output = decision_center.generate_reply_text(user_input, conversation, dmc_system_prompt)
    print("DMC: ", dmc_output)
    nl_system_prompt += dmc_output
    reply_text = chat.generate_reply_text(user_input, conversation, nl_system_prompt)
    print("NLP: ", reply_text)
    print("\n")
'''
# okay fresh start/final draft
#Variables
client = openai.OpenAI(api_key="")  # supply an OpenAI API key here before running
conversation_context = ""
DMC_system_prompt = ""
NL_system_prompt = ""
user_prompt = ""
nl_settings = chat.load_settings()
dmc_settings = decision_center.load_settings()
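# Assumption (inferred from the fields used below): nl_settings should provide at
# least 'system_prompt' and 'personality', and dmc_settings at least
# 'system_prompt', 'available_instructions', and 'rules'; the exact contents are
# defined by chat.load_settings() and decision_center.load_settings().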
# start the loop
while True:
    # keep only the most recent 5000 characters of conversation context
    if len(conversation_context) > 5000:
        conversation_context = conversation_context[-5000:]
    print("USER: ", end='')
    raw_text = input()
    user_input = "USER: " + raw_text
    conversation_context += user_input + "\n"
    # QOL exit command (check the raw input, not the "USER: "-prefixed line)
    if raw_text.lower() == "exit":
        break
    # DMC: build the decision-center prompt from the settings and the running context
    DMC_system_prompt = (
        dmc_settings['system_prompt']
        + "CURRENT CONVERSATION CONTEXT: \n" + conversation_context
        + "AVAILABLE INSTRUCTIONS(CHOOSE FROM THESE ONLY): " + "".join(dmc_settings['available_instructions'])
        + "RULES(FOLLOW THESE WHILE LISTING INSTRUCTIONS): " + "".join(dmc_settings['rules']) + "\n"
    )
    # generate instructions, print for now
    instructions = decision_center.generate_instructions(DMC_system_prompt, user_input, client, True)
    print("\n")
    conversation_context += instructions
    # generate final output
    NL_system_prompt = nl_settings['system_prompt'] + nl_settings['personality'] + "CURRENT CONVERSATION CONTEXT: \n" + conversation_context + "\n"
    #print("NL PROMPT: ", NL_system_prompt)
    magnus_response = chat.generate_natural_output(NL_system_prompt, "EXECUTE THE FOLLOWING INSTRUCTIONS: " + instructions, client, True)
    conversation_context += magnus_response
    print("\n")
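
# For reference, a minimal sketch of what the two generation helpers might look
# like with the OpenAI client used above. This is an assumption, not the actual
# chat.py / decision_center.py code: the real modules may use different model
# names, prompts, or signatures, and the trailing boolean is assumed to be a
# "print the output" flag.
'''
def generate_instructions(system_prompt, user_input, client, verbose):
    # ask the DMC model to list instructions for this turn
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed model name
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_input},
        ],
    )
    text = response.choices[0].message.content
    if verbose:
        print("DMC: ", text)
    return text

def generate_natural_output(system_prompt, instructions, client, verbose):
    # ask the NL model to execute the chosen instructions as Magnus
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed model name
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": instructions},
        ],
    )
    text = response.choices[0].message.content
    if verbose:
        print("MAGNUS: ", text)
    return text
'''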