-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtesting_system.py
64 lines (46 loc) · 2.2 KB
/
testing_system.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import llm
import openai
import chat
import decision_center
import json

# Test harness: replays a canned conversation context (tester.json) through
# the decision-center (DMC) stage and then the natural-language (NL) stage
# 20 times, printing each round's instructions and final response for
# manual inspection.

# NOTE(review): API key is blank — supply a real key before running,
# preferably via an environment variable rather than hard-coding it here.
client = openai.OpenAI(api_key="")

# Stage settings are loaded once; they do not change between test rounds.
nl_settings = chat.load_settings()
dmc_settings = decision_center.load_settings()

# Fixed stand-in for live user input so every test round is identical.
user_input = "THIS IS A TEST, PLEASE RESPOND TO THE CONTEXT AS IF IT IS OUR CURRENT CONVERSATION"

for i in range(1, 21):
    print("-------------------------------------------------------")

    # Reload the canned fixtures each round so every test starts from the
    # same clean context, regardless of what earlier rounds appended.
    with open('tester.json') as f:
        test_settings = json.load(f)
    conversation_context = "\n".join(test_settings['context'])
    conversation_context += user_input + "\n"

    # DMC stage: assemble the system prompt from the stage settings plus the
    # test fixtures (memories, time, current user), then generate the
    # instruction list for this round.
    DMC_system_prompt = (
        dmc_settings['system_prompt']
        + "CURRENT CONVERSATION CONTEXT: \n" + conversation_context
        + "AVAILABLE INSTRUCTIONS(CHOOSE FROM THESE ONLY): " + "".join(dmc_settings['available_instructions'])
        + "RULES(FOLLOW THESE WHILE LISTING INSTRUCTIONS): " + "".join(dmc_settings['rules']) + "\n"
        + "RELEVANT MEMORIES(USE THESE TO INFORM DECISIONS): " + "\n".join(test_settings['relevant_memories']) + "\n"
        + "CURRENT TIME: " + test_settings["current_time"] + "\n"
        + "CURRENT USER: " + dmc_settings["current_user"] + "\n"
    )
    instructions = decision_center.generate_instructions(DMC_system_prompt, user_input, client, False)
    conversation_context += instructions
    print("RESULTS OF TEST " + str(i) + ":")
    print("DMC: ", instructions)

    # NL stage: generate the final natural-language output by executing the
    # instructions against the accumulated context.
    NL_system_prompt = nl_settings['system_prompt'] + nl_settings['personality'] + "CURRENT CONVERSATION CONTEXT: \n" + conversation_context + "\n"
    magnus_response = chat.generate_natural_output(NL_system_prompt, "EXECUTE THE FOLLOWING INSTRUCTIONS: " + instructions, client, False)
    conversation_context += magnus_response
    print("\n")
    print("NLP: ", magnus_response)