Commit 6d576762 authored by Giner, Aaron

u

parent 0e3224d5
-.idea
\ No newline at end of file
+.idea
+*/__pycache__/
\ No newline at end of file
File deleted
@@ -3,10 +3,11 @@ import math
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
 from transformers import AutoTokenizer
-import templates
-import util
+import memory_util
+import util
+from datetime import datetime


 # parses a query request
@@ -57,7 +58,7 @@ def query_chat(request, llm, tokenizer):
     messages = [
         {"role": "system",
-         "content": templates.load_template("chat_system").format(**parameters)},
+         "content": util.load_template("chat_system").format(**parameters)},
     ]
     roles = ["user", "assistant"]
@@ -66,6 +67,9 @@ def query_chat(request, llm, tokenizer):
         messages.append({"role": roles[i % 2], "content": chat[i]})
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
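+    # debug: print the fully rendered chat prompt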
+    print()
+    print(prompt)
+    print()
     res = run_query(prompt, llm)
     memories_accessed = [str(mem["NodeId"]) for mem in memories[:5]]
@@ -82,7 +86,7 @@ def query_reflection(request, llm, tokenizer):
     messages = [
         {"role": "user",
-         "content": templates.load_template("reflection_a").format(**parameters)},
+         "content": util.load_template("reflection_a").format(**parameters)},
     ]
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -98,7 +102,7 @@ def query_poignancy(request, llm, tokenizer):
     parameters = request["data"]
     messages = [
         {"role": "user",
-         "content": templates.load_template("poignancy_memory").format(**parameters)},
+         "content": util.load_template("poignancy").format(**parameters)},
     ]
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -113,17 +117,17 @@ def query_chat_summary(request, llm, tokenizer):
     parameters = request["data"]
     messages_summary = [
         {"role": "user",
-         "content": templates.load_template("chat_summary_single").format(**parameters)},
+         "content": util.load_template("chat_summary_single").format(**parameters)},
     ]
     messages_user = [
         {"role": "user",
-         "content": templates.load_template("chat_summary_user").format(**parameters)},
+         "content": util.load_template("chat_summary_user").format(**parameters)},
     ]
     messages_agent = [
         {"role": "user",
-         "content": templates.load_template("chat_summary_agent").format(**parameters)},
+         "content": util.load_template("chat_summary_agent").format(**parameters)},
     ]
     prompt_summary = tokenizer.apply_chat_template(messages_summary, tokenize=False)
@@ -158,9 +162,10 @@ def query_chat_summary(request, llm, tokenizer):
 def query_chat_extract_plan(request, llm, tokenizer):
     parameters = request["data"]
     messages = [
         {"role": "user",
-         "content": templates.load_template("chat_extract_plan").format(**parameters)},
+         "content": util.load_template("chat_extract_plan").format(**parameters)},
     ]
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -196,7 +201,7 @@ def generate_context(request):
     relationship = query_relationship(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
                                       util.load_tokenizer(util.MISTRAL_TOK))
-    return json.dumps({"response": action + last_chat + relationship})
+    return json.dumps({"response": last_chat + " " + relationship})


 def query_relationship(request, llm, tokenizer):
@@ -207,11 +212,11 @@ def query_relationship(request, llm, tokenizer):
     messages = [
         {"role": "user",
-         "content": templates.load_template("relationship").format(memories=memories_str, **parameters)},
+         "content": util.load_template("relationship").format(memories=memories_str, **parameters)},
     ]
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
-    relationship = parameters["agent"] + "'s relationship with " + parameters["user"] + " is" + run_query(prompt, llm)
+    relationship = parameters["agent"] + "'s relationship with " + parameters["user"] + " is " + run_query(prompt, llm)
     return relationship
@@ -220,7 +225,7 @@ def query_agent_action(request, llm, tokenizer):
     parameters = request["data"]
     messages = [
         {"role": "user",
-         "content": templates.load_template("agent_action").format(**parameters)},
+         "content": util.load_template("agent_action").format(**parameters)},
     ]
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -233,7 +238,7 @@ def query_knowledge(request, llm, tokenizer):
     parameters = request["data"]
     messages = [
         {"role": "user",
-         "content": templates.load_template("knowledge_summary").format(**parameters)},
+         "content": util.load_template("knowledge_summary").format(**parameters)},
     ]
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -247,21 +252,44 @@ def query_plan_day(request, llm, tokenizer):
     parameters = request["data"]
     memories = request["memories"]
-    plans = memory_util.memories_to_string(memories)
+    memories_str = memory_util.memories_to_string(memories)
     messages = [
         {"role": "user",
-         "content": templates.load_template("plan_day").format(plans=plans, **parameters)},
+         "content": util.load_template("plan_day").format(**parameters)},
     ]
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
-    res = run_query(prompt, llm)
-    plans = [s.replace("- ", "") for s in res.split("\n") if "- " in s]
-    print(prompt)
-    json_res = json.dumps({"memories": plans})
-    return json_res
+    rough_plan = run_query(prompt, llm)
+    # plans = [s.replace("- ", "") for s in res.split("\n") if "- " in s]
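+    # decompose the rough day plan into short actions, one hour at a time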
+    for h in range(24):
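+        # format the hour bounds as 12-hour clock strings (e.g. 01:00AM) for the template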
+        time_start = datetime.strptime(str(h)+":00", "%H:%M")
+        time_start = time_start.strftime("%I:%M%p")
+        time_end = datetime.strptime(str((h+1) % 24)+":00", "%H:%M")
+        time_end = time_end.strftime("%I:%M%p")
+        messages = [
+            {"role": "user",
+             "content": util.load_template("plan_day_decomp").format(time_start=time_start, time_end=time_end,
+                                                                     memories=memories_str, plan=rough_plan,
+                                                                     **parameters)},
+        ]
+        prompt = tokenizer.apply_chat_template(messages, tokenize=False)
+        res = time_start + "-" + time_end + ":" + run_query(prompt, llm)
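+        # debug: print the first decomposed hour, then abort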
+        print()
+        print(res)
+        print()
+        exit(1)
+    json_res = json.dumps({"response": res})
+    return json_res


 # returns a list of validated statements
def conversation_validate_statements(parameters, statements, llm, tokenizer):
......@@ -269,7 +297,7 @@ def conversation_validate_statements(parameters, statements, llm, tokenizer):
for statement in statements:
message_validate = [
{"role": "user",
"content": templates.load_template("chat_validate_statement").format(statement=statement,
"content": util.load_template("chat_validate_statement").format(statement=statement,
**parameters)},
]
@@ -291,7 +319,7 @@ def run_query(prompt, llm):
         verbose=False
     )
-    return llm_chain.run({})
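+    # strip leading/trailing whitespace from the model's completion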
+    return llm_chain.run({}).strip()


 d = {"type": "chat_summary",
 You are given a conversation between {agent} and {user}.
-By completing the sentence below, briefly summarize what {agent} and {user} talked about.
+In 1 short sentence, summarize the conversation below between {agent} and {user}.
 Conversation:
 {conversation}
-{agent} had a conversation with {user} and they talked about
+Answer:
+{agent} and {user} talked about
-In full sentences, create an hourly schedule for {agent} for today, given the restrictions below.
-Use present tense.
+In broad strokes, generate a plan for {agent}'s day for {date}.
+When does he have to wake up and when does he go to sleep?
+Answer in full sentences and include the time for each action.
 Today is {date}.
 Plan restrictions:
 {daily_plan_req}
-{scheduleOutline}
-{plans}
-Complete the prompt:
-On {date}, {agent}
+Answer:
Given a rough outline of {agent}'s day, generate actions for {agent} for the time between {time_start} and {time_end}.
Answer in 1-6 words.
Rough outline of {agent}'s day: {plan}
Answer:
Generate a list of 5 things {agent} wants to achieve on {date} given the information below.
All items must be distinct.
List items must start with a dash '-'.
Restrictions: {daily_plan_req}
List:
Today is {date}.
What is a suitable wakeup time for {agent}?
Restrictions:
{daily_plan_req}
Answer:
A suitable wakeup time for {agent} is
\ No newline at end of file
-Based on {agent}'s memories, briefly describe the relationship between {agent} and {user}.
+Briefly describe what {agent} feels or knows about each {user}.
 Current date and time: {datetime}
-{agent}'s memories related to {user}:
+{agent}'s memories:
 {memories}
-Complete the sentence:
-{agent}'s relationship with {user} is
\ No newline at end of file
+Answer:
@@ -31,6 +31,7 @@ def load_model(model, ctx_size, temperature):
     callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
     llm = LlamaCpp(
         model_path=model,
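+        # cap each completion at 500 generated tokens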
+        max_tokens=500,
         temperature=temperature,
         n_gpu_layers=-1,
         n_batch=512,