From 6d5767627f24d79a47c77165b327a057f36d4b13 Mon Sep 17 00:00:00 2001
From: Aaron Giner <aaron.giner@student.tugraz.at>
Date: Sat, 6 Apr 2024 00:06:51 +0200
Subject: [PATCH] Load templates via util and decompose daily plans hourly

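Move prompt-template loading from the templates module into util, rename
poignancy_memory.txt to poignancy.txt, and rework query_plan_day so that a
rough day plan is generated first and then decomposed hour by hour via the
new plan_day_decomp.txt template. Also add plan_day_goals.txt and
plan_day_wakeup.txt templates, tighten the chat_summary_single and
relationship prompts, cap LlamaCpp generation at 500 tokens, ignore
__pycache__ directories, and drop a stale compiled .pyc from the repo.
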
---
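
Review note: query_plan_day now derives 24 one-hour windows and queries the
model once per window via the new plan_day_decomp.txt template. Below is a
minimal standalone sketch of the window formatting (stdlib only, not part of
the diff), showing the %I:%M%p rendering and the midnight wrap-around:

    from datetime import datetime

    # Derive the 24 one-hour windows used to decompose the rough day plan.
    # (h + 1) % 24 wraps the final window back around to 12:00AM.
    for h in range(24):
        time_start = datetime.strptime(str(h) + ":00", "%H:%M").strftime("%I:%M%p")
        time_end = datetime.strptime(str((h + 1) % 24) + ":00", "%H:%M").strftime("%I:%M%p")
        print(time_start + "-" + time_end)  # 12:00AM-01:00AM ... 11:00PM-12:00AM
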
 python/.gitignore                             |   3 +-
 .../__pycache__/queries.cpython-310.pyc       | Bin 8970 -> 0 bytes
 python/llm-server/queries.py                  |  65 +++++++++++------
 .../templates/chat_summary_single.txt         |   5 +-
 python/llm-server/templates/plan_day.txt      |  15 ++--
 .../llm-server/templates/plan_day_decomp.txt  |   6 ++
 .../llm-server/templates/plan_day_goals.txt   |   7 ++
 .../llm-server/templates/plan_day_wakeup.txt  |   8 ++
 .../{poignancy_memory.txt => poignancy.txt}   |   0
 python/llm-server/templates/relationship.txt  |   7 +-
 python/llm-server/util.py                     |   1 +
 11 files changed, 78 insertions(+), 39 deletions(-)
 delete mode 100644 python/llm-server/__pycache__/queries.cpython-310.pyc
 create mode 100644 python/llm-server/templates/plan_day_decomp.txt
 create mode 100644 python/llm-server/templates/plan_day_goals.txt
 create mode 100644 python/llm-server/templates/plan_day_wakeup.txt
 rename python/llm-server/templates/{poignancy_memory.txt => poignancy.txt} (100%)

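Note: the queries.py changes below call util.load_template, which this patch
does not define; it is assumed to exist in util.py already. A sketch of the
assumed helper follows (name, signature, and file layout are assumptions, not
part of this diff):

    import os

    TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "templates")  # assumed

    def load_template(name):
        # Assumed behavior: read templates/<name>.txt and return the raw text
        # so callers can fill placeholders with str.format().
        with open(os.path.join(TEMPLATE_DIR, name + ".txt"), encoding="utf-8") as f:
            return f.read()
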
diff --git a/python/.gitignore b/python/.gitignore
index 723ef36..df72eae 100644
--- a/python/.gitignore
+++ b/python/.gitignore
@@ -1 +1,2 @@
-.idea
\ No newline at end of file
+.idea
+*/__pycache__/
\ No newline at end of file
diff --git a/python/llm-server/__pycache__/queries.cpython-310.pyc b/python/llm-server/__pycache__/queries.cpython-310.pyc
deleted file mode 100644
index 0b5d1a481abef7797a375fb23e00ca37dd469498..0000000000000000000000000000000000000000
Binary files a/python/llm-server/__pycache__/queries.cpython-310.pyc and /dev/null differ

diff --git a/python/llm-server/queries.py b/python/llm-server/queries.py
index a629d21..d5be5b4 100644
--- a/python/llm-server/queries.py
+++ b/python/llm-server/queries.py
@@ -3,10 +3,11 @@ import math
 
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
-from transformers import AutoTokenizer
-import templates
-import util
+
 import memory_util
+import util
+
+from datetime import datetime
 
 
 # parses a query request
@@ -57,7 +58,7 @@ def query_chat(request, llm, tokenizer):
 
     messages = [
         {"role": "system",
-         "content": templates.load_template("chat_system").format(**parameters)},
+         "content": util.load_template("chat_system").format(**parameters)},
     ]
 
     roles = ["user", "assistant"]
@@ -66,6 +67,9 @@ def query_chat(request, llm, tokenizer):
         messages.append({"role": roles[i % 2], "content": chat[i]})
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
+    print()
+    print(prompt)
+    print()
 
     res = run_query(prompt, llm)
     memories_accessed = [str(mem["NodeId"]) for mem in memories[:5]]
@@ -82,7 +86,7 @@ def query_reflection(request, llm, tokenizer):
 
     messages = [
         {"role": "user",
-         "content": templates.load_template("reflection_a").format(**parameters)},
+         "content": util.load_template("reflection_a").format(**parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -98,7 +102,7 @@ def query_poignancy(request, llm, tokenizer):
     parameters = request["data"]
     messages = [
         {"role": "user",
-         "content": templates.load_template("poignancy_memory").format(**parameters)},
+         "content": util.load_template("poignancy").format(**parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -113,17 +117,17 @@ def query_chat_summary(request, llm, tokenizer):
     parameters = request["data"]
     messages_summary = [
         {"role": "user",
-         "content": templates.load_template("chat_summary_single").format(**parameters)},
+         "content": util.load_template("chat_summary_single").format(**parameters)},
     ]
 
     messages_user = [
         {"role": "user",
-         "content": templates.load_template("chat_summary_user").format(**parameters)},
+         "content": util.load_template("chat_summary_user").format(**parameters)},
     ]
 
     messages_agent = [
         {"role": "user",
-         "content": templates.load_template("chat_summary_agent").format(**parameters)},
+         "content": util.load_template("chat_summary_agent").format(**parameters)},
     ]
 
     prompt_summary = tokenizer.apply_chat_template(messages_summary, tokenize=False)
@@ -158,9 +162,10 @@ def query_chat_summary(request, llm, tokenizer):
 
 def query_chat_extract_plan(request, llm, tokenizer):
     parameters = request["data"]
+
     messages = [
         {"role": "user",
-         "content": templates.load_template("chat_extract_plan").format(**parameters)},
+         "content": util.load_template("chat_extract_plan").format(**parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -196,7 +201,7 @@ def generate_context(request):
     relationship = query_relationship(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
                                       util.load_tokenizer(util.MISTRAL_TOK))
 
-    return json.dumps({"response": action + last_chat + relationship})
+    return json.dumps({"response": last_chat + " " + relationship})
 
 
 def query_relationship(request, llm, tokenizer):
@@ -207,11 +212,11 @@ def query_relationship(request, llm, tokenizer):
 
     messages = [
         {"role": "user",
-         "content": templates.load_template("relationship").format(memories=memories_str, **parameters)},
+         "content": util.load_template("relationship").format(memories=memories_str, **parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
-    relationship = parameters["agent"] + "'s relationship with " + parameters["user"] + " is" + run_query(prompt, llm)
+    relationship = parameters["agent"] + "'s relationship with " + parameters["user"] + " is " + run_query(prompt, llm)
 
     return relationship
 
@@ -220,7 +225,7 @@ def query_agent_action(request, llm, tokenizer):
     parameters = request["data"]
     messages = [
         {"role": "user",
-         "content": templates.load_template("agent_action").format(**parameters)},
+         "content": util.load_template("agent_action").format(**parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -233,7 +238,7 @@ def query_knowledge(request, llm, tokenizer):
     parameters = request["data"]
     messages = [
         {"role": "user",
-         "content": templates.load_template("knowledge_summary").format(**parameters)},
+         "content": util.load_template("knowledge_summary").format(**parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
@@ -247,21 +252,35 @@ def query_plan_day(request, llm, tokenizer):
     parameters = request["data"]
     memories = request["memories"]
 
-    plans = memory_util.memories_to_string(memories)
+    memories_str = memory_util.memories_to_string(memories)
 
     messages = [
         {"role": "user",
-         "content": templates.load_template("plan_day").format(plans=plans, **parameters)},
+         "content": util.load_template("plan_day").format(**parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
 
-    res = run_query(prompt, llm)
-    plans = [s.replace("- ", "") for s in res.split("\n") if "- " in s]
+    rough_plan = run_query(prompt, llm)

-    json_res = json.dumps({"memories": plans})
-    return json_res
+    # Decompose the rough plan into one short action per hour of the day.
+    hourly_plan = []
+    for h in range(24):
+        time_start = datetime.strptime(str(h) + ":00", "%H:%M").strftime("%I:%M%p")
+        time_end = datetime.strptime(str((h + 1) % 24) + ":00", "%H:%M").strftime("%I:%M%p")

+        messages = [
+            {"role": "user",
+             "content": util.load_template("plan_day_decomp").format(time_start=time_start, time_end=time_end,
+                                                                     memories=memories_str, plan=rough_plan,
+                                                                     **parameters)},
+        ]
+
+        prompt = tokenizer.apply_chat_template(messages, tokenize=False)
+
+        hourly_plan.append(time_start + "-" + time_end + ": " + run_query(prompt, llm))
+
+    return json.dumps({"response": hourly_plan})
 
 # returns a list of validated statements
 def conversation_validate_statements(parameters, statements, llm, tokenizer):
@@ -269,7 +288,7 @@ def conversation_validate_statements(parameters, statements, llm, tokenizer):
     for statement in statements:
         message_validate = [
             {"role": "user",
-             "content": templates.load_template("chat_validate_statement").format(statement=statement,
+             "content": util.load_template("chat_validate_statement").format(statement=statement,
                                                                                   **parameters)},
         ]
 
@@ -291,7 +310,7 @@ def run_query(prompt, llm):
         verbose=False
     )
 
-    return llm_chain.run({})
+    return llm_chain.run({}).strip()
 
 
 d = {"type": "chat_summary",
diff --git a/python/llm-server/templates/chat_summary_single.txt b/python/llm-server/templates/chat_summary_single.txt
index 7265da5..45a433c 100644
--- a/python/llm-server/templates/chat_summary_single.txt
+++ b/python/llm-server/templates/chat_summary_single.txt
@@ -1,7 +1,8 @@
 You are given a conversation between {agent} and {user}.
-By completing the sentence below, briefly summarize what {agent} and {user} talked about.
+In 1 short sentence, summarize the conversation below between {agent} and {user}.
 
 Conversation:
 {conversation}
 
-{agent} had a conversation with {user} and they talked about
+Answer:
+{agent} and {user} talked about
diff --git a/python/llm-server/templates/plan_day.txt b/python/llm-server/templates/plan_day.txt
index a3af7ce..372a1e7 100644
--- a/python/llm-server/templates/plan_day.txt
+++ b/python/llm-server/templates/plan_day.txt
@@ -1,11 +1,8 @@
-In full sentences, create an hourly schedule for {agent} for today, given the restrictions below.
-Use present tense.
+In broad strokes, generate a plan for {agent}'s day for {date}.
+When do they have to wake up and when do they go to sleep?
+Answer in full sentences and include the time for each action.
 
-Today is {date}.
+Plan restrictions:
+{daily_plan_req}
 
-{scheduleOutline}
-
-{plans}
-
-Complete the prompt:
-On {date}, {agent}
+Answer:
diff --git a/python/llm-server/templates/plan_day_decomp.txt b/python/llm-server/templates/plan_day_decomp.txt
new file mode 100644
index 0000000..64c4496
--- /dev/null
+++ b/python/llm-server/templates/plan_day_decomp.txt
@@ -0,0 +1,6 @@
+Given a rough outline of {agent}'s day, generate actions for {agent} for the time between {time_start} and {time_end}.
+Answer in 1-6 words.
+
+Rough outline of {agent}'s day: {plan}
+
+Answer:
diff --git a/python/llm-server/templates/plan_day_goals.txt b/python/llm-server/templates/plan_day_goals.txt
new file mode 100644
index 0000000..e6bd758
--- /dev/null
+++ b/python/llm-server/templates/plan_day_goals.txt
@@ -0,0 +1,7 @@
+Generate a list of 5 things {agent} wants to achieve on {date} given the information below.
+All items must be distinct.
+List items must start with a dash '-'.
+
+Restrictions: {daily_plan_req}
+
+List:
diff --git a/python/llm-server/templates/plan_day_wakeup.txt b/python/llm-server/templates/plan_day_wakeup.txt
new file mode 100644
index 0000000..ce083f1
--- /dev/null
+++ b/python/llm-server/templates/plan_day_wakeup.txt
@@ -0,0 +1,8 @@
+Today is {date}.
+What is a suitable wakeup time for {agent}?
+
+Restrictions:
+{daily_plan_req}
+
+Answer:
+A suitable wakeup time for {agent} is
\ No newline at end of file
diff --git a/python/llm-server/templates/poignancy_memory.txt b/python/llm-server/templates/poignancy.txt
similarity index 100%
rename from python/llm-server/templates/poignancy_memory.txt
rename to python/llm-server/templates/poignancy.txt
diff --git a/python/llm-server/templates/relationship.txt b/python/llm-server/templates/relationship.txt
index 331dd01..4df8a5d 100644
--- a/python/llm-server/templates/relationship.txt
+++ b/python/llm-server/templates/relationship.txt
@@ -1,9 +1,8 @@
-Based on {agent}'s memories, briefly describe the relationship between {agent} and {user}.
+Briefly describe what {agent} feels or knows about {user}.
 
 Current date and time: {datetime}
 
-{agent}'s memories related to {user}:
+{agent}'s memories:
 {memories}
 
-Complete the sentence:
-{agent}'s relationship with {user} is
\ No newline at end of file
+Answer:
diff --git a/python/llm-server/util.py b/python/llm-server/util.py
index e77208f..2fa0d7c 100644
--- a/python/llm-server/util.py
+++ b/python/llm-server/util.py
@@ -31,6 +31,7 @@ def load_model(model, ctx_size, temperature):
     callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
     llm = LlamaCpp(
         model_path=model,
+        max_tokens=500,
         temperature=temperature,
         n_gpu_layers=-1,
         n_batch=512,
-- 
GitLab