From 0e3224d5edd8e98382b40231e03bfeaac4638723 Mon Sep 17 00:00:00 2001
From: Aaron Giner <aaron.giner@student.tugraz.at>
Date: Fri, 5 Apr 2024 00:07:31 +0200
Subject: [PATCH] Reorganize llm-server: rename modules, fold models and templates into util.py

---
 .../__pycache__/memory_filter.cpython-310.pyc | Bin 1402 -> 0 bytes
 .../__pycache__/models.cpython-310.pyc        | Bin 1371 -> 0 bytes
 .../__pycache__/queries.cpython-310.pyc       | Bin 0 -> 8970 bytes
 .../__pycache__/query_hub.cpython-310.pyc     | Bin 8534 -> 0 bytes
 .../__pycache__/templates.cpython-310.pyc     | Bin 366 -> 0 bytes
 .../{memory_filter.py => memory_util.py}      |   0
 python/llm-server/models.py                   |  35 ------
 .../llm-server/{query_hub.py => queries.py}   | 104 +++++++++++-------
 python/llm-server/requirements.txt            |   1 -
 .../llm-server/{llm_server.py => server.py}   |   9 +-
 python/llm-server/templates.py                |   3 -
 python/llm-server/templates/agent_action.txt  |   7 ++
 python/llm-server/templates/chat_system.txt   |   5 +-
 .../{relationship2.txt => relationship.txt}   |   0
 python/llm-server/templates/relationship1.txt |   7 --
 python/llm-server/util.py                     |  42 +++++++
 16 files changed, 117 insertions(+), 96 deletions(-)
 delete mode 100644 python/llm-server/__pycache__/memory_filter.cpython-310.pyc
 delete mode 100644 python/llm-server/__pycache__/models.cpython-310.pyc
 create mode 100644 python/llm-server/__pycache__/queries.cpython-310.pyc
 delete mode 100644 python/llm-server/__pycache__/query_hub.cpython-310.pyc
 delete mode 100644 python/llm-server/__pycache__/templates.cpython-310.pyc
 rename python/llm-server/{memory_filter.py => memory_util.py} (100%)
 delete mode 100644 python/llm-server/models.py
 rename python/llm-server/{query_hub.py => queries.py} (69%)
 delete mode 100644 python/llm-server/requirements.txt
 rename python/llm-server/{llm_server.py => server.py} (69%)
 delete mode 100644 python/llm-server/templates.py
 create mode 100644 python/llm-server/templates/agent_action.txt
 rename python/llm-server/templates/{relationship2.txt => relationship.txt} (100%)
 delete mode 100644 python/llm-server/templates/relationship1.txt
 create mode 100644 python/llm-server/util.py

diff --git a/python/llm-server/__pycache__/memory_filter.cpython-310.pyc b/python/llm-server/__pycache__/memory_filter.cpython-310.pyc
deleted file mode 100644
index 98696545a4361becd91e66d3198f7cc1627c7965..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1402
zcmZ8h%WfMt6eT$ijTB2(6gP_7AW(p|fC9(`x=4Y*by7GmTEKP!IN3}T48}v+9cLag
zNxPLXTG`wDLxQgQ2kpN95P++qU3t}gHtpfahK*9-9rE5o^78N=mF;$mp#An&XYY5H
zkiYHZdUG&&g03FGaKdRuavIW{g^Uu;LWeutJtLvZo80G(Ga7n4*dfE#Iq+o2%&i?!
zXi+5Mg^UZeSIS&S;~wcW+yC&(<lEMVe;y~yVlP?~UmtxPUHt*0ap)<Xv5FngigL!C
zw`}HA<SBXmaOP^J9qm>wcbCa2J!Labdzg84#{JL8%&*tEys?aQ>$MH-ziU)Z)p*a|
z1@8$D5J8^>yjgimgtsbxiS!A-Q3dvk-4<5KVY?y+P2T36GrP~?TU@`+p^F?^z}Wq}
zeWGtzygj{-PRZdjPC&=a?UOIn1rZ~C#f17Q&a%-~TBMs>qs@n-<F8)~X?Qa!i(?^m
zq{|50sZxf$@1@#FLE~nu^mj=jlnR^A%XGhpi)6YFp3pvjVjAnErCUpi`Gxz>$LE&e
zbNiOY+eu0(5*xm3c5DGV=qiAzK>4ywn{CSm=D(65HS}j`W3~CXvHocMQVFTXZ*X2b
zepZO5GCdaKmqj|>LW)8Tp3Cw;Bzl}>`3O*4Xq=0@l+$Q0%>Y=LOii$w#rZCe*LwDB
zn?yiP?$6^<jZTGHS<T8M&eR$Pb2RG8c_XH#AqBQo;-obg4eE<M8Vnbt-N*PSx{?dy
zOv<0k0vAb)gX2V((s+AW8EfOD;K`B_E>5QKCqG6>sUnr;#@A(*Ds5bFuZ=g6fEq_>
zIc%EXGB4vD9tk-$UXF9Ja8cyDg7dW4kLG>{zz``W$YdI}(tLu=@LVEnUqOK}O(`;Q
zY$I0`ORGz>_&+MFGSnTIfOcrWdbCS>><+yL8?bxS{a4;aa3is|x@>lhhh8gc7XwZ&
z^z&fmV(e93#pEq*E1lK4ar&g7oPI;<7?t<ARTwk{RrqzRQO6vtbgg#aZ}tg%&Y@d5
zR{q-tFR%?>e&8o;W$=-{3Ky<QSHSstVCX>NKZh9F8d7;1qoFVR*0F?2&yKi=c&tT~
zNP!PGol9>~?;EzjU4%CQ<MLCx%%4cHmtL#WIf*^sN{K?UdYa=cg{lMN(tx`3E`GPE
z%j6euZ4cST#?UvPTnaY2F4(%b>Z*}lfNQ?%W!Ijr)=B+-!76IwC=+W|W3{%r#ASig
G>HP=$%w?1S

diff --git a/python/llm-server/__pycache__/models.cpython-310.pyc b/python/llm-server/__pycache__/models.cpython-310.pyc
deleted file mode 100644
index d4cba68861e7b29efc2747a38281b21ce03153cf..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1371
zcma)5OONA35bpLP$t2EW_W%e18jwITlC!(BAfy%ImE|QSS$2j&sAb9OxI2j_?Y3#R
zM>A3Ov|RT;fH?7&d_$b}C%^@&okW>g?SZyju1;51zOSmjtlMoPSa1B^oBin^^q1LO
zUlwdWgSQC5FvPGxDJB>j-x5}86FYT?Q}1ozrVY{n-yu!rk``-_1J)#M)*>BtKs?qa
zUDhEz=CSTCi1b;H_2D~UgHz<+dJg}9d|b7T1x@KF&wZ=vkEjq+8ZXCG(m7YvTc=ua
znkI67s@bDb@6NxWk_oQhdiV2EXFp_1E|V2ktD~PDgvZC@!*RyAD8e);w4x&TXc{CE
z+%nd|#ogh(;o}dY@1pT=J}+l0|K=TG@B~NZPQ0M{M(F!5550H)C9(e-%7=R>_g~0g
zN*d54s39E%rHr{OGR4^qQ15J6(&Wx~$M=ii+v|K+Z>C(+O`bb-!S@7%#f-eWm_=;)
zV|sFS@{pW;J)LBed~wMnS)CuBT#ml{c_z}bh_UI}b{QW=2#w$^4#6<IMi)qRG&%=Q
zTg-Z9og$5Xh23u$T5eB>TQl)7akXigZz~sWlKxC(X_PAt%}OL^Rkx0dXr4q%eg$oI
zQ`68tkZM8vKc9{sgl7d;MYznQrr{&WAFAYnhi5Vg$F$H~9e%H}a~|tZh%|uE3$DUC
zM={K=s-DOwi)svJYzC0DXn<+q9&TI8h$Obb?3O`!0B=q9J#aMA_$iv>r}&Ajtu<O(
z3^Qwu*EX}6^Q`&ASvx@4ojm^+R)RoSU-_Vo8bXv)y{I~xr#V+tmx>clM)SOk1ib=Q
zq$#5*)$xM3GK%$OHHf#mh|;Yvn!sn86&xyPa4ORYWz_4tstF&`f_qV6$a&_dYksq#
z7IgtP#t^F7btP(8ufo1~1&oKS7oNu7PFwZiXyV@@Zwo5tAdLjTWocT<L|+XdbpZyX
z6X$u2q<ZaooZ`bBiDD0#c+hcK%rcb%@69lH+i|X|MB1B4Fcl-cy&r+($n9OEplZ%H
z@qGcBiS|k%om~)gn;dL1fU8Wq>qghSu9d}1n5t{j`WWg`3jT?i_2P9f9vWZ|JC=jD
KPwlmAulXOyk%5;0

diff --git a/python/llm-server/__pycache__/queries.cpython-310.pyc b/python/llm-server/__pycache__/queries.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b5d1a481abef7797a375fb23e00ca37dd469498
GIT binary patch
literal 8970
zcmb_i+i%=fdM7#L42N^k#j<5*b2;{A>&lh2iR)||uQy)RQPxU*NhJl_uwG25In>P9
z9CC6FWn~7`qJXmx1)2gyU$a;V=!@S9^uH+j7qE|gSw9v5iXy-w{e6ezTx2=PLr37@
zxu5TR_wS>kM#ItY`~1zhJ^#m=_FvSP{#j`J9FP1D6kOwapt;#wcXhOl!04CUl1_c4
z!0cPDrRru-?pNH3s_TL6+G=gpt*W=<I^5#r&o#HkE8NDr&a2$PyTNO`j(3wccoXkA
zKF8<rp63gE5$^?lhA-j0$j|a~c%R|R{5;-E`~rUk@3Z^|{8hZq@z?n4crWueIO7-j
zCH})NOC62B`DMvH&-HC><wwtPBklf*p4xZs-n-rPdLin~k3`fT#5;a}5O}eVp2qrq
z9PLDp{jfLkMO&v;`Om<+g6B3K`5s1M?MP3wWBpJ|fT|1Hp`Pg6cx;H*Vk6Pw(vcaL
z9$CD^&EpbUWwa{C`qR?3hItm|y&c=*c~#6SkLNk#c@@mth-=?D&&Iq@;`(>at72Y1
zZhYrF2lMvhW}-a>`T%bY?Z1!bzH?n2^L`u8fBQV%07;tnQ!^e8{M2lBz4*B{uW6kO
z)xW=$)~T;0_xpWM3<Z%PT~J;AXR+|waSH?sT}SwPf!~gMQRr3&QLhtvVSDIS+EEx|
zqFa3&MhAh<JAP`@f)@9NuC=kX^<ZmdAvGV#C`_#&^0-W$PxpN>Y|$C1xgYm})S-b^
zKjMDiI(P4`-&=2ew7zpEZL0pb;1Rc)jqN=6%XD$FNdbFWzqh%)v$cLVTU;wzG<|t$
z`t-fh`D|cvyL2w=8Q(FT@A#oFz_hLWvc;6im7DpR{NgJ{S`j|_WbD@R$`0`VOcOr<
ze!sr^*Eb$+ZG7}#dvoW(*5=0c!)w>C%E8q^)CqdAY<I7AdN}ba&h_xb^5I|@ccbuO
z5cID~Up(=}LpreM%eBEUZ45ji{Z@VsXsgM;K;h`NZX1ibqnqD+_ZxHd*|}y(yoPPs
zB^1T4fr^bs3FZ=bB^Sq1QsTx1?FhOGLTaMdgpgXXeq<yT#;gk(FE49}$txGMW$oC|
zHH?)K3tG$m4Oa5%v4MH!O)WOL^WxaD#+_vd2=rbpwvNiYeyl&L@a3MCl>SBkN1Zpm
zG&=fW8QQPeF%B!S9aj_css2=dU;EYkp`DbEoTQT2eD0if0dOQX&cvEy?D*+cVmzuP
zWe#2Vg>hJgXjc>csd4D=MSkXUZ35-{fZ(teH;$T#!<XW@qj`QdsT~_Vou8w<4(mxB
z;&vY5#=Y1RWuWS7{nx48_xll%3;YiHeXalW`8W7ICnK<v>P}jgLmB&hx0cDGYCJPs
zQ$&HE%UB2*^Q7#_*bC$5q#-|e1{GTf#|!$C_TI)L&!gyoP`|Pd^_cVv5b#w0C*xBC
zpz3b<KE!U5Lm`)BEWo+O+FNc304r9_idV5k>f8))cpF;$y<gD21j2c3^m@kn1o+y`
zAZmMod=HgkYPwL|z2&vrzLY-yT3^L6w(#X33W1K-v5`6lio%iuh(KCXi-s-I4({Au
zFNl57DmF?Ni&`s=S^&Bic2aAASDaRXMP*=Q+Q^hg-cHMVk?2F?Si%dTq)LGwre$w1
z@I#)S!8^z_Qmfb?t&07yr7kCGbbpKQ_Xkq^3DwO<5v*E6yh4o<#1aP=S}JuQk6zyg
z$Wn2DU^ELnASJ|Ex)xDYrg|?e50p_%OTdL#Q?p1>LX_nR><f}S_z{}?V-&XDglNC2
zpEsCcl$Idi5OHhXcwKKAj&V*e)A%<KU|KaJz~qz=XD9*wH7a8PhRXqwHDm3O4l#u&
zenA3h+|;-g8%Ja@^+zUej0LWu1kQr!SP&4KS0Os(xSW)g=v27FYY?5vQH4ZR3634x
zM^$blc29?>RFkT*Od0tZK-mE|HhW<^*ynyLR9q=8qTj7gv~$Z7m(lfsSV8#$&y&0n
z&ws^GD4wq$g5MMUi6)FhhkAndaBO6}|I8Q}S6Qk-ilD5)G;RNxAy%<6)n7pWRdoLi
zp09}OnKpVe<N66;r}4h{5hg1_K6Au1s{bh!?@;j*D*lX$pHgv?inpmCUK2k<vEnH9
z61S-1=Txjy!KfhpF5ahN1BGk%LfPwdV<|qMKC&@cL4qsJqPUMoX@X487fjQ%9AgPg
zW9n+w46BfyFIeTPkI*Psg&X9Kk*0rSaC^)UreX*a3{e6D!3zaLSg{HHPr9_Mw00#a
zE8<s@3W#65pAlaXd46ngGeI9t5n8;3<1<87UXH#o=APN7Vr~{`r=sc{&hhZbMHIGf
z068REgeHX~8lOSZ0y+vL$v}SzlFpt)QjS7Pq44CWm5-9d8T{mEIY%(udmy-?S{6C`
zXxXY&&RbQO(khwE*n;U(1`QJJ!0^>zij~2JS*@cE(|YK{=2H!GA>;WPTdk?_CZB^O
z*I;z#b4jlA1+**2+7n&;9X#t&Qcv{6cxoQjv3@aMpBd2x0t;H-m|EY&`bNI~NUd+?
z>tR@7#^;i`qiO=Zp=+wvq+0A8)%a2}rx<8HnFj-%n`WRhneEK{*p`G*2z<puZsyMB
z9&U>)u)8=jS$SERP8ls+&3<hA<9qkkw?4VSQcI1GF0ii0naAJ-!UK~Y`NHg=7kAm{
zEW66$u0Le47d-YcX*b%B*{E6U_3`$`)(!SjeI>}CiaS(%NX1<$PDn9npnsr#66Mh!
zO{{L={=QTLjh4fRJ@JBlZ#VE+FU$-ffFI2j@U8!1<NnV71AzZr0GO#My248RREfP#
zGsjwwLW2Weq%*i}0p=e3C?7T+WuH9Zv<&YZCVNUvE}SOp?tEcx3-q{+agXAF=D4Y-
z>ozl6Jf2?5+iG$>Z)8&|IV-vK8J=30Znek*avMcgws3K#nN6vO{=wL_IaMi<jv3&Q
zq}(S;$tY*asdzW7rg94${Z%*sw&CbYdfgyLLDeu;M$O5zJ?%N8$fmWu{9BBks_mVV
zitJ_Do<q|gSx^v~xXf*G4B!Y<XS9E9N_a&TCr&B+Q3WhZVWilkfwB5uu}@_t#Ya0d
zb8G{|7Bx@E&=UH7hewhgpJ0;IdtEm(%+Q<}L9L*J9_2FqFDf}^kdRO=lK~tc?&0R=
zaFxjFQEUTcsB=mvTvE;xR4K1Q@j}s(`r3w?Mc9rwCbkg8AXkHQgmq-2Ri$*qVJUXv
z+EG0*c%9;u3rKw+@-XvVno!v~pQA{M&!e8rT2Sr9Weq6@L_QchgE5mY0g8wGY={^I
zealew79|_L<@9n6GvYJsfVeG#8F9_TS(`x-?4CxB(Ka4QoRpym8rry@&fgJo8*ZI{
zTlh+6Dyksf&_6ptpVYn?c>P`Oz4vdl5$$Meqe~~l?`cJsP2WL0QF`nT$&n+~?5i|J
z#x?}g=x3}ap}n8LD4eRmWz_eh(9d*uNP93i@NCzIuJqRgkzf3fieJ)h^DOXWT<H2Z
zV#-Y&nGE*8t*C4Id{lx>_*!Sr2|2FWjrN6WQ7K0j^E%Pm%3Ny7NW|h@+IY;kMP@^3
z48%-iWki#v%uzv(iI}J242rZwL63>Jri<K;-wU$vBu|bAAE`Dhm)+iArIM-1by~`3
zX*t{5t<hy}BYXp$+$uG))C|1t$?_AF63f`(U-3v1&CI8)m(J;nCS@DWnU{1!H@+$3
zp?6cg&2RG2l3G2ZeMtPrZd8m$p?z{0n+Zz}t=W(SA%)e`^E^NnmXMQVA)?R1lFE$R
zgIq-B_8?eMZV@EHPV7^Hk)<LSQAVgu?CWzyYvBXNDSagrmz}*V*)-B}=b!}U%Xq%>
zJ)(e>>S^{&k<6`+17AeFQ$c`&=7g?m1HymDBbQJRg_;Nz&*$#!CEYXx&6uGAvH6$^
z<SiFenDO{4-`55tW#H7F((-P7hN&;(gv{y6uw%Vcw-?NNA{3f9^c-8AWYHG-{u7TR
zX_-;>ba@(`x%{*N6dz%9HauAvC;JKyVVe9T1=;z3MYYpx52ay#kLR?|0=7rc_zXZZ
zq524;1vMrzfuy*JP#TGErLj>*h+gLi$j9<f&hlcpJmf(`!TE=f5BfTg*s#vE+_|^%
zFdci0!}P|K=FbFxphMCHCR$D*V#X?v{w^Z&@9W{3(3Y8pdnxm9=>*CpPb>W*w={{b
z#RHu6Z2|Kh#-{{kgPNzs$`>&3KX}HmvNI($<5-zywM`VokMtK^^)?>)8Hx^4DGB8s
z-vM=yc0;_3{2;<Phr_CZI-1H_W-)U)DIsR2+$F`#6@*X~vb@~RGM*}YGY~_ATA)-}
zc~w72%p=TFfsqQ_J~a~)@&>Q!BF=Z0Re<xN2c1^O&h!-BMrSs++0LB}w!O2yvvE%a
z1D8kj%k1`pt*wpQJGX8yw=~SNQZI4BuH)Fuqu-(04HVF2WN%z&e5RbFGh9nSCpt8v
zaM#B^zdM(^+{G2r5^5)9{9~-Gfn_A+5KSmK0|{?KDLF*#<#{-~b-27XNj%MM6HgYu
z5-MYLM;pYHl*ad@WBR%Tf_+gsgmZUP>Y)dyE#3cMrIck|ND-wKxCy(F^jAzl+X~vx
zZR{bXH%TIvkVKqb1OprN$-ef&lXoE}_6@pA9TkJ_g1_<~oN|U#(oZA%!>AjwyS>o0
zK17O9dJ-3KRP_eC(_3Ymm-|dL*Vv{ccZLnR9`c^;A(P0Z1}s9t^OjR=6W(C!-Bs3!
zBF^CTgz%3pvMU~cv@c`HI&cQ0FR!o%nA7!xAfg?Gf02FS%hlQ4;a^P-SY`bo+e5-T
z<k(L3d&qaf7~=EJ@Cw_EVJ3IFaD*V%t85*K+kjn<am2N^u3cYcUdUOG=Ct>PK+G2m
z+1uA{ymOtc-&<pMq660VhHR5%xo|d&_HVH(f$tIQ4DQ;2C-^G!VQB<|YX-0fR|*Vm
zvd~9#PTn3{mxCTs#<YZ>rNp`~*4Q>sHtj3!BINJ-9{1W^e~sPtLON0H3J)<u#|%0j
z>>-B5-F%`2hX)C_b1*<+AOi*HfE$6mq&m0&VmJEiilPK$3@FJ!TU%RWS7Prm=2AAn
zV|)GqoK6JsF=%oZ{wW|JN-^x$e$0A%?0}AY;sb|6wOS2G#QdOb90_1wQLdOaKoUG2
z0;VpIF4fIE0w4V>+MAqp2Sit+o#-^uUST_cl8_-$Ml@682vyYV^Z*hV2mZ0o4!VBG
zB(O}R0BA8TdC&_V<4SkHoN8##_X7#s#UKf&6@?cI3hn^X#|jh~vHpI$o3jLJM7Cc1
zGGgF58C`H&@HN5_$-J0>`PzWl>);N6sh~$YBL08#?YGw6eOD36W3ul-e0HNa&X9BM
ztpY!|K*r)ZZ+mRF7gH>t#E%Xj_Tt15jJ$|dwhMA97Vjc!d-c<O<T+vz(EYtVfR|(A
zm(hN})m4KYuuI&An3zyY95@RQL?GJRL*#@0U0kL>*CxP#{3xDidlE7Rw2=s_JrIPj
zJBnRcNN9jOPHToKR;FlNMBuzWAq|KCnBzOMF41k8=r7O@C>nUkq=3ju{Jr478$!X5
z-XX~#AZDZz*dv}?Rd5p0CR~zjkgWw)J=);0?+>yK`abWqv(y}fn<#*oi=<Hr3c7#>
z2=WztUjQ)gD`w5K0%V5aD;<P~KOt;DF9;u>yO4K+|4(7mEr;blsC4r9GgDiiP6%Rz
zY|121Ub!GtZ0DiZod@T2l=fd!rBs%~U6^!dR!?Teq>c~pp`?5reM=&TBXf~eICzbk
z^o3S^<x%;2L3s^9xg9}XzlyVzo3Gqm<zfo*U6d_UW+m16lpRtb1bs6=B#y7~`+-mY
ze4)udM`6Q<t|LuShh<WK75feRow4WaAKMG|D|i}q(>`l2;S-cWw#)p3(RBX%{{YT@
Bmbw4{

literal 0
HcmV?d00001

diff --git a/python/llm-server/__pycache__/query_hub.cpython-310.pyc b/python/llm-server/__pycache__/query_hub.cpython-310.pyc
deleted file mode 100644
index 8081ca13e3d0382ac7c359d472efa3c9cedda74b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 8534
zcmb_h%WvFBdgrs7&3@=%S+Zt4p0Y7nM~<v`oS8+&>)5NcJ!=pA*hunjrsF{`)kUc(
zvB`FkvZfo<VgqlE4iF##a+rez<l^HV7sy|cBToU6LwpGk400J@;rzZL*$-JBFM>#5
zk@c?n9>4F=VZClE`1_~-Xl(s@SyBF-D&v19DnG^({R;}NaMf3u*;{R@sB6B~)tkCX
zZMtuC&88{84ZqYaH_P%{^{u8Q=T@2(`L>%jH+ktdO0&w#+`_xYE8NDr&a1qJcZ1h?
z1Mew5#i#L}<}-X2?-_oI&*44GPxE=aPw@qQ2Jbn($lt^JG=HC;<sa~K{KIebZH2RM
z^yWNQ*OjI7uW$q9;gXtK_wPTr)p5K4-;JM%u-l6_+-}czVizs-)x9{}2zTAU8@Qsa
z(yaVX!@G><7M|$m=!um>HBpY#10?}z7L@}vQMtCOi4S5eQDXhji1i(l>)bffQ7fTV
zK2o3S>k7u18252(4aZe5t~4BH566`;?oM2N=Qs=FzKm<{99O}(Zd`xoI2+^sCT_fQ
zTovQ~F`jzoxEjX2jHkbU9It~_jfbfb_j_(?v^!4hDbtFwow49QZ>BYBYejqAt|R(_
z*ptr4Cim-DIPJIvMh2-Z+%4a2$6gpTE4|R$4xFIfPiw*jo6#6`yk4`i8-)A5%eURs
zqFF8O^i%WB+S;SFrJ2;&iNYW?{m|i2YCqj`MZZNGrKN7jT_0`K)shu_`%c$s^_;kq
zHe`ETh)`O|`p}Thj+QB|kk-0hgp>Q(<Z4l);R|EKC$2Z0b&M{VPGv2ltL9scU$_`F
zx^^R<m0x~IOUuGVTNF2|`Nswr@lp{Vg3#an<kqz(k0Vz^Pj<r~cAh*6+}px?=015G
zcuyWUQS1r^h1vn(pZI?FGKM~L#glB4&fexqub<X?j)>e=zB*W|M4zLuRZF$BS=CmJ
z*Z<eIp^Nias;#3a{_6O!@JOAVgSh0HswX<v7L`N57WCIZs{#ErWA#u=O!S$H3NI}v
ziNVW@%7SvFsS5h^!~}p@e~X#Ca-?CLab1ZGZok>Lpm2LZd9DHitFd`l;<Y1nr_2{T
zCDH#`{X3P{ztOhUgA#zSv8^4HV=JyC#&h+#dRzHw`oKy`hjvm<EIu`_EaDuAg*`Fn
z2rE8aOSGM8QsRKc7urDuT3<=j=h}hIXZfk$D5F!}#t9Cpas9B7*nBRYI-KUGlj@P?
zseGQ+I;bTzXxtfS9Cu<zlt8Lf{Z(pp-EJs6H%g5?aPd3!N3UMv?-kjDja0SMQq%{#
zyUl8*j<WJnOAQhFZdz_bJlr5oD^3(ax}6|?Md11QOQ3I|7;iu;t-X#XI)h>xjQYwt
zP-B8h5a7A`_u5kpM^&4phtR)!99XSGaUiqW%9T{dfu)dU1;Y}leci|AZ2<g@yR<GH
z;fyjkm&qd?eC4_ywjDpZfsbNnI#b-e<+R&w6uJC6bs5`O!i{=>4p&^jLUJF#gc<dG
zFHY-nQopt3`Ot#qRQ7HaOQo~LS1S%%IJOsTr)Cc?5D9dY)+b8q8Fb|Jw6qn9E)d5Q
zP5_Y7eK$x;POs+%JUxZCpCMAKSRk#4y`UwpCrI#9lkatVk+@3V#!d*6R~P4~qC-8g
z@ereIXr<G2ar8*afcS_;K?<VC*><C*Nqmh`)k{k~X)#kB)DS<EqX;OW(9tpaiz)P8
z!4v&43QKK3!OyB^G^T0#9Ml|&Zcc0G)P`nj^J<CuUqgjy)`SX^heEO;Rrpu<7^*P*
z4=Ak>D?2I_70UR6R8qUHa5L5p$(*V?2ColQt}Io~gz}hB5sO!#Jf*mll%(>Mxy`Fk
zp7LRtlvS#Z6<dcDt|gYILRl(FMVhBf0Ek2zkjI7>wEaEqwgM@a;*ZeYgrzAAP|Fd2
zf~L;}(c%rMCygVi|CXVUQeQiO#3$-AMQDi%fP!mrq-E0oQX6QOS*k#f0Mrngw);{O
ze}<W<`Ud$wMe`Fp-;&%jW$;lZ_oIWIB>iFugC!$h+Tt30|A>mepyCD<H>n_2h}%?r
zMg_^5xPxNJmf|ITN)4oQ;vN-5De(mr_o;Y*qG@?S<ZXB2NIaxAvM*V|=xY(hBRmod
zU>Vd|wGf(yX=`&38bg(%CPamhz7Umfe}+mSDqJI9jj(=4<JM3h3@H!>1VV=Z!6k)2
zn6Uv1KxkT$xLr<4lKJJN4CYrJX3UpNo*tUpOws#ej23^6?K47_-%h?E<z8ARQf`uH
zCz9$t?6Zj{nnhu$8i+%>MP!mhqW%dY&7h$mQsj@B#0esuK2D^(*OVkY-fQN)q;VQQ
z-doBE3|}7vF3Xxpo<C}qtd;Uw1;(^OMl&{H{G>^PM%yrbRTyJwvSC<j_=a&kuw&!7
zg0ax?e2yjORC$9>L6fU6yVJQQ*Z2(T<s;>pDt-;OTTg0<nrP3BgBs?~=JPW%T1S*Y
z^Xp^t8<=0u=O4=XjeI^#D-8KmGIdx<fE%hJzZ&!v+lN&?mrO|knog!6K=b1QbSkr+
znKRppU=(~;%1|@&X>&KXMK-uOJ24or9K;!>3}!B8e{B8ng9oc?UtVLWDf<VDtmAOz
zFc@TbVv+QH*}fNd*x)p~%;JvQXR+h&x)`(>?!|1-DAszserN3(dn;ZEQcpoTCf2Cf
zpyHSo69T=U_Mf8|{9t5v3+MN(2sBs-LiWt@_nb}NWnPdOLOAY&sp5RAKfUvC<Nx7+
z|4<w-gDJYgQtd>Iy-p*Cs7Il}zAMrR+O}|J51y0@8;`OJ9z|&hzB^3z7);Kbr0mV<
z!rT_*X~L_>3~(j^jbT-Cx7o;S@o;!GuS;>M<rO(JE$5=ttWC(&%y_LuK2Wn>G-Z3t
zPE@lYwZPpUx;7_*(rJ+DDIV$Dk3dN)W#Cl28)ieg1-5z?4uGZE>YQ5B$Wf4A=qusN
z&d{FLoB*<Mw2yv?-V@Q@J`QAWLwgRSKQsXlinz!vatz=IR3^~BI;OmWwjTrhK^Y=T
zVWjv=>KMZRl69gmNjW0(JQ54hr0OvpB4_vzPc)BWL`j18nrdW}p)nJbsiA=!3+Xa#
zj6pmol)Fq6Yfwx>Z>}RYN6rE+lL3&;V;=%7WGW1}P%=qFDVi4)2bnu1Sj!5plC|Wu
zxO`~wI)J~z&%;$RJvEPT8d-z`dfSLqkjNqMkE?O*u#WjN0QK2LWKR&G0FLFZrzFVN
zDB|LCGWw!Xr)7P9K|wacdaj~x0eu!fgHwlmF;Mw?XnP+9p^~q2c5IywFmEa|Cemam
z{G7bGl4OPkrbO1qZ~x)ge1`8zz9)>1gz{=q1(aXNr9rwkAac(1zsA?#Qvh$o0vD*u
z;8zpUvK|mP3^LGlge!p;kk8_fZMr}>cVz-?X+?6~WK2ZDmWeku)*hzQcSW=g$J4zf
zT#4#ZZV(fxUmug8)Vhw`#3pxc{0?hGw4ycU436?4<6xXk*g({y@49_*HpzS-Kh~Tp
zyj_WMZQ9-Cna^7vPc~c~Qw?zun~Gne7)&!Cd4j^`;N}B^fhK8L-oxbs9cC$2*(<tz
zYIMRqk(yLS1Cu%1;mY9j#fXh>l$$f1dC{P<c4z(Z{f+f&Y%qoTIE4q=3d?9TEhLby
z(wT<-S87C|h`-<CO?3*_3`Ardq^4ZYmw}bE2(TCwSDobeHfzJs4{_hf4Q^J661NbA
zf|X6Hs0a#oPQ=vr(eYb6(T`D(Piz4z=ha!@rKX?BoxXYPf~u+7>k=M%qYrQ6^#$G1
zXojKvfJSP9R-3?#aWoZwh2BYDcV?J1D$LC|gNJA*zQoKEf$S_g{uR$jeky=WQ2)e%
zCgQ#UdJA|Tff&-S2I4ZLSTpbf2pPP_5#0?zs5As26Cjj(posZ0?-DT}<((4?P+!eG
zMKh22u*NXvtB<QX`TnAc0uem$J{lv$gaVVQ72(|<>Ms2ktu5W93z@q_J5XYBTsw<Y
z#VA-4zreiji@pL{_Ya6BKESwt!!r!LY((pYa18@58r3#X6n_<b3=PiLsB9xHPbii3
zDY%0y4uUVFn-Ck=97!~AN0CXeEC4GdIs!0Cg;4-jhCg0TOhB%cCD3Fnpdlawx4@~A
zG|@jyj6;l)Ubc(^o*Ib(Y=nuHvDt$3mfv*8(#p^?8{^yH)V+1KarX{e-&oza^FVr*
z7YDVA?AD{TwL7;qZo;M3`+17vEf!s(%E+w$1XNh6$N;!$5BH2l5RsblbRxvSq3<_z
zAe&RU|5jWf)$w%<+r?Ly`#*RhLLUSAts#d>`YuDj(lorM8vG~HdpWjEGFklT_!tI)
zv_MQbPW*ClM89KzVK4LpErDJ1&;rux4?kbhvk;yF{Im?4uo=QC7=o4+v|d`@LVjqJ
zk<yWo8lTieHoQxDbT2r57b0B+upg24qS0OOU;4dW%6#rJS_jq_VJBesy`X7+fqYfu
zM7V%0Kd-U7-ZHy)vCCw2h1~=Glhy4wNTRj-EQ&qfXCbm@H|=7X;2K-)EVJz}<P0V*
zfH^+TE;)Q>FN!0!4N#8U=n{K`F&)?ULt0U|=h>HTv^=>wtj%bHW!CMpE#yxFj^(1R
zhvXUT6n-}7Ut;%SSg?%_Od8aBnXMuV=(A5^Y;pC<)lZh06L99ynD(9!2q64E`}pd$
zPd{O+4_4USaG!OZKD)=V)GF(TdpFr7-*xEh3|_>(Blt3N;k^s;EO6}JrGi5D;E^EI
zi^8tUR->MW{3A`F(^8(+6)S8VB*R@g&NEuiVVj5_JFdf>cE?>|x14}>l&iw8$;dH5
z&PQ7ah;TQTc){V=V5wdYQD$}&kOM9Q){5lD#UVDsuP;eXK*vCm>}V@1E9_G2>|!j1
zrViV3_hHfz5ys%jeRx$k0db09z4k8iw%9&x_sj(i`*ODIh!8-6w{a*yeaX3ESPvQ1
zxQ{b+K=er7%%S6>orPPYz3zhPvUfW?iM5y522M%Dh){+!Qse}c+}y@*Bp{i9r{l8y
zjvKHDR3=v7XfZCi?*+TK(p?Cr?Amhi0~2T$(=RGf7zXDH4sPJ2yYeVe$hv#&PA(Go
zBDQ()+n9mtWPHJGA=mI6Bj&^m!q+BGbQ^cTnF@ZiLz4fGKEAT@*=Leb4vV@D)Mqn{
z<BT|0uN3sb1u_xOWg9}{#pKmV^`i|)yf{e&BQIi^ZGxRr#5+g<UVgfVpgAT5-P_v2
z@p5YXBHZ)2ysGbkx+HA~u!yuIfs+S<3WQr*a8aeiZjT9k7(jodOthT{ItH?l3d=R%
zPO`gFTv$M4fIf~R!<Z;jXeUBQUYCdlLg1LgJF^b)ZJg<E@DC*FIS9+aWU2m+zwh(`
zFoZj#8FYvVtpxQ*CYR+oiD)A!$ri}wf~pQJu<N?LY=N%Jy>^!8fpQZE;60NzN<~2v
z$N)vYgk&`ib6W~)h6>Obh97;PJp36^1Gpeu{Ca{U27G<`(e$Rx@;^X2g*npH;wRa$
z>=Js&ri@bLOADEo?W46h4awV<=)WS%NLr5O%&0lDdQoOfYWQI^h$#63)(UbsvRHbW
zzOGSmo(lTOKqiC5d-O$#0&#{4a&o0(Dad1y7El_KROMsVM@HNX0}v|V=iEKty+P4O
x^a%<Jo^}n9b`ADO{+BIgeQ2GsrmR1;W~}p8!#Zuv;TH^ztd;S5tzrN7{{nP(U%vnV

diff --git a/python/llm-server/__pycache__/templates.cpython-310.pyc b/python/llm-server/__pycache__/templates.cpython-310.pyc
deleted file mode 100644
index 24a5eca77de3e858f1ec91a9ee3a47b435cc0fa2..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 366
zcmY+A!Ab)$5QZn)Rf=?Pg<iex!RB5>itE)XXfG0w(9EHl-7HBgTD<84_z3mrE9E5g
zEj&3{iPnLc$;|wdkHi$k0gzp9KJkt6HWs^<Q86WV0}2WhdxaZN*%g$%1*2B7c<zx`
z<Zej8llB=#q#eNv+pu?tJG1q)n((<p>-fECyyO=Rp4sIJ`Mg>3bLl+V!%J)KaN)Vu
z^$|%|Xt~F_)zYKm>wN4VeVK*awAh5)qEsad{iOH+n-l-*w3>(xBs!2tAVDDUA0+-j
tB3<Lg?NcbUk?J<J;u~H3kRh}ClxF2m?sz+4k7l8NTpRVE@#J@F0N*c8TigHu

diff --git a/python/llm-server/memory_filter.py b/python/llm-server/memory_util.py
similarity index 100%
rename from python/llm-server/memory_filter.py
rename to python/llm-server/memory_util.py
diff --git a/python/llm-server/models.py b/python/llm-server/models.py
deleted file mode 100644
index 72af9bc..0000000
--- a/python/llm-server/models.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from langchain_community.llms.llamacpp import LlamaCpp
-from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
-from transformers import AutoTokenizer
-
-mistral_path = "X:\LLM Models\mistral-7b-instruct-v0.1.Q4_K_M.gguf"
-llama_path = "X:\LLM Models\llama-2-7b-chat.Q4_K_M.gguf"
-llama_path_13b = "X:\LLM Models\llama-2-13b-chat.Q4_K_M.gguf"
-mistral_large_path = "X:\LLM Models\mistral-7b-instruct-v0.1.Q5_K_M.gguf"
-luna_uncensored = "X:\LLM Models\luna-ai-llama2-uncensored.Q4_K_M.gguf"
-
-mistral_tok = "mistralai/Mistral-7B-Instruct-v0.1"
-llama_tok = "meta-llama/Llama-2-7b-chat-hf"
-llama_api_key = "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"
-
-
-def load_tokenizer(model):
-    if "llama" in model:
-        return AutoTokenizer.from_pretrained(model, token=llama_api_key)
-    else:
-        return AutoTokenizer.from_pretrained(model)
-
-
-def load_model(model, ctx_size, temperature):
-    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
-    llm = LlamaCpp(
-        model_path=model,
-        temperature=temperature,
-        n_gpu_layers=-1,
-        n_batch=512,
-        n_ctx=ctx_size,
-        callback_manager=callback_manager,
-        verbose=False,
-    )
-
-    return llm
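
The model-loading helpers deleted above reappear unchanged in util.py at the
end of this patch, with the lowercase module globals promoted to uppercase
constants. A before/after sketch of a typical call site, using only names
that appear in this diff:

    # before this patch
    import models
    llm = models.load_model(models.llama_path, 4096, 0.75)
    tok = models.load_tokenizer(models.llama_tok)

    # after this patch
    import util
    llm = util.load_model(util.LLAMA_PATH, 4096, 0.75)
    tok = util.load_tokenizer(util.LLAMA_TOK)
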
diff --git a/python/llm-server/query_hub.py b/python/llm-server/queries.py
similarity index 69%
rename from python/llm-server/query_hub.py
rename to python/llm-server/queries.py
index a3b6c2b..a629d21 100644
--- a/python/llm-server/query_hub.py
+++ b/python/llm-server/queries.py
@@ -5,8 +5,8 @@ from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
 from transformers import AutoTokenizer
 import templates
-import models
-import memory_filter
+import util
+import memory_util
 
 
 # parses a query request
@@ -18,29 +18,28 @@ def parse_request(request):
     requestType = request["type"]
 
     if requestType == "chat":
-        return query_chat(request, models.load_model(models.llama_path, 4096, 0.75),
-                          models.load_tokenizer(models.llama_tok))
+        return query_chat(request, util.load_model(util.LLAMA_PATH, 4096, 0.75),
+                          util.load_tokenizer(util.LLAMA_TOK))
     elif requestType == "chat_summary":
-        return query_chat_summary(request, models.load_model(models.mistral_path, 4096, 0),
-                                  models.load_tokenizer(models.mistral_tok))
+        return query_chat_summary(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                                  util.load_tokenizer(util.MISTRAL_TOK))
     elif requestType == "chat_extract_plan":
-        return query_chat_extract_plan(request, models.load_model(models.mistral_path, 4096, 0),
-                                       models.load_tokenizer(models.mistral_tok))
+        return query_chat_extract_plan(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                                       util.load_tokenizer(util.MISTRAL_TOK))
     elif requestType == "reflection":
-        return query_reflection(request, models.load_model(models.mistral_path, 4096, 0),
-                                models.load_tokenizer(models.mistral_tok))
+        return query_reflection(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                                util.load_tokenizer(util.MISTRAL_TOK))
     elif requestType == "poignancy":
-        return query_poignancy(request, models.load_model(models.mistral_path, 4096, 0),
-                               models.load_tokenizer(models.mistral_tok))
-    elif requestType == "relationship":
-        return query_relationship(request, models.load_model(models.mistral_path, 4096, 0),
-                                  models.load_tokenizer(models.mistral_tok))
+        return query_poignancy(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                               util.load_tokenizer(util.MISTRAL_TOK))
+    elif requestType == "context":
+        return generate_context(request)
     elif requestType == "knowledge":
-        return query_knowledge(request, models.load_model(models.mistral_path, 4096, 0),
-                               models.load_tokenizer(models.mistral_tok))
+        return query_knowledge(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                               util.load_tokenizer(util.MISTRAL_TOK))
     elif requestType == "plan_day":
-        return query_plan_day(request, models.load_model(models.mistral_path, 4096, 0),
-                              models.load_tokenizer(models.mistral_tok))
+        return query_plan_day(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                              util.load_tokenizer(util.MISTRAL_TOK))
 
     return "ERROR"
 
@@ -50,11 +49,11 @@ def query_chat(request, llm, tokenizer):
     chat = parameters["chat"].split("~")
 
     memories = request["memories"]
-    memory_filter.filter_memories(memories, parameters["user"] + ";" + chat[-1])
+    memory_util.filter_memories(memories, parameters["user"] + ";" + chat[-1])
 
-    parameters["memories"] = memory_filter.memories_to_string(memories[:5], True)  # TODO
+    parameters["memories"] = memory_util.memories_to_string(memories[:5], True)  # TODO
 
-    print("\n" + memory_filter.memories_to_string(memories[:5], True) + "\n")
+    print("\n" + memory_util.memories_to_string(memories[:5], True) + "\n")
 
     messages = [
         {"role": "system",
@@ -79,7 +78,7 @@ def query_chat(request, llm, tokenizer):
 def query_reflection(request, llm, tokenizer):
     parameters = request["data"]
     memories = request["memories"]
-    parameters["memories"] = memory_filter.memories_to_string(memories, include_nodeId=True)
+    parameters["memories"] = memory_util.memories_to_string(memories, include_nodeId=True)
 
     messages = [
         {"role": "user",
@@ -174,37 +173,60 @@ def query_chat_extract_plan(request, llm, tokenizer):
     return json_res
 
 
-def query_relationship(request, llm, tokenizer):
+def generate_context(request):
     parameters = request["data"]
     memories = request["memories"]
-    print(memories)
+    memories.sort(key=lambda x: x["HrsSinceCreation"], reverse=True)
 
+        # if the agent has no memory associated with the user, they have never had a conversation
     if len(memories) == 0:
-        return json.dumps({"response": parameters["agent"] + " has never met the person they are talking to before."})
+        return json.dumps({"response": parameters["agent"] + " is having a conversation with someone they "
+                                                             "have never met before."})
 
-    memories.sort(key=lambda x: x["HrsSinceCreation"], reverse=True)
-    memories_str = memory_filter.memories_to_string(memories, include_date_created=True)
+    # agent's current action based on their schedule
+    action = query_agent_action(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                                util.load_tokenizer(util.MISTRAL_TOK))
+
+    # when did the agent last talk to the user?
+    last_chat_hrs = int(math.ceil(memories[-1]["HrsSinceCreation"]))
+    last_chat = (parameters["agent"] + " last talked to " + parameters["user"] + " on " + memories[-1]["Created"]
+                 + " - " + str(last_chat_hrs) + " " + ("hour" if last_chat_hrs == 1 else "hours") + " ago.")
+
+    # what is the relationship between agent and user?
+    relationship = query_relationship(request, util.load_model(util.MISTRAL_PATH, 4096, 0),
+                                      util.load_tokenizer(util.MISTRAL_TOK))
+
+    return json.dumps({"response": action + " " + last_chat + " " + relationship})
+
+
+def query_relationship(request, llm, tokenizer):
+    parameters = request["data"]
+    memories = request["memories"]
+
+    memories_str = memory_util.memories_to_string(memories, include_date_created=True)
 
     messages = [
         {"role": "user",
-         "content": templates.load_template("relationship2").format(memories=memories_str, **parameters)},
+         "content": templates.load_template("relationship").format(memories=memories_str, **parameters)},
     ]
 
     prompt = tokenizer.apply_chat_template(messages, tokenize=False)
+    relationship = parameters["agent"] + "'s relationship with " + parameters["user"] + " is" + run_query(prompt, llm)
 
-    lastChatHrs = int(math.ceil(memories[-1]["HrsSinceCreation"]))
-    lastChat = (parameters["agent"] + " last talked to " + parameters["user"] + " on " + memories[-1]["Created"]
-                + " - " + str(lastChatHrs) + " " + ("hour" if lastChatHrs == 1 else "hours") + " ago.")
+    return relationship
 
-    relationship = parameters["agent"] + "'s relationship with " + parameters["user"] + " is" + run_query(prompt, llm)
 
-    print("RESULTS: ")
-    print(lastChat)
-    print(relationship)
+def query_agent_action(request, llm, tokenizer):
+    parameters = request["data"]
+    messages = [
+        {"role": "user",
+         "content": templates.load_template("agent_action").format(**parameters)},
+    ]
 
-    json_res = json.dumps(
-        {"response": parameters["agent"] + " is talking to " + parameters["user"] + ". " + lastChat + " " + relationship})
-    return json_res
+    prompt = tokenizer.apply_chat_template(messages, tokenize=False)
+    action = run_query(prompt, llm)
+
+    return action
 
 
 def query_knowledge(request, llm, tokenizer):
@@ -225,7 +247,7 @@ def query_plan_day(request, llm, tokenizer):
     parameters = request["data"]
     memories = request["memories"]
 
-    plans = memory_filter.memories_to_string(memories)
+    plans = memory_util.memories_to_string(memories)
 
     messages = [
         {"role": "user",
@@ -289,4 +311,4 @@ John Linn: Of course, no problem at all! *smiles* I hope your sister feels bette
      },
      "memories": []}
 
-# query_chat_summary(d, models.load_model(models.mistral_path, 4096, 0))
+# query_chat_summary(d, util.load_model(util.MISTRAL_PATH, 4096, 0))
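
For reference, parse_request above dispatches on a decoded JSON envelope (the
string-to-dict step sits in the lines this hunk elides). A hedged sketch of
the new "context" request, using only fields this diff actually reads; every
value is a placeholder:

    # Hedged sketch of a "context" request; field names are taken from the
    # diff above, values are placeholders.
    request = {
        "type": "context",
        "data": {
            "agent": "John Linn",   # speaking character
            "user": "Player",       # conversation partner; placeholder name
            "time": "14:00",        # consumed by the agent_action template
            "schedule": "...",      # the agent's plan for the day
        },
        "memories": [
            {"HrsSinceCreation": 3.2, "Created": "04/04/2024"},
        ],
    }

generate_context() then folds query_agent_action, the last-chat sentence, and
query_relationship into a single "response" string.
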
diff --git a/python/llm-server/requirements.txt b/python/llm-server/requirements.txt
deleted file mode 100644
index f9c8132..0000000
--- a/python/llm-server/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-sentence-transformers
\ No newline at end of file
diff --git a/python/llm-server/llm_server.py b/python/llm-server/server.py
similarity index 69%
rename from python/llm-server/llm_server.py
rename to python/llm-server/server.py
index dc8b501..54de83a 100644
--- a/python/llm-server/llm_server.py
+++ b/python/llm-server/server.py
@@ -1,11 +1,6 @@
 # https://realpython.com/python-sockets/
 import socket
-import models
-
-from langchain_community.llms import LlamaCpp
-from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
-
-import query_hub
+import queries
 
 HOST = "127.0.0.1"
 PORT = 65432
@@ -25,7 +20,7 @@ def start_server():
 
                 request = data.decode("utf-8")
 
-                response = query_hub.parse_request(request)
+                response = queries.parse_request(request)
 
                 conn.sendall(bytes(response, "utf-8"))
 
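
The rename from llm_server.py to server.py leaves the wire protocol untouched:
UTF-8 JSON in, UTF-8 response out, on the HOST/PORT constants above. A minimal
client sketch; the envelope shape is an assumption based on parse_request in
queries.py, and "data" would need the request-specific fields:

    import json
    import socket

    # Hedged client sketch against the server's HOST/PORT constants.
    payload = {"type": "poignancy", "data": {}, "memories": []}
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect(("127.0.0.1", 65432))
        s.sendall(json.dumps(payload).encode("utf-8"))
        print(s.recv(4096).decode("utf-8"))
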
diff --git a/python/llm-server/templates.py b/python/llm-server/templates.py
deleted file mode 100644
index 114295b..0000000
--- a/python/llm-server/templates.py
+++ /dev/null
@@ -1,3 +0,0 @@
-def load_template(template):
-    return open("C:/Users/konta/OneDrive/Uni/Master Project/llm-server/templates/" + template + ".txt").read()
-
diff --git a/python/llm-server/templates/agent_action.txt b/python/llm-server/templates/agent_action.txt
new file mode 100644
index 0000000..5197f58
--- /dev/null
+++ b/python/llm-server/templates/agent_action.txt
@@ -0,0 +1,7 @@
+Based on {agent}'s schedule for today, what would {agent} be doing at {time}?
+Answer briefly.
+
+Schedule:
+{schedule}
+
+Answer:
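
This template backs the new query_agent_action in queries.py, which fills it
with .format(**parameters) before wrapping it in the tokenizer's chat
template; note that load_template itself now lives in util.py. A hedged
sketch of the expected inputs, all values placeholders:

    import util  # load_template moved here in this patch

    # Placeholder values; the slot names come from the template above.
    parameters = {
        "agent": "John Linn",
        "time": "14:00",
        "schedule": "09:00 open the shop\n13:00 lunch break\n18:00 close up",
    }
    prompt_text = util.load_template("agent_action").format(**parameters)
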
diff --git a/python/llm-server/templates/chat_system.txt b/python/llm-server/templates/chat_system.txt
index ef533ad..5707f6c 100644
--- a/python/llm-server/templates/chat_system.txt
+++ b/python/llm-server/templates/chat_system.txt
@@ -1,7 +1,8 @@
 Act as {agent}.
-{agent}'s character traits: {traits}
 
-{relationship}
+{context}
+
+{agent}'s character traits: {traits}
 
 {schedule}
 
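
The {relationship} slot becomes {context}, now bound to the combined string
that generate_context assembles. A self-contained sketch of what that slot
receives; the three strings are placeholders shaped like the pieces built in
the queries.py diff above:

    # Placeholder strings shaped like the pieces generate_context builds:
    action = "John Linn is tending the shop."                        # query_agent_action
    last_chat = "John Linn last talked to Player on 04/04/2024 - 3 hours ago."
    relationship = "John Linn's relationship with Player is friendly."
    # This combined string is what the {context} slot now receives.
    context = action + " " + last_chat + " " + relationship
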
diff --git a/python/llm-server/templates/relationship2.txt b/python/llm-server/templates/relationship.txt
similarity index 100%
rename from python/llm-server/templates/relationship2.txt
rename to python/llm-server/templates/relationship.txt
diff --git a/python/llm-server/templates/relationship1.txt b/python/llm-server/templates/relationship1.txt
deleted file mode 100644
index 57359db..0000000
--- a/python/llm-server/templates/relationship1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Based on {agent}'s memories, when was the last conversation between {agent} and {user}?
-
-{agent}'s memories related to {user}:
-{memories}
-
-Complete the sentence:
-{agent} last talked to {user} on
\ No newline at end of file
diff --git a/python/llm-server/util.py b/python/llm-server/util.py
new file mode 100644
index 0000000..e77208f
--- /dev/null
+++ b/python/llm-server/util.py
@@ -0,0 +1,42 @@
+from langchain_community.llms.llamacpp import LlamaCpp
+from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
+from transformers import AutoTokenizer
+
+TEMPLATES_PATH = "X:/REPOSITORIES/24-sp-poglitsch-giner-genagents/python/llm-server/templates/"
+
+
+def load_template(template):
+    return open(TEMPLATES_PATH + template + ".txt").read()
+
+
+MISTRAL_PATH = "X:/LLM Models/mistral-7b-instruct-v0.1.Q4_K_M.gguf"
+LLAMA_PATH = "X:/LLM Models/llama-2-7b-chat.Q4_K_M.gguf"
+LLAMA_PATH_13B = "X:/LLM Models/llama-2-13b-chat.Q4_K_M.gguf"
+MISTRAL_LARGE_PATH = "X:/LLM Models/mistral-7b-instruct-v0.1.Q5_K_M.gguf"
+LUNA_UNC_PATH = "X:/LLM Models/luna-ai-llama2-uncensored.Q4_K_M.gguf"
+
+MISTRAL_TOK = "mistralai/Mistral-7B-Instruct-v0.1"
+LLAMA_TOK = "meta-llama/Llama-2-7b-chat-hf"
+LLAMA_API_KEY = "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"
+
+
+def load_tokenizer(model):
+    if "llama" in model:
+        return AutoTokenizer.from_pretrained(model, token=LLAMA_API_KEY)
+    else:
+        return AutoTokenizer.from_pretrained(model)
+
+
+def load_model(model, ctx_size, temperature):
+    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
+    llm = LlamaCpp(
+        model_path=model,
+        temperature=temperature,
+        n_gpu_layers=-1,
+        n_batch=512,
+        n_ctx=ctx_size,
+        callback_manager=callback_manager,
+        verbose=False,
+    )
+
+    return llm
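
One caveat: LLAMA_API_KEY above is a live Hugging Face token committed in
plain text, and this patch publishes it in history. A hedged sketch of the
usual alternative, reading it from the environment instead, assuming an
HF_TOKEN variable:

    import os

    # Assumption: the token is exported as HF_TOKEN rather than hardcoded.
    LLAMA_API_KEY = os.environ.get("HF_TOKEN")
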
-- 
GitLab