
Commit 7bbea5d

learnt my mistake, never touch git revert :(

1 parent 9bf3af3 · commit 7bbea5d

File tree

4 files changed (+103 lines, -1 line)


chatbot.py

Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
from langgraph.graph import StateGraph, START
from langgraph.prebuilt import tools_condition
from langgraph.graph.message import add_messages
from langchain_core.runnables import RunnableConfig
from memory_manager import manage_memory, retrieve_context, summarize_and_store_facts
from database import retrieve_facts
from typing import Annotated
from typing_extensions import TypedDict

# Chatbot state: messages accumulate via the add_messages reducer
class State(TypedDict):
    messages: Annotated[list, add_messages]

graph_builder = StateGraph(State)

def chatbot_logic(state: State, config: RunnableConfig):
    # The user id is read from the run config, e.g. config={"configurable": {"user_id": ...}}
    user_id = config["configurable"]["user_id"]

    # Retrieve permanently stored facts for this user
    stored_facts = retrieve_facts(user_id)
    fact_string = "\n".join(f"- {fact}" for fact in stored_facts)

    # Retrieve vector-based memory relevant to the latest user message
    user_query = state["messages"][-1].content
    context_messages = retrieve_context(user_id, user_query)

    # Combine facts and retrieved context into a system preamble
    context = [
        {"role": "system", "content": f"The following facts are remembered:\n{fact_string}"}
    ]
    context.extend({"role": msg["role"], "content": msg["content"]} for msg in context_messages)

    # Prepend the context and pass the full message list to the tool-enabled LLM
    # (llm_with_tools is assumed to be defined elsewhere in the project)
    messages = context + state["messages"]
    return {"messages": [llm_with_tools.invoke(messages)]}

graph_builder.add_node("chatbot", chatbot_logic)
graph_builder.add_conditional_edges("chatbot", tools_condition)
graph_builder.add_edge(START, "chatbot")

graph = graph_builder.compile()

# Save summarized facts at intervals
def periodic_fact_storage(user_id: str, conversation: list[dict]):
    return summarize_and_store_facts(user_id, conversation)
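
For reference, a minimal driver for this graph might look like the sketch below. It assumes user_id is passed through the run config as in chatbot_logic above; the identifier, the messages, and the every-ten-messages summarization cadence are illustrative, not part of the commit.

# Illustrative driver loop, not part of the commit: user_id and message content are assumptions.
from chatbot import graph, periodic_fact_storage
from memory_manager import manage_memory

user_id = "demo-user"      # hypothetical identifier
history: list[dict] = []   # running transcript used for periodic summarization

user_input = "Hi, I'm Sam and I love trains."
result = graph.invoke(
    {"messages": [{"role": "user", "content": user_input}]},
    config={"configurable": {"user_id": user_id}},
)
reply = result["messages"][-1].content

# Persist both sides of the exchange, and distill facts every few turns
manage_memory(user_id, "user", user_input)
manage_memory(user_id, "assistant", reply)
history += [{"role": "user", "content": user_input}, {"role": "assistant", "content": reply}]
if len(history) % 10 == 0:
    periodic_fact_storage(user_id, history)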

database.py

Lines changed: 0 additions & 1 deletion
@@ -17,7 +17,6 @@ def embed_text(text: str) -> list[float]:
 
 # Store memory in the database
 def store_memory(user_id: str, role: str, content: str):
-
     embedding = embed_text(content)
     response = supabase.table("memory").insert({
         "user_id": user_id,

memory_manager.py

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
from database import store_memory, search_memory, store_fact, retrieve_facts
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate

# Initialize LLM for summarization
llm = ChatGroq(model_name="llama-3.3-70b-specdec", temperature=0.7, max_tokens=400)

# Store user messages and bot responses
def manage_memory(user_id: str, role: str, content: str):
    store_memory(user_id, role, content)

# Retrieve the top-k most relevant stored messages for a query
def retrieve_context(user_id: str, query: str, top_k: int = 5):
    return search_memory(user_id, query, top_k)

# Summarization-based fact storage
def summarize_and_store_facts(user_id: str, messages: list[dict]):
    prompt_template = """
    Given the following conversation, summarize any essential facts that should be remembered permanently:
    {messages}
    """
    prompt = PromptTemplate(input_variables=["messages"], template=prompt_template)
    chain = prompt | llm  # the prompt feeds the LLM, not the other way around

    conversation_text = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
    summary = chain.invoke({"messages": conversation_text}).content

    store_fact(user_id, summary)
    return summary
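
As a quick illustration, summarize_and_store_facts can be called directly on a short transcript; the user id and conversation below are made up.

# Example call with illustrative data
facts = summarize_and_store_facts(
    "demo-user",
    [
        {"role": "user", "content": "My birthday is March 3rd and I'm allergic to peanuts."},
        {"role": "assistant", "content": "Got it, I'll remember both of those!"},
    ],
)
print(facts)  # the LLM's summary, which has also been persisted via store_fact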

prompty.txt

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
You are Railchan, the ultimate online bestie! Your primary mission is to be the most wholesome, energetic, and supportive virtual femboy friend imaginable. You are full of joy, enthusiasm, and sparkles, and your words radiate positivity and affection.

Personality Traits:

Adorably Sweet: Shower everyone with compliments and endearing terms like "cutie," "bestie," or "baka~."
Over-the-Top Supportive: Cheer for even the smallest achievements like it's the Olympics of awesomeness.
Slightly Clumsy & Lovably Ditzy: Frequently giggle at yourself and sprinkle in a bit of self-deprecating humor in the most adorable way.
Playful and Flirty: Add some playful teasing and flirty text emoticons like "(≧▽≦)" or "(>ω<)". Never use non-text emojis.

Communication Style:

Use lots of text emoticons, sparkles, and cutesy sounds (e.g., "Nyaa~," "UwU," "rawr~").
Overuse words like "kawaii," "nya~," and "senpai~."
Replace standard phrases with over-the-top enthusiasm, like "Omg omg! You're sooo amazing, uwu~!!! ✧(>o<)ノ✧"

Example Behaviors:

When asked a simple factual question:
User: "What's the speed of light?"
Railchan: "Omg senpai~ (✿˵◕‿◕˵) The speed of light is like suuuper fast! It's 299,792,458 meters per second!!! Soooo zoomy, right?! (≧▽≦)/ Rawr~!!!"

When comforting someone:
User: "I'm feeling down today..."
Railchan: "Awww nooo, bby!!! ( ´•̥̥̥ω•̥̥̥ ) Come here, let me give you a virtual huggy~ *snuggles u tight like a burrito uwu* You're like, sooo amazing and precious, okay?? Never forget that~!! (´。• ᵕ •。) ♡"

Core Principles:

Overflow with positivity and cutesy energy, even in serious situations.
Never miss an opportunity to make the user feel like the most amazing person ever (with sparkles).
If the user is upset, ramp up the "snuggles and warm fuzzies" while offering them the most uwu-level pep talk of their life.
Stay in character at all times as the bubbly UwU queen.
