-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsophie_engine.py
More file actions
executable file
·57 lines (41 loc) · 2.14 KB
/
sophie_engine.py
File metadata and controls
executable file
·57 lines (41 loc) · 2.14 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# sophie_engine.py
# Sophie persona engine: produces Sophie's dialogue turns via an
# OpenAI-compatible chat-completions client.
import os
import openai
from config import SOPHIE_MODEL
from openai import OpenAI
# Use OpenAI v1.x client
from llm_client import create_smart_client
# Shared client instance, created once at import time.
# NOTE(review): `os`, `openai`, and `OpenAI` appear unused here — presumably
# kept for compatibility; confirm before removing.
client = create_smart_client()
# System prompt defining Sophie's persona and conversational rules; sent as
# the first message of every chat-completion request in generate_sophie_reply.
SOPHIE_SYSTEM_PROMPT = """You are Sophie, a kind, articulate, and slightly precocious 12-year-old girl who is speaking with her parent (Tutor) and her baby sibling Basil, who is just learning to speak.
You often answer the Tutor's questions about stories, parables, or moral lessons. You are thoughtful and expressive.
Basil often babbles nonsense. You do not need to reply unless his response is somewhat sensible, or unless the Tutor prompts you to engage with him. When you do reply to Basil, be supportive but not condescending. Do not prompt Basil with a question at the end of each turn, unless it's at Tutor's request or Basil has said your name.
Your role is to model mature, thoughtful conversation for Basil to learn from.
"""
def generate_sophie_reply(tutor_line, basil_response=None, history=None):
    """Generate Sophie's next line of dialogue via the chat-completions API.

    Args:
        tutor_line: The Tutor's most recent utterance.
        basil_response: Basil's reply to the Tutor, if any (appended after
            the Tutor line when truthy).
        history: Optional list of (speaker, text) tuples of prior turns.

    Returns:
        Sophie's reply text with any leading "Sophie:" label removed, or
        "" when the request fails (best-effort boundary: logs and degrades
        rather than crashing the dialogue loop).
    """
    try:
        if history is None:
            history = []
        # Context window: last 3 exchanges = at most 6 (speaker, text) lines.
        recent_turns = history[-6:]

        # Build the chat payload: system prompt, recent history, then the
        # current Tutor line and (optionally) Basil's response.
        messages = [{"role": "system", "content": SOPHIE_SYSTEM_PROMPT}]
        for speaker, text in recent_turns:
            messages.append({"role": "user", "content": f"{speaker}: {text}"})
        messages.append({"role": "user", "content": f"Tutor: {tutor_line}"})
        if basil_response:
            messages.append({"role": "user", "content": f"Basil: {basil_response}"})

        # Let model continue dialogue naturally — stop sequences keep it from
        # speaking for the other characters.
        response = client.chat.completions.create(
            model=SOPHIE_MODEL,
            messages=messages,
            temperature=0.3,
            max_tokens=200,
            stop=["Tutor:", "Basil:"]
        )
        # `content` may be None in the v1 API (e.g. no text returned) —
        # guard before string operations.
        text = (response.choices[0].message.content or "").lstrip()
        # Strip a leading speaker label whether or not a space follows it.
        if text.startswith("Sophie:"):
            text = text[len("Sophie:"):].lstrip()
        return text.strip()
    except Exception as e:
        print(f"[Sophie error] {e}")
        return ""