-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
47 lines (37 loc) · 1.63 KB
/
main.py
File metadata and controls
47 lines (37 loc) · 1.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import logging

import uvicorn
from fastapi import FastAPI, HTTPException
from langchain_core.messages import HumanMessage, AIMessage
from pydantic import BaseModel

from agent import chat_graph
# FastAPI Implementation
# Single application instance; the /chat route below attaches to it via decorator.
app = FastAPI(title="ChatGPT Clone Backend")
class ChatRequest(BaseModel):
    """Request body for POST /chat: one user message plus a conversation id."""
    # Raw user text; forwarded to the graph wrapped in a HumanMessage.
    message: str
    # Passed to the graph as configurable thread_id, so repeated calls with
    # the same id continue the same conversation (LangGraph checkpointing).
    thread_id: str
class ChatResponse(BaseModel):
    """Response body for POST /chat: the assistant's reply text."""
    # Content of the most recent AIMessage in the conversation history,
    # or a fallback notice when no AIMessage was produced (see chat_endpoint).
    response: str
@app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    """Run one conversational turn through the LangGraph agent.

    The request's thread_id selects the persisted conversation state, so
    repeated calls with the same thread_id continue the same conversation.

    Returns a ChatResponse with the newest AIMessage's content, or a
    fallback sentence when the graph produced no AIMessage.

    Raises HTTPException (500) on any failure while running the graph.
    """
    config = {"configurable": {"thread_id": request.thread_id}}
    try:
        # Run the graph
        initial_input = {"messages": [HumanMessage(content=request.message)]}
        result = chat_graph.invoke(initial_input, config=config)
        # The newest AIMessage in the history is the model's reply for this turn.
        last_message = next(
            (msg for msg in reversed(result["messages"]) if isinstance(msg, AIMessage)),
            None,
        )
        if last_message:
            return ChatResponse(response=last_message.content)
        else:
            return ChatResponse(response="I have processed your request but couldn't generate a final response. Please try rephrasing.")
    except Exception as e:
        # logger.exception records the full traceback (the previous print() lost it).
        logging.getLogger(__name__).exception("Error in chat endpoint")
        # NOTE(review): str(e) is exposed to the client in the 500 detail —
        # consider a generic message if internals must stay hidden.
        raise HTTPException(status_code=500, detail=str(e)) from e
if __name__ == "__main__":
    # Dev entry point: announce the endpoints, then start uvicorn with
    # reload=True so the server restarts on code changes.
    banner = (
        "Starting ChatGPT Clone API Server...",
        "API Documentation: http://localhost:8000/docs",
        "API will be available at: http://localhost:8000",
    )
    for line in banner:
        print(line)
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)