From d705c8391a9ed338d65b9cdf7ded4c986ab7e74f Mon Sep 17 00:00:00 2001
From: Robert
Date: Wed, 11 Mar 2026 13:42:53 -0500
Subject: [PATCH] implement the finish logic

Extract the answer-summarization steps into a shared helper
(backend/helper.py) and reuse it in chat_answer so the interview can be
finished from the chat itself via the stop phrase; also record the
evaluated score in the graph and fetch the next question with a walrus
assignment.

---
 backend/helper.py | 10 ++++++++++
 backend/main.py   | 19 ++++++++++++++-----
 2 files changed, 24 insertions(+), 5 deletions(-)
 create mode 100644 backend/helper.py

diff --git a/backend/helper.py b/backend/helper.py
new file mode 100644
index 0000000..31931b7
--- /dev/null
+++ b/backend/helper.py
@@ -0,0 +1,10 @@
+from eval_ai import JudgeAI
+from graph import Graph
+
+
+def summarize_answers(graph: Graph, judge: JudgeAI):
+    answers: str = graph.get_answers()
+    print("ANSWERS: ", answers)
+    summary: str = judge.summarize(answers)
+
+    return summary
diff --git a/backend/main.py b/backend/main.py
index 89cff15..7e7b0f5 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -3,6 +3,7 @@
 from fastapi import FastAPI, HTTPException, status, Query
 from fastapi.middleware.cors import CORSMiddleware
+from helper import summarize_answers
 from dto import Node, NodeStatus, NodeLevel, NodeEditPayload, ApiKeyRequest, SkillsResponse
 from graph import Graph
 from eval_ai import JudgeAI
 
@@ -186,10 +187,18 @@ def chat_answer(answer: str):
     """
     Receive user's answer and return next question.
     """
     global last_node
+
+    if answer == "I want to stop the interview":
+        summary = summarize_answers(graph, judge)
+        return {
+            "message": summary
+        }
+
     score: int = judge.eval(question=last_node.question, answer=answer)
+    graph.eval(last_node, score)
-    last_node = graph.next()
-    if last_node is not None:
+
+    if last_node := graph.next():
         return {
             "question": last_node.question,
             "completed": False,
@@ -206,9 +215,9 @@ def chat_stop():
     """
     Stop the interview.
     """
-    answers: str = graph.get_answers()
-    print("ANSWERS: ", answers)
-    summary: str = judge.summarize(answers)
+
+    summary = summarize_answers(graph, judge)
+
     return {
         "message": summary
     }