diff --git a/conflict_resolving.py b/conflict_resolving.py
index 420725c..51755d7 100644
--- a/conflict_resolving.py
+++ b/conflict_resolving.py
@@ -265,6 +265,48 @@ def remove_node(self, node: str) -> None:
for node, node in to_remove:
self.bad_graph.pop(node, None)
def get_some_cycle_from_scc(self, scc: List[str]) -> List[str]:
    """
    Find and return some cycle from a strongly connected component (SCC).

    Args:
        scc (List[str]): Assertion IDs forming a strongly connected component.

    Returns:
        List[str]: Assertion IDs forming a cycle within the SCC, in path order.
            Empty list if no cycle exists (empty input, or a single-node SCC
            with no self-loop).

    Description:
        - Runs a depth-first search from the first node of ``scc``, only
          following edges in ``self.good_graph_1`` whose target is inside the SCC.
        - Keeps the current DFS path as an ordered list (for slicing out the
          cycle) mirrored by a set (for O(1) "already on path" checks).
        - The first time a neighbour already on the current path is seen, the
          slice of the path from that neighbour onward is the cycle; the search
          returns immediately instead of exploring further.
    """
    # Guard: original code would raise IndexError on scc[0] for empty input.
    if not scc:
        return []

    scc_set = set(scc)          # O(1) membership instead of O(len(scc)) list scans
    path: List[str] = []        # ordered DFS path, used to slice the cycle out
    on_path = set()             # mirrors `path` for O(1) lookups
    cycle: List[str] = []

    def dfs(curr: str) -> bool:
        """Return True as soon as a cycle has been recorded in `cycle`."""
        nonlocal cycle
        path.append(curr)
        on_path.add(curr)
        for neighbour in self.good_graph_1[curr]:
            if neighbour not in scc_set:
                continue
            if neighbour in on_path:
                # Cycle found: everything on the path from `neighbour` to here.
                cycle = path[path.index(neighbour):]
                return True
            if dfs(neighbour):
                return True
        # Backtrack: `curr` leads to no cycle from this path.
        path.pop()
        on_path.discard(curr)
        return False

    dfs(scc[0])
    return cycle
def pick_worst_node(self, nodes_part_of_scc) -> str:
"""
@@ -364,7 +406,7 @@ def resolve_cycles_and_conflicts(self, automatic) -> bool:
if automatic:
remove_nodes = [random.choice(nodes_part_of_scc)]
else:
- if node := self.resolve_cycle_by_user(list_of_scc[0]):
+ if node := self.resolve_cycle_by_user(self.get_some_cycle_from_scc(list_of_scc[0])):
remove_nodes = [node]
else:
if automatic:
@@ -377,7 +419,7 @@ def resolve_cycles_and_conflicts(self, automatic) -> bool:
if des := self.resolve_contradiction_by_user(min_node, list(self.bad_graph[min_node])) is not None:
remove_nodes = [min_node] if des else list(self.bad_graph[min_node])
else:
- if node := self.resolve_cycle_by_user(list_of_scc[0]):
+ if node := self.resolve_cycle_by_user(self.get_some_cycle_from_scc(list_of_scc[0])):
remove_nodes = [node]
if remove_nodes != []:
diff --git a/streamlit_app.py b/streamlit_app.py
index 0fd36b5..d575872 100644
--- a/streamlit_app.py
+++ b/streamlit_app.py
@@ -20,6 +20,8 @@
from models import Assertion, Relationship
from app import ClarusApp, create_clarus_app
from structure import evaluate_relationship_quality
+from langchain_openai import ChatOpenAI
+from langchain_core.messages import HumanMessage
# Page configuration
st.set_page_config(
@@ -1003,9 +1005,89 @@ def prose_tab():
st.markdown('
', unsafe_allow_html=True)
st.header("📖 Prose Mode")
- st.markdown("Transform your refined assertions into fluent, reader-friendly text.")
-
- st.info("🚧 Prose mode is coming soon! This will transform your assertions into well-structured prose.")
+ st.markdown("Transform your refined assertions into fluent, reader-friendly academic prose.")
+
+ # Ensure plan exists
+ if "result_text" not in st.session_state:
+ st.session_state.result_text = ""
+
+ # Editable plan text area
+ plan_text = st.text_area(
+ "Plan from Review (edit if needed)",
+ value=st.session_state.get("result_text", ""),
+ key="prose_plan_text",
+ height=220,
+ help="This plan will be expanded into a full academic text."
+ )
+
+ # Sync back to result_text to keep it current
+ st.session_state.result_text = plan_text
+
+ # Model and options
+ col1, col2, col3 = st.columns([1, 1.5, 1.5])
+ with col1:
+ temperature = st.slider("Creativity", 0.0, 1.0, 0.3, 0.1, help="Lower = more formal/precise, Higher = more creative")
+ with col2:
+ style = st.selectbox("Style", ["Academic", "Technical"], index=0, help="Choose the writing style for the generated text")
+ with col3:
+ add_headings = st.checkbox("Include suggested headings", value=False)
+
+ # Generation button
+ disabled = len(plan_text.strip()) == 0
+ if disabled:
+ st.warning("The plan is empty. Please provide or generate a plan in Review mode.")
+
+ button_label = f"Generate {style} Text"
+ spinner_label = f"Generating {style.lower()} text..."
+
+ if st.button(button_label, disabled=disabled, type="primary"):
+ try:
+ with st.spinner(spinner_label):
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=temperature)
+ if style == "Academic":
+ instruction = (
+ "You are an expert academic writer. Expand the provided plan into a coherent, formal academic text suitable for a short paper or report section. "
+ "Requirements: 1) Maintain logical flow and argumentative coherence; 2) Use precise, objective, and formal tone; 3) Add connective tissue (transitions, definitions, brief context) as needed; "
+ "4) Avoid bullet points; write continuous prose; 5) Do not invent unsupported claims, but you may elaborate reasoning from the plan; "
+ "6) Where appropriate, structure content with an introduction, logically ordered paragraphs, and a concise concluding synthesis; "
+ "7) Remove meta-commentary about the task itself."
+ )
+ if add_headings:
+ instruction += " 8) Include concise section headings appropriate for academic writing."
+ else:
+ instruction = (
+ "You are a senior technical writer. Expand the provided plan into clear, precise, implementation-oriented technical text suitable for engineering documentation or a design note. "
+ "Requirements: 1) Prioritize clarity, unambiguous terminology, and actionability; 2) Use concise sentences and active voice; 3) Where helpful, include numbered steps, bullet lists, or short code-style blocks; "
+ "4) Provide definitions and assumptions upfront; 5) Include explicit inputs, outputs, constraints, and edge cases when implied by the plan; 6) Avoid marketing language and rhetorical flourish; "
+ "7) Keep sections well-scoped and skimmable."
+ )
+ if add_headings:
+ instruction += " 8) Include concise section headings appropriate for technical documentation."
+
+ prompt = (
+ f"{instruction}\n\n"
+ "PLAN TO EXPAND:\n"
+ "----------------\n"
+ f"{plan_text}\n"
+ )
+ message = HumanMessage(content=prompt)
+ response = llm.invoke([message])
+ generated_text = getattr(response, "content", str(response))
+ st.session_state.academic_text = generated_text
+ except Exception as e:
+ st.error(f"Failed to generate {style.lower()} text: {e}")
+
+ # Show output if available
+ if st.session_state.get("academic_text"):
+ st.subheader(f"Generated {style} Text")
+ st.text_area(f"{style} Text", value=st.session_state.academic_text, height=420, key="academic_text_display")
+ st.download_button(
+ label=f"Download {style} Text (.txt)",
+ data=st.session_state.academic_text,
+ file_name=f"{style.lower()}_text.txt",
+ mime="text/plain"
+ )
+
st.markdown('
', unsafe_allow_html=True)
def main():