Skip to content

Commit f1bbf09

Browse files
author
Memtext User
committed
v0.5.0: Add AI intelligence with local LLM support
- Add llm.py with Ollama (local) and OpenAI support - Add synthesize_rule_based fallback (no LLM needed) - Add graph.py for relationship tracking - Add AutoTagger for content-based tagging - Add CLI: synthesize-ai, retag, link commands - Version bump to 0.5.0 - New optional: pip install memtext[llm]
1 parent 0d2cd1c commit f1bbf09

File tree

4 files changed

+639
-1
lines changed

4 files changed

+639
-1
lines changed

pyproject.toml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "memtext"
3-
version = "0.2.1"
3+
version = "0.5.0"
44
description = "Context offloading for AI agents - persistent memory across sessions"
55
readme = "README.md"
66
requires-python = ">=3.10"
@@ -28,6 +28,9 @@ api = [
2828
"fastapi>=0.100",
2929
"uvicorn>=0.23",
3030
]
31+
llm = [
32+
"openai>=1.0",
33+
]
3134

3235
[project.scripts]
3336
memtext = "memtext.cli:main"

src/memtext/cli.py

Lines changed: 120 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,35 @@ def main(argv=None):
216216
)
217217
share_parser.add_argument("entry_id", type=int, help="Entry ID to share")
218218

219+
synthesize_ai_parser = subparsers.add_parser(
220+
"synthesize-ai",
221+
help="Synthesize with AI (requires LLM)",
222+
description="Use LLM for intelligent memory synthesis. Supports Ollama (local) or OpenAI.",
223+
)
224+
synthesize_ai_parser.add_argument("--text", help="Text to synthesize")
225+
synthesize_ai_parser.add_argument(
226+
"--model", default="llama3", help="Model for local synthesis"
227+
)
228+
synthesize_ai_parser.add_argument(
229+
"--rule-based", action="store_true", help="Use rule-based fallback"
230+
)
231+
232+
retag_parser = subparsers.add_parser(
233+
"retag",
234+
help="Auto-tag entries",
235+
description="Automatically tag entries based on content",
236+
)
237+
retag_parser.add_argument("--entry-id", type=int, help="Entry ID to retag")
238+
retag_parser.add_argument("--all", action="store_true", help="Retag all entries")
239+
240+
link_parser = subparsers.add_parser(
241+
"link",
242+
help="Build relationship graph",
243+
description="Auto-detect relationships between entries",
244+
)
245+
link_parser.add_argument("--entry-id", type=int, help="Entry ID to find links for")
246+
link_parser.add_argument("--limit", type=int, default=5, help="Max results")
247+
219248
serve_parser = subparsers.add_parser(
220249
"serve",
221250
help="Start API server",
@@ -425,6 +454,97 @@ def main(argv=None):
425454
except Exception as e:
426455
raise DatabaseError(f"Share failed: {e}")
427456

457+
elif args.command == "synthesize-ai":
458+
try:
459+
from memtext.llm import (
460+
synthesize,
461+
synthesize_rule_based,
462+
check_llm_available,
463+
)
464+
465+
available = check_llm_available()
466+
print(f"LLM availability: {available}")
467+
468+
if args.rule_based:
469+
text = args.text or "Sample context for testing"
470+
result = synthesize_rule_based(text)
471+
print(f"\nSummary: {result.summary}")
472+
print(f"Memories: {len(result.memories)}")
473+
print(f"Tags: {result.tags}")
474+
else:
475+
if args.text:
476+
result = synthesize(args.text)
477+
if result:
478+
print(f"\nSummary: {result.summary}")
479+
print(f"Memories: {len(result.memories)}")
480+
for mem in result.memories:
481+
print(f" - {mem.get('title')}")
482+
print(f"Tags: {result.tags}")
483+
else:
484+
print(
485+
"No LLM available. Install openai package or run Ollama."
486+
)
487+
else:
488+
print("No text provided. Use --text or --rule-based")
489+
except ImportError:
490+
print("Error: LLM package not installed.")
491+
print("Run: pip install memtext[llm]")
492+
return 5
493+
494+
elif args.command == "retag":
495+
try:
496+
from memtext.llm import AutoTagger
497+
498+
require_context_dir()
499+
500+
tagger = AutoTagger()
501+
502+
if args.all:
503+
entries = list_entries(limit=100)
504+
for entry in entries:
505+
tags = tagger.tag_content(entry.get("content", ""))
506+
if tags:
507+
print(f"Entry {entry['id']}: {', '.join(tags)}")
508+
elif args.entry_id:
509+
from memtext.db import get_entry
510+
511+
entry = get_entry(args.entry_id)
512+
if entry:
513+
tags = tagger.tag_content(entry.get("content", ""))
514+
print(f"Entry {args.entry_id} tags: {', '.join(tags)}")
515+
else:
516+
print(f"Entry {args.entry_id} not found")
517+
else:
518+
print("Use --entry-id or --all")
519+
except Exception as e:
520+
raise DatabaseError(f"Retag failed: {e}")
521+
522+
elif args.command == "link":
523+
try:
524+
require_context_dir()
525+
from memtext.graph import (
526+
get_related_entries,
527+
build_relationships_from_entries,
528+
init_graph,
529+
)
530+
531+
init_graph()
532+
533+
if args.entry_id:
534+
related = get_related_entries(args.entry_id, args.limit)
535+
if related:
536+
print(f"Related to entry {args.entry_id}:")
537+
for r in related:
538+
print(f" [{r.get('entry_type')}] {r.get('title')}")
539+
else:
540+
print("No relationships found")
541+
else:
542+
entries = list_entries(limit=50)
543+
count = build_relationships_from_entries(entries)
544+
print(f"Built {count} relationships")
545+
except Exception as e:
546+
raise DatabaseError(f"Link failed: {e}")
547+
428548
elif args.command == "serve":
429549
try:
430550
from memtext.api import run as api_run

src/memtext/graph.py

Lines changed: 243 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,243 @@
1+
"""Relationship graph for MemText context.
2+
3+
Tracks relationships between entries for better context retrieval.
4+
"""
5+
6+
import sqlite3
7+
from pathlib import Path
8+
from typing import Optional, List, Dict, Set
9+
from collections import defaultdict
10+
11+
12+
def get_graph_path() -> Path:
    """Location of the relationship-graph database under the working directory."""
    return Path.cwd().joinpath(".context", "relationships.db")
16+
17+
def init_graph() -> Path:
    """Create the relationship-graph database and its schema if missing.

    Two tables are maintained: ``relationships`` (directed, typed, weighted
    edges between entry ids, unique per (source, target, type)) and
    ``cooccurrence`` (per-session access log used for
    "frequently accessed together" queries), plus lookup indexes on each.

    Returns:
        Path to the graph database file (created if absent).
    """
    graph_path = get_graph_path()
    graph_path.parent.mkdir(parents=True, exist_ok=True)

    conn = sqlite3.connect(graph_path)
    try:
        cursor = conn.cursor()

        cursor.execute("""
            CREATE TABLE IF NOT EXISTS relationships (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                source_id INTEGER NOT NULL,
                target_id INTEGER NOT NULL,
                relationship_type TEXT NOT NULL,
                strength REAL DEFAULT 1.0,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                UNIQUE(source_id, target_id, relationship_type)
            )
        """)

        cursor.execute("""
            CREATE TABLE IF NOT EXISTS cooccurrence (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                entry_id INTEGER NOT NULL,
                session_id TEXT,
                timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        cursor.execute("""
            CREATE INDEX IF NOT EXISTS idx_relationships_source
            ON relationships(source_id)
        """)

        cursor.execute("""
            CREATE INDEX IF NOT EXISTS idx_cooccurrence_entry
            ON cooccurrence(entry_id)
        """)

        conn.commit()
    finally:
        # Close even if a DDL statement raises, so the handle is not leaked
        # (the original leaked the connection on any sqlite error here).
        conn.close()
    return graph_path
61+
def add_relationship(
    source_id: int,
    target_id: int,
    relationship_type: str = "related",
    strength: float = 1.0,
) -> bool:
    """Insert (or overwrite) a directed relationship between two entries.

    Uses INSERT OR REPLACE so re-adding an existing (source, target, type)
    edge updates its strength instead of failing the UNIQUE constraint.

    Args:
        source_id: Origin entry id.
        target_id: Destination entry id.
        relationship_type: Edge label (e.g. "related", "depends_on").
        strength: Edge weight; higher sorts first in lookups.

    Returns:
        True on success, False on any SQLite error.
    """
    graph_path = get_graph_path()
    if not graph_path.exists():
        init_graph()

    try:
        conn = sqlite3.connect(graph_path)
        try:
            conn.execute(
                """INSERT OR REPLACE INTO relationships
                (source_id, target_id, relationship_type, strength)
                VALUES (?, ?, ?, ?)""",
                (source_id, target_id, relationship_type, strength),
            )
            conn.commit()
        finally:
            # The original returned False on sqlite3.Error without closing,
            # leaking the connection; always close it.
            conn.close()
        return True
    except sqlite3.Error:
        return False
87+
88+
def get_related_entries(entry_id: int, limit: int = 10) -> List[Dict]:
    """Return entries linked from *entry_id*, strongest edges first.

    Args:
        entry_id: Source entry whose outgoing relationships are fetched.
        limit: Maximum number of rows returned.

    Returns:
        List of row dicts (relationship columns plus title/content/entry_type),
        or [] when the graph database does not exist yet.

    NOTE(review): the query joins ``context_entries``, a table that
    ``init_graph`` never creates in relationships.db — it belongs to the
    main memtext database. Confirm that table exists in (or is attached
    to) the graph database, otherwise this raises OperationalError.
    """
    graph_path = get_graph_path()
    if not graph_path.exists():
        return []

    conn = sqlite3.connect(graph_path)
    conn.row_factory = sqlite3.Row
    try:
        cursor = conn.execute(
            """SELECT r.*, e.title, e.content, e.entry_type
            FROM relationships r
            JOIN context_entries e ON r.target_id = e.id
            WHERE r.source_id = ?
            ORDER BY r.strength DESC
            LIMIT ?""",
            (entry_id, limit),
        )
        rows = cursor.fetchall()
    finally:
        # Previously the connection leaked if the query raised.
        conn.close()
    return [dict(row) for row in rows]
112+
113+
def auto_detect_relationships(content_pairs: List[tuple]) -> List[tuple]:
    """Auto-detect relationships from content.

    A pair of entries is linked when both contents contain a keyword from
    the same relationship category; at most one relationship is emitted
    per (id1, id2) pair, using the first matching category.

    Args:
        content_pairs: List of (entry_id, content) tuples.

    Returns:
        List of (source_id, target_id, relationship_type, strength) tuples,
        with source_id < target_id and a fixed strength of 0.8.
    """
    keywords = {
        "depends_on": ["depends on", "requires", "needs", "build on"],
        "similar_to": ["similar to", "like", "also", "same"],
        "contrast_with": ["instead of", "rather than", "unlike", "but"],
        "related_to": ["related to", "see also", "see", "关联"],
    }

    # Bug fix: the original built a lowercased content_by_id dict but never
    # used it, matching lowercase keywords against the RAW content — so any
    # uppercase text silently never matched. Lowercase once, up front.
    lowered = [(cid, content.lower()) for cid, content in content_pairs]

    relationships = []
    for id1, content1 in lowered:
        for id2, content2 in lowered:
            if id1 >= id2:
                continue

            for rel_type, keywords_list in keywords.items():
                if any(kw in content1 for kw in keywords_list):
                    if any(kw in content2 for kw in keywords_list):
                        relationships.append((id1, id2, rel_type, 0.8))
                        break

    return relationships
145+
146+
def build_relationships_from_entries(entries: List[Dict]) -> int:
    """Detect relationships among *entries* and persist them.

    Entries lacking an "id" key are ignored; missing content defaults to "".

    Returns:
        Number of relationships successfully stored.
    """
    pairs = [(entry["id"], entry.get("content", "")) for entry in entries if "id" in entry]
    detected = auto_detect_relationships(pairs)

    stored = sum(
        1
        for src, dst, rel_type, weight in detected
        if add_relationship(src, dst, rel_type, weight)
    )
    return stored
158+
159+
def record_cooccurrence(entry_id: int, session_id: Optional[str] = None) -> None:
    """Record that an entry was accessed in a session.

    Args:
        entry_id: Entry that was accessed.
        session_id: Session identifier; may be None for unscoped access.
          (Annotation fixed: the original declared ``str = None``, an
          implicit Optional disallowed by PEP 484.)
    """
    graph_path = get_graph_path()
    if not graph_path.exists():
        init_graph()

    conn = sqlite3.connect(graph_path)
    try:
        conn.execute(
            "INSERT INTO cooccurrence (entry_id, session_id) VALUES (?, ?)",
            (entry_id, session_id),
        )
        conn.commit()
    finally:
        # Previously the connection leaked if the insert raised.
        conn.close()
174+
175+
def get_frequently_accessed_together(entry_id: int, limit: int = 5) -> List[Dict]:
    """Get entries frequently accessed in the same sessions as *entry_id*.

    Args:
        entry_id: Anchor entry.
        limit: Maximum number of co-occurring entries returned.

    Returns:
        Row dicts with entry_id, access_count, title and content, ordered by
        descending access_count; [] if the graph database does not exist.

    NOTE(review): joins ``context_entries``, which ``init_graph`` never
    creates in relationships.db — confirm that table is present in (or
    attached to) the graph database.
    """
    graph_path = get_graph_path()
    if not graph_path.exists():
        return []

    conn = sqlite3.connect(graph_path)
    conn.row_factory = sqlite3.Row
    try:
        cursor = conn.execute(
            """
            SELECT c2.entry_id, COUNT(*) as access_count, e.title, e.content
            FROM cooccurrence c1
            JOIN cooccurrence c2 ON c1.session_id = c2.session_id
                AND c1.entry_id != c2.entry_id
            JOIN context_entries e ON c2.entry_id = e.id
            WHERE c1.entry_id = ?
            GROUP BY c2.entry_id
            ORDER BY access_count DESC
            LIMIT ?
            """,
            (entry_id, limit),
        )
        rows = cursor.fetchall()
    finally:
        # Previously the connection leaked if the query raised.
        conn.close()
    return [dict(row) for row in rows]
204+
205+
def get_relationship_stats() -> Dict:
    """Return counts of stored relationships and co-occurrence records.

    Returns:
        Dict with ``total_relationships`` and ``total_cooccurrences``;
        both zero when the graph database does not exist yet.
    """
    graph_path = get_graph_path()
    if not graph_path.exists():
        return {"total_relationships": 0, "total_cooccurrences": 0}

    conn = sqlite3.connect(graph_path)
    try:
        rel_count = conn.execute("SELECT COUNT(*) FROM relationships").fetchone()[0]
        cooc_count = conn.execute("SELECT COUNT(*) FROM cooccurrence").fetchone()[0]
    finally:
        # Previously the connection leaked if a query raised.
        conn.close()

    return {
        "total_relationships": rel_count,
        "total_cooccurrences": cooc_count,
    }
227+
228+
def suggest_related(query: str, entries: List[Dict], limit: int = 5) -> List[Dict]:
    """Suggest related entries based on query keywords.

    Scores each entry by how many distinct query terms appear in its
    title+content (case-insensitive substring match) and returns the top
    *limit* entries with a positive score, best first.

    Bug fix: the original ``scored.sort(reverse=True)`` sorted
    (score, entry) tuples, so tied scores fell through to comparing the
    entry dicts and raised TypeError. Sorting now keys on the score alone;
    ties keep their input order (stable sort).
    """
    query_terms = set(query.lower().split())
    scored = []

    for entry in entries:
        content = entry.get("content", "").lower()
        title = entry.get("title", "").lower()
        text = f"{title} {content}"

        score = sum(1 for term in query_terms if term in text)
        if score > 0:
            scored.append((score, entry))

    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [entry for _, entry in scored[:limit]]

0 commit comments

Comments
 (0)