-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathlmagi.py
More file actions
1582 lines (1375 loc) · 81.7 KB
/
lmagi.py
File metadata and controls
1582 lines (1375 loc) · 81.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# lmagi.py - Backend web server for easyAGI
# lmagi (c) Gregory L. Magnusson MIT license 2024
# easyAGI (c) Gregory L. Magnusson MIT license 2024
# easy augmented generative intelligence UIUX
# multi-model LLM with automind reasoning from premise to draw_conclusion
# conversation from main_loop(self) is saved to ./memory/stm/timestampmemory.json from memory.py creating short term memory store of input response
# reasoning_loop(self)conversation from internal_conclusions are saved in ./memory/logs/thoughts.json
#
# ENTRYPOINT: Use lmagi_gui.py to launch the application (preferred method)
# This file can also be run directly for development/testing: python lmagi.py
from nicegui import ui, app # handle UIUX
from fastapi.staticfiles import StaticFiles # integrate fastapi static folder and gfx folder
from webmind.ollama_handler import OllamaHandler # Import OllamaHandler for modular Ollama interactions
from webmind.html_head import add_head_html # handler for the html head imports and meta tags
from webmind.navigation import Navigation, SideNav # Unified navigation system and drawer
from webmind.settings import SettingsManager # Settings persistence system
from automind.openmind import OpenMind # Importing OpenMind class from openmind.py
import concurrent.futures
import ujson as json
import asyncio
import aiohttp
import httpx
import logging
import signal
import sys
import os
# Set up logging
logging.basicConfig(level=logging.DEBUG)
# Serve static graphic files and easystyle.css from the 'gfx' directory
app.mount('/gfx', StaticFiles(directory='gfx'), name='gfx')
# Initialize settings manager
settings_manager = SettingsManager()
openmind = OpenMind() # initialize OpenMind instance
# Initialize OllamaHandler with settings.
# Fix: the original used a bare `except:`, which also swallows SystemExit /
# KeyboardInterrupt and hides the real failure reason; narrow to Exception
# and log why we fell back to defaults.
try:
    ollama_url = settings_manager.get('ollama_base_url', 'http://localhost:11434')
    ollama_model = OllamaHandler(base_url=ollama_url)
except Exception as e:
    logging.warning("Falling back to default OllamaHandler: %s", e)
    ollama_model = OllamaHandler()  # Fallback to default
# Toggle for autonomous reasoning
async def toggle_autonomous_reasoning(value):
    """Enable or disable the background autonomous-reasoning loop.

    When enabled, (re)starts ``openmind.reasoning_loop`` as a task unless one
    is already running; when disabled, cancels a live task and waits for it
    to unwind before clearing the reference.
    """
    openmind.autonomous_reasoning = value
    task = openmind.reasoning_task
    if value:
        # (Re)start only if no loop is currently alive.
        if task is None or task.done():
            openmind.reasoning_task = openmind._create_task(openmind.reasoning_loop())
        logging.info("Autonomous reasoning enabled")
        return
    # Disable path: cancel a running loop and swallow the expected cancellation.
    if task is not None and not task.done():
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
        openmind.reasoning_task = None
    logging.info("Autonomous reasoning disabled")
@ui.page('/')
def main():
global executor, message_container, log, keys_container, text, selected_api, conversation_history
executor = concurrent.futures.ThreadPoolExecutor() # initialize thread pool executor to manage and execute multiple tasks concurrently
selected_api = None # Variable to store selected API
conversation_history = [] # Store conversation history for context
async def send() -> None:
    """Submit the prompt from the input box to the currently selected API.

    Fix: previously, when no API was selected the prompt was silently
    discarded (input already cleared, no feedback). Now the user is told
    to pick a provider first.
    """
    question = text.value  # get value from input field
    text.value = ''  # clear input field for openmind
    if not question:
        ui.notify('Please enter a prompt.', type='warning')
        logging.warning("No prompt entered. Please enter a prompt.")
        return
    if selected_api:
        await generate_api_response(question)
    else:
        ui.notify('Please select an API first.', type='warning')
        logging.warning("send() called with no API selected; prompt dropped.")
async def generate_api_response(prompt) -> None:
    """Route *prompt* to the active backend.

    Ollama Cloud gets a dedicated streaming path with a conversation-style
    query bubble; every other provider is handed to the OpenMind AGI queue.
    """
    # If Ollama Cloud is selected, handle streaming response like Ollama page
    if selected_api == 'ollama_cloud':
        # Display user query in conversation-style format
        if message_container.client.connected:
            with message_container:
                from datetime import datetime
                timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                with ui.column().classes('message-block'):
                    ui.label('query').classes('text-lg font-semibold')
                    ui.html('<hr style="margin: 0.5rem 0; border: none; border-top: 1px solid var(--accent-blue);">')
                    ui.label(timestamp).classes('text-xs text-gray-500')
                    ui.markdown(prompt).classes('mt-2')
        # Auto-scroll after adding query (simple like Ollama page)
        # Best-effort: JS bridge may be unavailable if the client disconnected.
        try:
            await ui.run_javascript('const c=document.querySelector(".chat-container"); if(c){c.scrollTop=c.scrollHeight;}')
        except Exception:
            pass
        # Generate streaming response
        await generate_ollama_cloud_streaming_response(prompt)
    else:
        # For other APIs, use AGI
        await openmind.send_message(prompt)  # send the question to OpenMind
        await openmind.internal_queue.put(prompt)  # add question to the internal queue for processing
async def generate_ollama_cloud_streaming_response(prompt) -> None:
    """Stream a chat completion from Ollama Cloud into the message container.

    Maintains module-level ``conversation_history`` (user + assistant turns,
    trimmed to the last 20 messages), renders the reply incrementally into a
    markdown element, and batches UI updates to ~20/s.
    """
    global conversation_history
    try:
        from webmind.settings import SettingsManager
        settings = SettingsManager()
        base_url = settings.get('ollama_cloud_base_url', 'https://ollama.com')
        from webmind.chatter import OllamaCloudModel
        ollama_cloud_key = openmind.api_manager.get_api_key('ollama_cloud')
        if not ollama_cloud_key:
            ui.notify('Ollama Cloud API key not found', type='warning')
            return
        # Ensure AGI is initialized with ollama_cloud
        if not hasattr(openmind, 'agi_instance') or not openmind.agi_instance:
            await openmind.select_model('ollama_cloud')
            # Wait a moment for initialization
            import asyncio
            await asyncio.sleep(1.0)  # Increased wait time
        # Get the selected model - wait and retry if not set yet
        model_name = None
        max_retries = 5
        for attempt in range(max_retries):
            if hasattr(openmind, 'agi_instance') and openmind.agi_instance:
                if hasattr(openmind.agi_instance, 'agi') and hasattr(openmind.agi_instance.agi, 'chatter'):
                    model_name = openmind.agi_instance.agi.chatter.get_current_model()
                    if model_name:
                        logging.info(f"Found selected model: {model_name}")
                        break
            if attempt < max_retries - 1:
                await asyncio.sleep(0.5)
                logging.debug(f"Waiting for model selection (attempt {attempt + 1}/{max_retries})")
        if not model_name:
            ui.notify('Please select an Ollama Cloud model first', type='warning')
            logging.warning("No Ollama Cloud model selected after retries")
            return
        cloud_model = OllamaCloudModel(ollama_cloud_key, base_url=base_url)
        cloud_model.set_model(model_name)
        logging.info(f"Generating streaming response using Ollama Cloud model: {model_name}")
        import aiohttp
        import json
        import asyncio
        # Display response header before streaming (conversation-style)
        response_markdown = None
        if message_container.client.connected:
            with message_container:
                from datetime import datetime
                timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                response_container = ui.column().classes('message-block')
                with response_container:
                    ui.label('ezAGI').classes('text-lg font-semibold')
                    ui.html('<hr style="margin: 0.5rem 0; border: none; border-top: 1px solid var(--accent-blue);">')
                    ui.label(timestamp).classes('text-xs text-gray-500')
                    # Create markdown element for streaming content
                    response_markdown = ui.markdown().classes('mt-2')
        # Add user message to conversation history
        conversation_history.append({"role": "user", "content": prompt})
        # Smart context window management:
        # - Keep last 20 messages (10 exchanges)
        # - Or limit by approximate token count (~4000 tokens = ~20 messages)
        # - This prevents context window overflow while maintaining conversation flow
        max_messages = 20
        if len(conversation_history) > max_messages:
            # Keep system message if present, then last N user/assistant pairs
            # NOTE(review): this slice keeps only the tail; a leading system
            # message would actually be dropped once the limit is hit — confirm.
            conversation_history = conversation_history[-max_messages:]
        logging.debug(f"Conversation context: {len(conversation_history)} messages")
        response_content = ""
        headers = {
            "Authorization": f"Bearer {ollama_cloud_key}",
            "Content-Type": "application/json"
        }
        # Use /api/chat endpoint with messages array for conversation context
        # According to docs: https://docs.ollama.com/cloud
        payload = {
            "model": model_name,
            "messages": conversation_history,  # Include full conversation history
            "stream": True
        }
        logging.debug(f"Making request to {cloud_model.api_url}/chat with model: {model_name}, {len(conversation_history)} messages in context")
        # Get timeout from settings
        from webmind.settings import SettingsManager
        timeout_settings = SettingsManager()
        ollama_cloud_timeout = timeout_settings.get('ollama_cloud_timeout', 300)
        # Batch updates for smoother streaming (update every 50ms instead of every chunk)
        last_update_time = asyncio.get_event_loop().time()
        update_interval = 0.05  # 50ms between UI updates
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"{cloud_model.api_url}/chat",
                json=payload,
                headers=headers,
                timeout=aiohttp.ClientTimeout(total=ollama_cloud_timeout)
            ) as response:
                if response.status != 200:
                    error_text = await response.text()
                    logging.error(f"Ollama Cloud API error: HTTP {response.status} - {error_text}")
                    logging.error(f"Request URL: {cloud_model.api_url}/chat")
                    logging.error(f"Request payload: {payload}")
                    if response_markdown and message_container.client.connected:
                        response_markdown.set_content(f"**Error:** Ollama Cloud API returned HTTP {response.status}\n\n{error_text}")
                    ui.notify(f'Ollama Cloud API error: HTTP {response.status}', type='negative')
                    return
                # Response body is newline-delimited JSON; one object per chunk.
                async for line in response.content:
                    if line:
                        try:
                            line_str = line.decode('utf-8').strip()
                            if line_str:
                                data = json.loads(line_str)
                                # Check for errors first
                                if "error" in data:
                                    logging.error(f"Error in response: {data['error']}")
                                    error_msg = f"**Error:** {data['error']}"
                                    if response_markdown and message_container.client.connected:
                                        response_markdown.set_content(error_msg)
                                    ui.notify(f"Error: {data['error']}", type='negative')
                                    continue
                                # Handle response content
                                if "message" in data and "content" in data["message"]:
                                    # Chat endpoint format
                                    response_content += data["message"]["content"]
                                elif "response" in data:
                                    # Generate endpoint format (fallback)
                                    response_content += data["response"]
                                else:
                                    continue
                                # Batch UI updates for smoother streaming (update every 50ms)
                                current_time = asyncio.get_event_loop().time()
                                should_update_ui = current_time - last_update_time >= update_interval
                                if should_update_ui:
                                    if response_markdown and message_container.client.connected:
                                        response_markdown.set_content(response_content)
                                    last_update_time = current_time
                                # Auto-scroll on every chunk when content changes (like Ollama page)
                                try:
                                    await ui.run_javascript('const c=document.querySelector(".chat-container"); if(c){c.scrollTop=c.scrollHeight;}')
                                except Exception:
                                    pass
                                logging.debug(f"Received response chunk: {len(response_content)} chars")
                        except json.JSONDecodeError:
                            # Partial/blank chunk: skip and wait for the next line.
                            continue
                        except Exception as e:
                            logging.error(f"Error processing stream chunk: {e}")
        # Final update with complete response
        if response_markdown and message_container.client.connected:
            response_markdown.set_content(response_content)
        # Add assistant response to conversation history
        conversation_history.append({"role": "assistant", "content": response_content})
        logging.info("Generated streaming response successfully.")
        logging.debug(f"Complete response content: {response_content[:100]}... ({len(response_content)} chars)")
        logging.info(f"Conversation history now has {len(conversation_history)} messages")
        # Auto-scroll to bottom after streaming completes (simple like Ollama page)
        try:
            await ui.run_javascript('const c=document.querySelector(".chat-container"); if(c){c.scrollTop=c.scrollHeight;}')
        except Exception:
            pass
    except Exception as e:
        # Top-level boundary: surface the failure in the chat and as a toast.
        logging.error(f"Error generating Ollama Cloud streaming response: {e}", exc_info=True)
        error_msg = f"**Error:** {str(e)}"
        if message_container.client.connected:
            with message_container:
                ui.markdown(error_msg).classes('text-lg mt-4')
        ui.notify(f'Error generating response: {str(e)}', type='negative')
def select_api(service):
    """Record *service* as the active provider and schedule model selection."""
    global selected_api
    selected_api = service
    # Best-effort background task; failure to schedule must not break the UI.
    try:
        openmind._create_task(openmind.select_model(service))
    except Exception:
        pass
    message = f'Selected API: {service}'
    ui.notify(message, type='info')
    logging.info(message)
# Auto-select first available API if none chosen
def auto_select_api():
    """Pick the first provider, in priority order, that has a stored API key."""
    if selected_api:
        return  # user already chose one
    for svc in ('openai', 'groq', 'together', 'ai71', 'ollama_cloud'):
        if openmind.api_manager.get_api_key(svc):
            select_api(svc)
            break
# configure HTML head content from html_head.py; seed localStorage from server settings to avoid flash
add_head_html(ui, settings_manager.sync_to_localStorage())
dark_mode = ui.dark_mode()
# Ensure dark mode reflects persisted settings before building header/UI
try:
dark_mode.value = settings_manager.get('dark_mode', True)
except Exception:
dark_mode.value = True
# CRITICAL FIX #5: Restore autonomous state BEFORE creating header
# Use SettingsManager to restore state synchronously
autonomous_state_ref = {'value': settings_manager.get('autonomous_reasoning', False)}
openmind.autonomous_reasoning = autonomous_state_ref['value']
# Also sync from localStorage if available (for browser-only persistence)
async def restore_autonomous_state_before_header():
    """Sync the autonomous-reasoning flag from browser localStorage.

    Runs on an immediate timer so the state is restored before the header's
    switch is rendered. Fix: narrowed the bare ``except:`` (which also
    swallowed task cancellation) to ``except Exception`` — the JS bridge may
    legitimately be unavailable this early, so failures stay best-effort.
    """
    try:
        stored = await ui.run_javascript('localStorage.getItem("autonomous-reasoning") === "true" || window.restoredAutonomousState === true')
        if stored:
            autonomous_state_ref['value'] = True
            openmind.autonomous_reasoning = True
            # Sync to SettingsManager
            settings_manager.set('autonomous_reasoning', True)
    except Exception:
        pass
# Use immediate timer to restore before header creation
ui.timer(0.01, restore_autonomous_state_before_header, once=True)
drawer = SideNav(current_page='chat').create_drawer()
async def toggle_dark_mode():
    """Flip dark mode and persist the choice to localStorage and SettingsManager."""
    dark_mode.value = not dark_mode.value  # toggle dark mode value
    # Persist to both localStorage and SettingsManager
    await ui.run_javascript(f'''
        localStorage.setItem('theme', '{'dark' if dark_mode.value else 'light'}');
        const savedTheme = localStorage.getItem('ui-theme') || 'everforest';
        if (window.applyTheme) {{
            window.applyTheme(savedTheme, {str(dark_mode.value).lower()});
        }} else {{
            // Fallback: manually sync body--dark class
            if (document.body) {{
                if ({str(dark_mode.value).lower()}) {{
                    document.body.classList.add('body--dark');
                }} else {{
                    document.body.classList.remove('body--dark');
                }}
            }}
        }}
    ''')
    # Sync to SettingsManager
    settings_manager.set('dark_mode', dark_mode.value)
async def init_theme_from_storage():
    """Restore theme + dark-mode from SettingsManager and mirror into localStorage."""
    # Load from SettingsManager first (server-side persistence)
    theme_name = settings_manager.get('theme', 'everforest')
    dark_mode_setting = settings_manager.get('dark_mode', True)
    # Sync to localStorage (browser persistence)
    await ui.run_javascript(f'''
        localStorage.setItem('ui-theme', '{theme_name}');
        localStorage.setItem('theme', '{'dark' if dark_mode_setting else 'light'}');
    ''')
    # Set dark_mode value
    dark_mode.value = dark_mode_setting
    # CRITICAL FIX #2: Sync with unified theme system after restoring
    await ui.run_javascript(f'''
        const savedTheme = localStorage.getItem('ui-theme') || 'everforest';
        if (window.applyTheme) {{
            window.applyTheme(savedTheme, {str(dark_mode.value).lower()});
        }}
    ''')
# Wrapper to sync reactive state with openmind.autonomous_reasoning
async def autonomous_change_handler(e):
    """Persist and apply a change of the autonomous-reasoning switch."""
    # NiceGUI hands us ValueChangeEventArguments; tolerate a raw bool as well.
    value = getattr(e, 'value', e)
    autonomous_state_ref['value'] = value
    openmind.autonomous_reasoning = value
    # Persist to the browser's localStorage and the server-side settings store.
    await ui.run_javascript(f'localStorage.setItem("autonomous-reasoning", "{str(value).lower()}")')
    settings_manager.set('autonomous_reasoning', value)
    await toggle_autonomous_reasoning(value)
# Create unified navigation header
nav = Navigation(current_page='chat', dark_mode=dark_mode, drawer=drawer)
nav.create_header(
autonomous_callback=autonomous_change_handler,
dark_mode_callback=toggle_dark_mode,
autonomous_state=autonomous_state_ref['value']
)
# Initialize theme and autonomous state from storage
async def init_settings_from_storage():
    """One-shot startup sync: restore theme, then autonomous-reasoning state."""
    await init_theme_from_storage()
    # Restore autonomous reasoning state (already set from SettingsManager, but sync from localStorage too)
    stored_autonomous = await ui.run_javascript('localStorage.getItem("autonomous-reasoning")')
    if stored_autonomous == 'true':
        autonomous_state_ref['value'] = True
        openmind.autonomous_reasoning = True
        settings_manager.set('autonomous_reasoning', True)
        await toggle_autonomous_reasoning(True)
        # Update switch if it exists
        # (DOM-level toggle because the switch widget was built before this ran)
        await ui.run_javascript('''
            const switches = document.querySelectorAll("input[type=\'checkbox\']");
            switches.forEach(sw => {
                if (sw.closest(".q-switch") && sw.parentElement.textContent.includes("Autonomous")) {
                    sw.checked = true;
                    sw.dispatchEvent(new Event("change", { bubbles: true }));
                }
            });
        ''')
ui.timer(0.1, init_settings_from_storage, once=True)
# Attempt auto-select after initial settings load
ui.timer(0.2, auto_select_api, once=True)
# Ensure AGI is initialized from stored keys on page load
try:
openmind._create_task(openmind.initialize_agi())
except Exception:
pass
# Model selector moved to footer menu (removed FAB from content area)
# (tabs removed; logs moved to /logs, API keys moved to /settings)
# Chat display area - conversation-style with scrollable container (like Ollama page)
with ui.column().classes('page-content'):
with ui.column().classes('chat-container'):
message_container = ui.column()
openmind.message_container = message_container
# Terminal-style prompt footer
with ui.footer().classes('footer terminal-footer'):
with ui.row().classes('w-full items-center gap-2'):
ui.label('>').classes('terminal-prefix')
text = ui.textarea(placeholder='Type your prompt (Enter=send, Shift+Enter=newline)').props('rows=1 autogrow').classes('prompt-input')
ui.button(icon='send', on_click=send).classes('send-btn').props('flat round')
# API model selector button with dropdown menu (same pattern as Ollama page)
with ui.button(icon='psychology').props('round flat').classes('q-ml-sm'):
with ui.menu().props('anchor="top right"') as api_menu:
ui.menu_item('Model Selection').props('disable')
ui.separator()
api_menu_items_container = ui.column()
def select_ollama_cloud_model(model_name):
    """Select a specific Ollama Cloud model.

    Marks 'ollama_cloud' as the active API, then schedules a background task
    that initializes the AGI instance if needed and pushes *model_name* into
    its chatter, re-initializing once if the chatter is missing.
    """
    global selected_api
    selected_api = 'ollama_cloud'
    logging.info(f"Selecting Ollama Cloud model: {model_name}")
    try:
        from webmind.settings import SettingsManager
        settings = SettingsManager()
        base_url = settings.get('ollama_cloud_base_url', 'https://ollama.com')
        from webmind.chatter import OllamaCloudModel
        ollama_cloud_key = openmind.api_manager.get_api_key('ollama_cloud')
        if ollama_cloud_key:
            # First, ensure AGI is initialized with ollama_cloud
            async def ensure_model_selected():
                # Initialize AGI if needed
                if not hasattr(openmind, 'agi_instance') or not openmind.agi_instance:
                    logging.info("Initializing AGI with ollama_cloud")
                    await openmind.select_model('ollama_cloud')
                    await asyncio.sleep(0.5)  # Wait for initialization
                # Now set the model
                if hasattr(openmind, 'agi_instance') and openmind.agi_instance:
                    if hasattr(openmind.agi_instance, 'agi') and hasattr(openmind.agi_instance.agi, 'chatter'):
                        if hasattr(openmind.agi_instance.agi.chatter, 'set_model'):
                            openmind.agi_instance.agi.chatter.set_model(model_name)
                            logging.info(f'Successfully set Ollama Cloud model to: {model_name}')
                            # Verify it was set
                            current = openmind.agi_instance.agi.chatter.get_current_model()
                            logging.info(f'Verified current model: {current}')
                        else:
                            logging.error("chatter.set_model method not found")
                            # Reinitialize
                            await openmind.select_model('ollama_cloud')
                            await asyncio.sleep(0.5)
                            if hasattr(openmind.agi_instance, 'agi') and hasattr(openmind.agi_instance.agi, 'chatter'):
                                if hasattr(openmind.agi_instance.agi.chatter, 'set_model'):
                                    openmind.agi_instance.agi.chatter.set_model(model_name)
                                    logging.info(f'Set model after reinit: {model_name}')
                    else:
                        logging.error("chatter not found, reinitializing")
                        await openmind.select_model('ollama_cloud')
                        await asyncio.sleep(0.5)
                        if hasattr(openmind.agi_instance, 'agi') and hasattr(openmind.agi_instance.agi, 'chatter'):
                            if hasattr(openmind.agi_instance.agi.chatter, 'set_model'):
                                openmind.agi_instance.agi.chatter.set_model(model_name)
                                logging.info(f'Set model after reinit: {model_name}')
                else:
                    logging.error("AGI instance not available")
            # Local import; the nested coroutine closes over this binding.
            import asyncio
            openmind._create_task(ensure_model_selected())
            ui.notify(f'Selected Ollama Cloud model: {model_name}', type='info')
            logging.info(f'Selected Ollama Cloud model: {model_name}')
    except Exception as e:
        logging.error(f"Error selecting Ollama Cloud model: {e}", exc_info=True)
        ui.notify(f'Error selecting model: {str(e)}', type='negative')
def update_api_menu():
    """Populate the menu with available providers and their models (same pattern as Ollama page)"""
    api_menu_items_container.clear()
    with api_menu_items_container:
        keys_list = list(openmind.api_manager.api_keys.items())
        # Handle Ollama Cloud separately to show models
        ollama_cloud_key = openmind.api_manager.get_api_key('ollama_cloud')
        if ollama_cloud_key:
            try:
                from webmind.settings import SettingsManager
                settings = SettingsManager()
                base_url = settings.get('ollama_cloud_base_url', 'https://ollama.com')
                logging.info(f"Loading Ollama Cloud models with base_url: {base_url}")
                if not base_url or not base_url.strip():
                    logging.warning("Ollama Cloud base_url is empty, using default")
                    base_url = 'https://ollama.com'
                from webmind.chatter import OllamaCloudModel
                cloud_model = OllamaCloudModel(ollama_cloud_key, base_url=base_url)
                # NOTE(review): list_models() is a synchronous call made while
                # building the menu — confirm it does not block the UI thread.
                models = cloud_model.list_models()
                ui.menu_item('Ollama Cloud').props('disable')
                if models:
                    logging.info(f"Displaying {len(models)} Ollama Cloud models in menu")
                    for model_name in models:
                        # Use same pattern as Ollama page - lambda with default parameter
                        # (default arg binds model_name now, avoiding the late-binding trap)
                        ui.menu_item(model_name, on_click=lambda m=model_name: select_ollama_cloud_model(m))
                else:
                    logging.warning("No Ollama Cloud models found")
                    ui.menu_item('No models available').props('disable')
                ui.separator()
            except Exception as e:
                logging.error(f"Error loading Ollama Cloud models: {e}", exc_info=True)
                ui.menu_item('Ollama Cloud (Error)').props('disable')
                ui.separator()
        # Handle other providers
        for service, key in keys_list:
            if service == 'ollama_cloud':
                continue  # Already handled above
            # Use same pattern - lambda with default parameter
            ui.menu_item(service.capitalize(), on_click=lambda s=service: select_api(s))
# Consolidated refresh that runs after UI is built (same pattern as Ollama page)
def refresh_api_menu():
    """Rebuild the API menu and, for Ollama Cloud, auto-select the first model
    when none has been chosen yet. All failures are logged, never raised."""
    try:
        update_api_menu()
        # Auto-select first Ollama Cloud model if ollama_cloud is selected and no model is set
        if selected_api == 'ollama_cloud':
            ollama_cloud_key = openmind.api_manager.get_api_key('ollama_cloud')
            if ollama_cloud_key:
                try:
                    from webmind.settings import SettingsManager
                    settings = SettingsManager()
                    base_url = settings.get('ollama_cloud_base_url', 'https://ollama.com')
                    from webmind.chatter import OllamaCloudModel
                    cloud_model = OllamaCloudModel(ollama_cloud_key, base_url=base_url)
                    models = cloud_model.list_models()
                    if models:
                        # Check if a model is already selected
                        current_model = None
                        if hasattr(openmind, 'agi_instance') and openmind.agi_instance:
                            if hasattr(openmind.agi_instance, 'agi') and hasattr(openmind.agi_instance.agi, 'chatter'):
                                current_model = openmind.agi_instance.agi.chatter.get_current_model()
                        # Auto-select first model if none selected
                        if not current_model:
                            select_ollama_cloud_model(models[0])
                            logging.info(f"Auto-selected first Ollama Cloud model: {models[0]}")
                except Exception as e:
                    # Auto-selection is best-effort; debug level keeps logs quiet.
                    logging.debug(f"Could not auto-select Ollama Cloud model: {e}")
    except Exception as e:
        logging.error(f"Error refreshing API menu: {e}", exc_info=True)
# Defer refresh to ensure menu container exists
ui.timer(0.1, refresh_api_menu, once=True)
# Store refresh function for later use
openmind.refresh_api_menu = refresh_api_menu
ui.markdown('[easyAGI](https://rage.pythai.net)').classes('footer-link')
# Install Enter/Shift+Enter handler on prompt
ui.timer(0.05, lambda: ui.run_javascript('''
(function(){
const area = document.querySelector('.prompt-input textarea');
if (!area || area.__enterHandlerInstalled) return;
area.__enterHandlerInstalled = true;
area.addEventListener('keydown', function(ev){
if (ev.key === 'Enter' && !ev.shiftKey) {
ev.preventDefault();
const btn = document.querySelector('.send-btn');
if (btn) btn.click();
}
});
})();
'''), once=True)
# Start main loop to process user input (reasoning loop started separately if autonomous mode enabled)
# Note: main_loop processes user input queue; reasoning_loop handles autonomous reasoning
openmind._create_task(openmind.main_loop())
logging.debug("starting easyAGI")
# Entry point - only run when launched directly (not when imported)
# Note: lmagi_gui.py is the preferred entrypoint for normal use
# NOTE(review): the '/ollama' page below is defined AFTER this ui.run() call;
# NiceGUI's "__mp_main__" reload flow still executes the whole module, but
# confirm the registration order is intentional.
if __name__ in {"__main__", "__mp_main__"}:
    # Check if running in headless mode (launched by GUI)
    headless = os.environ.get('LMAGI_HEADLESS', '0') == '1'
    try:
        ui.run(title='easyAGI', port=8080, show=not headless)
    except KeyboardInterrupt:
        logging.info("Shutting down...")
        sys.exit(0)
@ui.page('/ollama')
def ollama_page():
global ollama_models, selected_model, response_output_ollama, ollama_menu_container
ollama_models = [] # List to store Ollama model references
selected_model = None # Variable to store selected Ollama model
ollama_menu_container = None # Container for model menu items
async def send() -> None:
    """Read the prompt box, clear it, and stream a reply from the chosen local model."""
    question, text.value = text.value, ''  # grab prompt and reset input atomically
    if not question:
        ui.notify('Please enter a prompt.', type='warning')
        logging.warning("No prompt entered. Please enter a prompt.")
        return
    if not selected_model:
        return  # nothing chosen yet; generate_ollama_response would reject anyway
    await generate_ollama_response(question)
async def generate_ollama_response(prompt) -> None:
    """Stream a completion for *prompt* from the local Ollama /generate endpoint,
    render it incrementally, then feed both prompt and reply into OpenMind."""
    if not selected_model:
        ui.notify('Please select an Ollama model first.', type='warning')
        logging.warning("No model selected. Please select an Ollama model first.")
        return
    logging.debug(f"Generating response using model: {selected_model} with prompt: {prompt}")
    try:
        response_content = ""
        async with aiohttp.ClientSession() as session:
            payload = {
                "model": selected_model,
                "prompt": prompt,
                "stream": True
            }
            logging.debug(f"Sending payload: {payload}")
            async with session.post(ollama_model.api_url + "/generate", json=payload) as response:
                # Newline-delimited JSON stream, one object per chunk.
                # NOTE(review): a malformed/partial line makes json.loads raise
                # and aborts the whole stream via the outer except — confirm
                # chunks always arrive line-aligned.
                async for line in response.content:
                    if line:
                        data = json.loads(line.decode('utf-8'))
                        if "response" in data:
                            response_content += data["response"]
                            # Update with markdown rendering for streaming
                            response_output_ollama.set_content(response_content)
                            logging.debug(f"Received response chunk: {data['response']}")
                            # Auto scroll chat container as content streams
                            try:
                                await ui.run_javascript('const c=document.querySelector(".chat-container"); if(c){c.scrollTop=c.scrollHeight;}')
                            except Exception:
                                pass
                        elif "error" in data:
                            logging.error(f"Error in response: {data['error']}")
                            ui.notify(f"Error: {data['error']}", type='negative')
        logging.info("Generated response successfully.")
        logging.debug(f"Complete response content: {response_content}")
        await openmind.send_message(prompt)  # Send the prompt to OpenMind
        await openmind.internal_queue.put(prompt)  # Add prompt to the internal queue for processing
        logging.debug(f"Sent prompt to OpenMind: {prompt}")
        await openmind.send_message(response_content)  # Send the response to OpenMind
        await openmind.internal_queue.put(response_content)  # Add response to the internal queue for processing
        logging.debug(f"Sent response to OpenMind: {response_content}")
    except Exception as e:
        logging.error(f"Error generating response: {e}")
        ui.notify(f"Error generating response: {e}", type='negative')
def list_ollama_models():
    """Query the local Ollama daemon for installed models and refresh the menu.

    Fix: the success check previously required ``len(ollama_models) > 1``, so
    a machine with exactly one installed model was reported as 'No models
    found'. Any non-empty list now counts as success.
    (NOTE(review): if list_models() actually returns raw CLI lines including a
    header row, restore the original `> 1` threshold — confirm its contract.)
    """
    try:
        logging.debug("Running 'ollama list' command.")
        result = ollama_model.list_models()
        if result:
            logging.debug(f"'ollama list' output:\n{result}")
            global ollama_models
            ollama_models = result  # Keep the full result list
            if ollama_models:  # was: len(ollama_models) > 1 — hid single-model installs
                ui.notify('Models listed successfully.', type='positive')
                logging.info("Models listed successfully.")
                update_ollama_menu()
            else:
                ui.notify('No models found.', type='negative')
                logging.warning("No models found.")
        else:
            logging.error("Error listing models.")
            ui.notify('Error listing models.', type='negative')
    except Exception as e:
        logging.error(f"Exception during model listing: {e}")
        ui.notify('Exception occurred while listing models.', type='negative')
# configure HTML head content; seed localStorage from server settings to avoid flash
add_head_html(ui, settings_manager.sync_to_localStorage())
dark_mode = ui.dark_mode()
# Ensure dark mode reflects persisted settings before building header/UI
try:
dark_mode.value = settings_manager.get('dark_mode', True)
except Exception:
dark_mode.value = True
# CRITICAL FIX #5: Restore autonomous state BEFORE creating header
# Use SettingsManager to restore state synchronously
autonomous_state_ref = {'value': settings_manager.get('autonomous_reasoning', False)}
openmind.autonomous_reasoning = autonomous_state_ref['value']
# Also sync from localStorage if available (for browser-only persistence)
async def restore_autonomous_state_before_header():
    """Best-effort upgrade of the autonomous-reasoning flag from browser storage.

    Server-side settings were already applied synchronously; this coroutine
    only flips the flag to True when the browser's localStorage (or a
    previously restored window flag) says it was enabled, then writes it
    back to the SettingsManager.
    """
    try:
        stored = await ui.run_javascript('localStorage.getItem("autonomous-reasoning") === "true" || window.restoredAutonomousState === true')
        if stored:
            autonomous_state_ref['value'] = True
            openmind.autonomous_reasoning = True
            # Sync to SettingsManager
            settings_manager.set('autonomous_reasoning', True)
    except Exception as e:
        # Narrowed from a bare `except:` which would also swallow
        # asyncio.CancelledError / KeyboardInterrupt in this coroutine.
        # Keep best-effort semantics but leave a trace for debugging.
        logging.debug(f"Could not restore autonomous state from browser: {e}")
# Run shortly after page build so the client-side JS context exists
ui.timer(0.01, restore_autonomous_state_before_header, once=True)
drawer = SideNav(current_page='ollama').create_drawer()
async def toggle_dark_mode():
    """Flip dark mode, mirror the choice to the browser, and persist it."""
    dark_mode.value = not dark_mode.value  # toggle dark mode value
    # Precompute the values interpolated into the client-side script
    theme_label = 'dark' if dark_mode.value else 'light'
    dark_flag = str(dark_mode.value).lower()
    # Persist to both localStorage and SettingsManager
    await ui.run_javascript(f'''
        localStorage.setItem('theme', '{theme_label}');
        const savedTheme = localStorage.getItem('ui-theme') || 'everforest';
        if (window.applyTheme) {{
            window.applyTheme(savedTheme, {dark_flag});
        }}
    ''')
    # Sync to SettingsManager
    settings_manager.set('dark_mode', dark_mode.value)
async def init_theme_from_storage():
    """Push persisted theme settings from the server into the browser and UI."""
    # Load from SettingsManager first (server-side persistence)
    theme_name = settings_manager.get('theme', 'everforest')
    dark_enabled = settings_manager.get('dark_mode', True)
    mode_label = 'dark' if dark_enabled else 'light'
    # Sync to localStorage (browser persistence)
    await ui.run_javascript(f'''
        localStorage.setItem('ui-theme', '{theme_name}');
        localStorage.setItem('theme', '{mode_label}');
    ''')
    # Set dark_mode value
    dark_mode.value = dark_enabled
    # CRITICAL FIX #2: Sync with unified theme system after restoring
    dark_flag = str(dark_mode.value).lower()
    await ui.run_javascript(f'''
        const savedTheme = localStorage.getItem('ui-theme') || 'everforest';
        if (window.applyTheme) {{
            window.applyTheme(savedTheme, {dark_flag});
        }}
    ''')
# Wrapper to sync reactive state with openmind.autonomous_reasoning
async def autonomous_change_handler(e):
    """Propagate the autonomous-reasoning switch to every persistence layer."""
    # Extract value from event object (NiceGUI's ValueChangeEventArguments);
    # a raw value passes through unchanged when no `.value` attribute exists
    new_state = getattr(e, 'value', e)
    autonomous_state_ref['value'] = new_state
    openmind.autonomous_reasoning = new_state
    # Persist to both localStorage and SettingsManager
    await ui.run_javascript(f'localStorage.setItem("autonomous-reasoning", "{str(new_state).lower()}")')
    settings_manager.set('autonomous_reasoning', new_state)
    await toggle_autonomous_reasoning(new_state)
# Create unified navigation header
nav = Navigation(current_page='ollama', dark_mode=dark_mode, drawer=drawer)
nav.create_header(
    autonomous_callback=autonomous_change_handler,
    dark_mode_callback=toggle_dark_mode,
    autonomous_state=autonomous_state_ref['value']
)
# Initialize theme and autonomous state from storage
async def init_settings_from_storage():
    """Restore theme and autonomous-reasoning state once the client is ready."""
    await init_theme_from_storage()
    # Restore autonomous reasoning state (already set from SettingsManager, but sync from localStorage too)
    stored_autonomous = await ui.run_javascript('localStorage.getItem("autonomous-reasoning")')
    if stored_autonomous != 'true':
        return
    autonomous_state_ref['value'] = True
    openmind.autonomous_reasoning = True
    settings_manager.set('autonomous_reasoning', True)
    await toggle_autonomous_reasoning(True)
    # Update switch if it exists
    await ui.run_javascript('''
        const switches = document.querySelectorAll("input[type=\'checkbox\']");
        switches.forEach(sw => {
            if (sw.closest(".q-switch") && sw.parentElement.textContent.includes("Autonomous")) {
                sw.checked = true;
                sw.dispatchEvent(new Event("change", { bubbles: true }));
            }
        });
    ''')
# Defer restore until the client connection is established
ui.timer(0.1, init_settings_from_storage, once=True)
def select_ollama_model(model_name):
    """Handle model selection from the footer menu.

    Records the choice in the module-level ``selected_model``, forwards it to
    the Ollama backend, and confirms the selection to the user.
    """
    global selected_model
    selected_model = model_name
    ollama_model.select_model(model_name)
    logging.info(f"User selected Ollama model: {model_name}")
    ui.notify(f'Selected model: {model_name}', type='positive')
# Ollama model selector moved to footer menu (removed FAB)
def update_ollama_menu():
    """Populate the menu with available Ollama models.

    Rebuilds ``ollama_menu_items_container`` from the cached ``ollama_models``
    listing; the first line is assumed to be the `ollama list` header and is
    skipped. Shows a disabled placeholder entry when no models are cached.
    """
    ollama_menu_items_container.clear()
    with ollama_menu_items_container:
        if ollama_models and len(ollama_models) > 1:
            for model_line in ollama_models[1:]:  # Skip header line
                parts = model_line.split()
                if not parts:
                    # Robustness: blank/whitespace-only lines would have made
                    # `model_line.split()[0]` raise IndexError — skip them.
                    continue
                model_name = parts[0]
                # Bind model_name via default arg to avoid late-binding closure bug
                ui.menu_item(model_name, on_click=lambda m=model_name: select_ollama_model(m))
        else:
            ui.menu_item('No models found').props('disable')
# Consolidated refresh that runs after UI is built
def refresh_ollama_models():
    """Fetch the model list, rebuild the menu, and auto-select a default.

    Runs after the UI is built (via a deferred timer); failures are logged
    but must never crash page construction.
    """
    try:
        list_ollama_models()
        update_ollama_menu()
        # Auto-select the first listed model if none is chosen yet
        if (not selected_model) and ollama_models and len(ollama_models) > 1:
            first_model = ollama_models[1].split()[0]
            select_ollama_model(first_model)
    except Exception as e:
        # Best-effort refresh: log the failure instead of silently
        # swallowing it (previously `except Exception: pass`).
        logging.error(f"Failed to refresh Ollama models: {e}")
# Defer refresh to ensure menu container exists
ui.timer(0.05, refresh_ollama_models, once=True)
# Chat display area for Ollama responses
with ui.column().classes('page-content'):
    with ui.column().classes('chat-container'):
        # Markdown target that streamed model responses are rendered into
        response_output_ollama = ui.markdown().classes('text-lg mt-4')
# terminal-style footer for ollama too
with ui.footer().classes('footer terminal-footer'):
    with ui.row().classes('w-full items-center gap-2'):
        ui.label('>').classes('terminal-prefix')
        # Prompt entry; `send` is the page's submit handler defined above
        text = ui.textarea(placeholder='Type your prompt (Enter=send, Shift+Enter=newline)').props('rows=1 autogrow').classes('prompt-input')
        ui.button(icon='send', on_click=send).classes('send-btn').props('flat round')
        # Ollama model selector button with dropdown
        with ui.button(icon='smart_toy').props('round flat').classes('q-ml-sm'):
            with ui.menu().props('anchor="top right"') as ollama_menu:
                ui.menu_item('Ollama Models').props('disable')
                ui.separator()
                # Container that update_ollama_menu() repopulates with model entries
                ollama_menu_items_container = ui.column()
        ui.markdown('[easyAGI](https://rage.pythai.net)').classes('footer-link')
# Install Enter/Shift+Enter handler on prompt (ollama)
# (guarded by __enterHandlerInstalled so repeated timers don't double-bind)
ui.timer(0.05, lambda: ui.run_javascript('''
(function(){
    const area = document.querySelector('.prompt-input textarea');
    if (!area || area.__enterHandlerInstalled) return;
    area.__enterHandlerInstalled = true;
    area.addEventListener('keydown', function(ev){
        if (ev.key === 'Enter' && !ev.shiftKey) {
            ev.preventDefault();
            const btn = document.querySelector('.send-btn');
            if (btn) btn.click();
        }
    });
})();
'''), once=True)
@ui.page('/settings')
def settings_page():
"""Application settings: appearance and API keys"""
add_head_html(ui, settings_manager.sync_to_localStorage())
dark_mode = ui.dark_mode()
# Ensure dark mode reflects persisted settings before building header/UI
try:
dark_mode.value = settings_manager.get('dark_mode', True)
except Exception:
dark_mode.value = True
# CRITICAL FIX #5: Restore autonomous state BEFORE creating header
# Use SettingsManager to restore state synchronously
autonomous_state_ref = {'value': settings_manager.get('autonomous_reasoning', False)}
openmind.autonomous_reasoning = autonomous_state_ref['value']
# Also sync from localStorage if available (for browser-only persistence)
async def restore_autonomous_state_before_header():
    """Best-effort upgrade of the autonomous-reasoning flag from browser storage.

    Server-side settings were already applied synchronously; this coroutine
    only flips the flag to True when the browser's localStorage (or a
    previously restored window flag) says it was enabled, then writes it
    back to the SettingsManager.
    """
    try:
        stored = await ui.run_javascript('localStorage.getItem("autonomous-reasoning") === "true" || window.restoredAutonomousState === true')
        if stored:
            autonomous_state_ref['value'] = True
            openmind.autonomous_reasoning = True
            # Sync to SettingsManager
            settings_manager.set('autonomous_reasoning', True)
    except Exception as e:
        # Narrowed from a bare `except:` which would also swallow
        # asyncio.CancelledError / KeyboardInterrupt in this coroutine.
        # Keep best-effort semantics but leave a trace for debugging.
        logging.debug(f"Could not restore autonomous state from browser: {e}")
# Run shortly after page build so the client-side JS context exists
ui.timer(0.01, restore_autonomous_state_before_header, once=True)
drawer = SideNav(current_page='settings').create_drawer()
async def init_theme_from_storage():
    """Push persisted theme settings from the server into the browser and UI."""
    # Load from SettingsManager first (server-side persistence)
    theme_name = settings_manager.get('theme', 'everforest')
    dark_enabled = settings_manager.get('dark_mode', True)
    mode_label = 'dark' if dark_enabled else 'light'
    # Sync to localStorage (browser persistence)
    await ui.run_javascript(f'''
        localStorage.setItem('ui-theme', '{theme_name}');
        localStorage.setItem('theme', '{mode_label}');
    ''')
    # Set dark_mode value
    dark_mode.value = dark_enabled
    # CRITICAL FIX #2: Sync with unified theme system after restoring
    dark_flag = str(dark_mode.value).lower()
    await ui.run_javascript(f'''
        const savedTheme = localStorage.getItem('ui-theme') || 'everforest';
        if (window.applyTheme) {{
            window.applyTheme(savedTheme, {dark_flag});
        }}
    ''')
async def on_theme_switch(e):
    """Apply the dark-mode switch value, sync the browser, and persist it."""
    # set according to switch and persist
    dark_mode.value = bool(e.value)
    # Precompute the values interpolated into the client-side script
    mode_label = 'dark' if dark_mode.value else 'light'
    dark_flag = str(dark_mode.value).lower()
    # CRITICAL FIX #2: Sync dark mode with unified theme system
    await ui.run_javascript(f'''
        localStorage.setItem('theme', '{mode_label}');
        const savedTheme = localStorage.getItem('ui-theme') || 'everforest';
        if (window.applyTheme) {{
            window.applyTheme(savedTheme, {dark_flag});
        }} else {{
            // Fallback: manually sync body--dark class
            if (document.body) {{
                if ({dark_flag}) {{
                    document.body.classList.add('body--dark');
                }} else {{
                    document.body.classList.remove('body--dark');
                }}
            }}
        }}
    ''')
    # Persist to SettingsManager
    settings_manager.set('dark_mode', dark_mode.value)
async def autonomous_change_handler(value):
    """Persist and propagate the autonomous-reasoning toggle (raw bool value)."""
    enabled = value
    autonomous_state_ref['value'] = enabled
    openmind.autonomous_reasoning = enabled
    # Persist to both localStorage and SettingsManager
    await ui.run_javascript(f'localStorage.setItem("autonomous-reasoning", "{str(enabled).lower()}")')
    settings_manager.set('autonomous_reasoning', enabled)
    await toggle_autonomous_reasoning(enabled)
# Build the unified navigation header for the settings page
nav = Navigation(current_page='settings', dark_mode=dark_mode, drawer=drawer)
nav.create_header(
    autonomous_callback=autonomous_change_handler,
    dark_mode_callback=lambda: None, # theme is managed by the switch on this page
    autonomous_state=autonomous_state_ref['value']
)