Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions dream-server/extensions/services/dashboard-api/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ Environment variables (set in `.env`):

| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/api/settings` | Yes | Consolidated Settings payload (system identity, updates, routing, storage) |
| `GET` | `/api/service-tokens` | Yes | Service auth tokens (e.g. OpenClaw) |
| `GET` | `/api/external-links` | Yes | Sidebar links from service manifests |
| `GET` | `/api/storage` | Yes | Storage breakdown (models, vector DB, total) |
Expand Down
205 changes: 155 additions & 50 deletions dream-server/extensions/services/dashboard-api/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,20 +289,38 @@ async def api_status(api_key: str = Depends(verify_api_key)):
sub-call (GPU, health checks, llama metrics …) never returns a raw 500
to the dashboard — the frontend would flash "0/17" otherwise.
"""
return await _safe_api_status("/api/status")


def _api_status_fallback() -> dict:
    """Fallback payload shared by endpoints that depend on status aggregation."""
    # Neutral inference metrics so the dashboard widgets render zeros
    # instead of crashing on missing keys.
    idle_inference = {
        "tokensPerSecond": 0,
        "lifetimeTokens": 0,
        "loadedModel": None,
        "contextSize": None,
    }
    payload: dict = {
        "gpu": None,
        "services": [],
        "model": None,
        "bootstrap": None,
        "uptime": 0,
        "version": app.version,
        "tier": "Unknown",
        "cpu": {"percent": 0, "temp_c": None},
        "ram": {"used_gb": 0, "total_gb": 0, "percent": 0},
        "inference": idle_inference,
    }
    # Surface manifest problems even in the degraded path.
    payload["manifest_errors"] = MANIFEST_ERRORS
    return payload


async def _safe_api_status(source: str) -> dict:
    """Build API status with a stable fallback for dashboard consumers.

    Args:
        source: Endpoint path used to attribute the failure in logs
            (e.g. ``"/api/status"`` or ``"/api/settings"``).

    Returns:
        The aggregated status dict, or the shared fallback payload when
        aggregation raises — callers always get a renderable shape.
    """
    try:
        return await _build_api_status()
    except Exception:
        # Broad catch is deliberate: any sub-failure (GPU probe, health
        # check, metrics scrape) must not surface a raw 500 to the UI.
        logger.exception("%s handler failed — returning safe fallback", source)
        return _api_status_fallback()


async def _build_api_status() -> dict:
Expand Down Expand Up @@ -349,7 +367,16 @@ async def _build_api_status() -> dict:
gpu_data["powerDraw"] = gpu_info.power_w
gpu_data["memoryLabel"] = "VRAM Partition" if gpu_info.memory_type == "unified" else "VRAM"

services_data = [{"name": s.name, "status": s.status, "port": s.external_port, "uptime": None} for s in service_statuses]
services_data = [
{
"id": s.id,
"name": s.name,
"status": s.status,
"port": s.external_port,
"uptime": None,
}
for s in service_statuses
]

model_data = None
if model_info:
Expand Down Expand Up @@ -394,6 +421,86 @@ async def _build_api_status() -> dict:

# --- Settings ---

def _format_uptime_label(seconds: int) -> str:
"""Render uptime in a compact human-readable format."""
if seconds <= 0:
return "0m"

days, remainder = divmod(seconds, 86400)
hours, remainder = divmod(remainder, 3600)
minutes, _ = divmod(remainder, 60)

parts = []
if days:
parts.append(f"{days}d")
if hours:
parts.append(f"{hours}h")
if minutes or not parts:
parts.append(f"{minutes}m")
return " ".join(parts[:2])


def _compute_storage_payload() -> dict:
    """Compute the storage breakdown shown on the Settings page.

    Walks the data directory to size the model and vector-DB trees, then
    expresses each slice as a fraction of total disk capacity.
    """
    base = Path(DATA_DIR)

    def _tree_size_gb(root: Path) -> float:
        """Best-effort recursive size of *root* in GiB; unreadable entries are skipped."""
        if not root.exists():
            return 0.0
        size = 0
        try:
            for entry in root.rglob("*"):
                if entry.is_file():
                    try:
                        size += entry.stat().st_size
                    except OSError:
                        pass  # file vanished or is unreadable mid-walk
        except (PermissionError, OSError):
            pass  # directory itself unreadable — report what we got
        return round(size / (1024**3), 2)

    disk_info = get_disk_usage()
    models_gb = _tree_size_gb(base / "models")
    vector_gb = _tree_size_gb(base / "qdrant")
    # "Other" is whatever lives under DATA_DIR outside the two known trees;
    # clamp at zero in case rounding makes the subtraction go negative.
    other_gb = _tree_size_gb(base) - models_gb - vector_gb
    total_data_gb = models_gb + vector_gb + max(other_gb, 0)

    def _entry(gb: float) -> dict:
        """Format one breakdown row with its share of the whole disk."""
        pct = round(gb / disk_info.total_gb * 100, 1) if disk_info.total_gb else 0
        return {"formatted": f"{gb:.1f} GB", "gb": gb, "percent": pct}

    return {
        "models": _entry(models_gb),
        "vector_db": _entry(vector_gb),
        "total_data": _entry(total_data_gb),
        "disk": {
            "used_gb": disk_info.used_gb,
            "total_gb": disk_info.total_gb,
            "percent": disk_info.percent,
        },
    }


async def _build_storage_payload() -> dict:
    """Get storage breakdown for Settings page (cached, runs in thread pool)."""
    payload = _cache.get("storage")
    if payload is None:
        # Directory walks are blocking; keep them off the event loop.
        payload = await asyncio.to_thread(_compute_storage_payload)
        _cache.set("storage", payload, _STORAGE_CACHE_TTL)
    return payload


@app.get("/api/service-tokens", dependencies=[Depends(verify_api_key)])
async def service_tokens():
"""Return connection tokens for services that need browser-side auth."""
Expand Down Expand Up @@ -440,47 +547,45 @@ async def get_external_links(api_key: str = Depends(verify_api_key)):

@app.get("/api/storage")
async def api_storage(api_key: str = Depends(verify_api_key)):
    """Return the cached storage breakdown for the Settings page.

    Thin route wrapper: caching and thread-pool offload live in
    ``_build_storage_payload`` so ``/api/settings`` can reuse them.
    """
    return await _build_storage_payload()


@app.get("/api/settings")
async def api_settings(api_key: str = Depends(verify_api_key)):
    """Return a consolidated Settings payload for the dashboard.

    Gathers status aggregation, the storage breakdown, and version/update
    info concurrently so the Settings page needs only one round trip.
    """
    status_data, storage_data, version_data = await asyncio.gather(
        _safe_api_status("/api/settings"),
        _build_storage_payload(),
        updates.resolve_version_info(),
    )

    # Trim the status service entries down to the fields Settings renders.
    services = [
        {
            "id": svc.get("id"),
            "name": svc.get("name"),
            "status": svc.get("status", "unknown"),
            "port": svc.get("port"),
        }
        for svc in status_data.get("services", [])
    ]

    inference = status_data.get("inference", {})
    return {
        # Prefer the resolver's notion of the current version; fall back to
        # whatever the status aggregation reported.
        "version": version_data.get("current") or status_data.get("version"),
        "installDate": updates.resolve_install_date(),
        "tier": status_data.get("tier", "Unknown"),
        "uptime": _format_uptime_label(status_data.get("uptime", 0)),
        "hostname": socket.gethostname(),
        "updates": version_data,
        "services": services,
        "storage": storage_data,
        "gpu": status_data.get("gpu"),
        # Merge static model metadata with live inference metrics.
        "model": {
            **(status_data.get("model") or {}),
            "loadedModel": inference.get("loadedModel"),
            "tokensPerSecond": inference.get("tokensPerSecond"),
            "contextSize": inference.get("contextSize"),
        },
    }


# --- Startup ---
Expand Down
Loading