Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,8 @@ async def list_org_repos(request: OrgInfoRequest):
return {"status": "success", "data": result}

except Exception as e:
logger.exception("Error listing org repos")
raise HTTPException(status_code=500, detail=str(e))
logger.error(f"Error type: {type(e).__name__}")
raise HTTPException(status_code=500, detail="Failed to list organization repositories")

@app.post("/github_support")
async def get_github_supp(request: RepoInfoRequest):
Expand All @@ -78,8 +78,8 @@ async def get_github_supp(request: RepoInfoRequest):
return RepoInfoResponse(status="error", data={}, error=result["error"])
return RepoInfoResponse(status="success", data=result)
except Exception as e:
logger.exception("Error getting repo info")
raise HTTPException(status_code=500, detail=str(e))
logger.error(f"Error type: {type(e).__name__}")
raise HTTPException(status_code=500, detail="Failed to get repository information")

if __name__ == "__main__":
import uvicorn
Expand Down
12 changes: 9 additions & 3 deletions backend/app/agents/devrel/github/services/github_mcp_service.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
import os
import requests
import asyncio
import logging
from typing import Optional
import config

logger = logging.getLogger(__name__)

class GitHubMCPService:
def __init__(self, token: str = None):
self.token = token or config.GITHUB_TOKEN
Expand All @@ -18,7 +21,8 @@ def repo_query(self, owner: str, repo: str) -> dict:
resp = requests.get(url, headers=headers, timeout=15)
resp.raise_for_status()
except requests.exceptions.RequestException as e:
return {"error": "Request failed", "message": str(e)}
logger.error(f"Error type: GitHubRequestError owner={owner} repo={repo}")
return {"error": "Request failed", "message": "GitHub API request failed"}

data = resp.json()
license_info = data.get("license")
Expand Down Expand Up @@ -51,7 +55,8 @@ def list_repo_issues(self, owner: str, repo: str, state: str = "open") -> list:
resp = requests.get(url, headers=headers, timeout=15)
resp.raise_for_status()
except requests.exceptions.RequestException as e:
return {"error": "Request failed", "message": str(e)}
logger.error(f"Error type: GitHubIssuesRequestError owner={owner} repo={repo}")
return {"error": "Request failed", "message": "Failed to fetch issues"}

issues = resp.json()
return [
Expand All @@ -76,7 +81,8 @@ def list_org_repos(self, org: str) -> list:
resp = requests.get(url, headers=headers, timeout=15)
resp.raise_for_status()
except requests.exceptions.RequestException as e:
return {"error": "Request failed", "message": str(e)}
logger.error(f"Error type: GitHubOrgReposRequestError org={org}")
return {"error": "Request failed", "message": "Failed to fetch repositories"}

repos = resp.json()
return [
Expand Down
4 changes: 2 additions & 2 deletions backend/app/agents/devrel/github/tools/github_support.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,5 +108,5 @@ async def handle_github_supp(query: str, org: Optional[str] = None):
}

except Exception as e:
logger.exception("GitHub support error: %s", e)
return {"status": "error", "message": str(e)}
logger.error(f"Error type: {type(e).__name__}")
return {"status": "error", "message": "An error occurred while fetching GitHub data"}
2 changes: 1 addition & 1 deletion backend/app/agents/devrel/github/tools/repo_support.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ async def handle_repo_support(query: str) -> Dict[str, Any]:
}

except Exception as e:
logger.exception("Repository support error")
logger.error(f"Error type: {type(e).__name__}")
return {
"status": "error",
"sub_function": "repo_support",
Expand Down
162 changes: 162 additions & 0 deletions backend/app/core/cache.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,162 @@
"""
In-memory cache utility with TTL support for GitHub API responses.
Production-ready caching layer with thread safety.
"""
import functools
import hashlib
import json
import logging
import time
from threading import Lock
from typing import Any, Callable, Optional

logger = logging.getLogger(__name__)


class CacheEntry:
    """A cached value paired with its creation timestamp and lifetime."""

    def __init__(self, value: Any, ttl_seconds: int):
        # Capture the storage time once so expiry is measured from insertion.
        self.value = value
        self.created_at = time.time()
        self.ttl_seconds = ttl_seconds

    def is_expired(self) -> bool:
        """Return True once more than ttl_seconds have elapsed since creation."""
        age = time.time() - self.created_at
        return age > self.ttl_seconds

    def __repr__(self):
        time_left = (self.created_at + self.ttl_seconds) - time.time()
        return f"CacheEntry(TTL: {time_left:.1f}s remaining)"


class SimpleCache:
    """
    Thread-safe in-memory cache keyed by opaque string keys.

    Expired entries are dropped lazily on read (or via cleanup_expired);
    when the cache reaches max_size, the oldest ~10% of entries are evicted.
    """

    def __init__(self, max_size: int = 1000):
        self._cache: dict[str, CacheEntry] = {}
        self._lock = Lock()  # guards _cache and the hit/miss counters
        self.max_size = max_size
        self.hits = 0
        self.misses = 0

    def _generate_key(self, prefix: str, **kwargs) -> str:
        """Derive a deterministic cache key from a prefix plus kwargs."""
        payload = json.dumps(kwargs, sort_keys=True)  # sort_keys => stable across calls
        return hashlib.md5(f"{prefix}:{payload}".encode()).hexdigest()

    def get(self, key: str) -> Optional[Any]:
        """Return the cached value for key, or None on a miss or expiry."""
        with self._lock:
            entry = self._cache.get(key)
            if entry is None:
                self.misses += 1
                logger.debug(f"Cache miss: {key}")
                return None
            if entry.is_expired():
                # Lazy invalidation: drop the stale entry and report a miss.
                del self._cache[key]
                self.misses += 1
                logger.debug(f"Cache miss (expired): {key}")
                return None
            self.hits += 1
            logger.debug(f"Cache hit: {key} ({entry})")
            return entry.value

    def set(self, key: str, value: Any, ttl_seconds: int = 300) -> None:
        """Store value under key, evicting oldest entries if at capacity."""
        with self._lock:
            if len(self._cache) >= self.max_size:
                # Evict ~10% of capacity (at least one entry), oldest first.
                batch = max(1, int(self.max_size * 0.1))
                ordered = sorted(self._cache.items(), key=lambda kv: kv[1].created_at)
                victims = [k for k, _ in ordered[:batch]]
                for victim in victims:
                    del self._cache[victim]
                logger.debug(f"Cache evicted {len(victims)} old entries")

            self._cache[key] = CacheEntry(value, ttl_seconds)
            logger.debug(f"Cache set: {key} (TTL: {ttl_seconds}s)")

    def clear(self) -> None:
        """Drop every entry (hit/miss counters are left untouched)."""
        with self._lock:
            removed = len(self._cache)
            self._cache.clear()
            logger.info(f"Cache cleared ({removed} entries removed)")

    def stats(self) -> dict[str, Any]:
        """Return a snapshot of size, hit/miss counts and hit rate."""
        with self._lock:
            total = self.hits + self.misses
            rate = (self.hits / total * 100) if total > 0 else 0
            return {
                "size": len(self._cache),
                "hits": self.hits,
                "misses": self.misses,
                "hit_rate": f"{rate:.1f}%",
                "total_requests": total,
            }

    def cleanup_expired(self) -> int:
        """Remove all expired entries. Returns count of removed entries."""
        with self._lock:
            stale = [key for key, entry in self._cache.items() if entry.is_expired()]
            for key in stale:
                del self._cache[key]
            if stale:
                logger.debug(f"Cache cleanup removed {len(stale)} expired entries")
            return len(stale)


# Global cache instance (module-level singleton: all importers share the same
# entries and hit/miss statistics).
_cache = SimpleCache(max_size=1000)


def get_cache() -> SimpleCache:
    """Return the global SimpleCache instance shared by the whole process."""
    return _cache


def cache_result(prefix: str, ttl_seconds: int = 300):
    """
    Decorator to cache async function results.

    Args:
        prefix: Cache key prefix (namespaces keys per decorated function)
        ttl_seconds: Time to live for cached result (default: 5 minutes)

    Notes:
        - ``None`` results are never cached, so a miss and a cached ``None``
          are indistinguishable by design.
        - If the call arguments cannot be JSON-serialized for key generation,
          caching is skipped and the function is called directly instead of
          raising ``TypeError`` (e.g. when decorating a method, where ``self``
          is passed as a positional argument).

    Example:
        @cache_result("repo_stats", ttl_seconds=600)
        async def get_repo_stats(owner: str, repo: str):
            ...
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped coroutine
        async def wrapper(*args, **kwargs):
            cache = get_cache()
            try:
                cache_key = cache._generate_key(prefix, args=args, kwargs=kwargs)
            except TypeError:
                # Arguments are not JSON-serializable; degrade gracefully to
                # an uncached call rather than crashing the caller.
                logger.debug("Uncacheable arguments for prefix %s; bypassing cache", prefix)
                return await func(*args, **kwargs)

            # Serve from cache when a fresh entry exists.
            cached_value = cache.get(cache_key)
            if cached_value is not None:
                return cached_value

            # Miss: compute, then store for subsequent calls.
            result = await func(*args, **kwargs)

            if result is not None:
                cache.set(cache_key, result, ttl_seconds)

            return result

        return wrapper
    return decorator
2 changes: 1 addition & 1 deletion backend/app/core/dependencies.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ async def get_current_user(authorization: str = Header(None)) -> UUID:
try:
supabase = get_supabase_client()
# Verify the token and get user
user_response = supabase.auth.get_user(token)
user_response = await supabase.auth.get_user(token)

if not user_response or not user_response.user:
raise HTTPException(
Expand Down
21 changes: 21 additions & 0 deletions backend/app/core/rate_limiter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
"""
Rate limiting configuration using slowapi.
Prevents API quota exhaustion and protects against abuse.
"""
from slowapi import Limiter
from slowapi.util import get_remote_address
import logging

logger = logging.getLogger(__name__)

# Initialize the global rate limiter (module-level singleton shared by all routes)
limiter = Limiter(
    key_func=get_remote_address,  # Rate-limit per client remote address
    default_limits=["100 per minute"],  # Default fallback limit
    storage_uri="memory://",  # In-memory counters: per-process only (can upgrade to Redis)
)


def get_limiter() -> Limiter:
    """Return the global rate limiter instance.

    Accessor indirection so callers depend on a function rather than
    importing the ``limiter`` global directly.
    """
    return limiter
19 changes: 17 additions & 2 deletions backend/app/database/falkor/code-graph-backend/api/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,10 +70,15 @@ def graph_entities():
if not repo:
logging.error("Missing 'repo' parameter in request.")
return jsonify({"status": "Missing 'repo' parameter"}), 400

# Validate repo format - prevent path traversal
import re
if not re.match(r'^[a-zA-Z0-9_\-\.]+$', str(repo)):
return jsonify({'status': 'Invalid repository name format'}), 400

if not graph_exists(repo):
logging.error("Missing project %s", repo)
return jsonify({"status": f"Missing project {repo}"}), 400
return jsonify({"status": "Project not found"}), 400

try:
# Initialize the graph with the provided repo and credentials
Expand Down Expand Up @@ -161,6 +166,11 @@ def auto_complete():
repo = data.get('repo')
if repo is None:
return jsonify({'status': 'Missing mandatory parameter "repo"'}), 400

# Validate repo format - prevent path traversal
import re
if not re.match(r'^[a-zA-Z0-9_\-\.]+$', str(repo)):
return jsonify({'status': 'Invalid repository name format'}), 400

# Validate that 'prefix' is provided
prefix = data.get('prefix')
Expand All @@ -169,7 +179,7 @@ def auto_complete():

# Validate repo exists
if not graph_exists(repo):
return jsonify({'status': f'Missing project {repo}'}), 400
return jsonify({'status': 'Project not found'}), 400

# Fetch auto-completion results
completions = prefix_search(repo, prefix)
Expand Down Expand Up @@ -273,6 +283,11 @@ def find_paths():
repo = data.get('repo')
if repo is None:
return jsonify({'status': 'Missing mandatory parameter "repo"'}), 400

# Validate repo format - prevent path traversal
import re
if not re.match(r'^[a-zA-Z0-9_\-\.]+$', str(repo)):
return jsonify({'status': 'Invalid repository name format'}), 400

# Validate 'src' parameter
src = data.get('src')
Expand Down
7 changes: 6 additions & 1 deletion backend/app/services/codegraph/repo_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,12 @@ class RepoService:
def __init__(self):
self.supabase = get_supabase_client()
self.backend_url = os.getenv("CODEGRAPH_BACKEND_URL", "http://localhost:5000")
self.secret_token = os.getenv("SECRET_TOKEN", "DevRAI_CodeGraph_Secret")
self.secret_token = os.getenv("SECRET_TOKEN")
if not self.secret_token:
raise ValueError(
"SECRET_TOKEN environment variable must be set for CodeGraph backend authentication. "
"Please configure this in your .env file."
)
self.indexing_timeout = aiohttp.ClientTimeout(total=3600, connect=60)
self.query_timeout = aiohttp.ClientTimeout(total=300, connect=30)
logger.info(f"RepoService initialized with backend: {self.backend_url}")
Expand Down
9 changes: 8 additions & 1 deletion backend/integrations/discord/cogs.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,10 +29,15 @@ def __init__(self, bot: DiscordBot, queue_manager: AsyncQueueManager):

def cog_load(self):
"""Called when the cog is loaded"""
# Start the cleanup task - it will wait for the bot to be ready via before_loop
self.cleanup_expired_tokens.start()

def cog_unload(self):
self.cleanup_expired_tokens.cancel()
try:
if self.cleanup_expired_tokens.is_running():
self.cleanup_expired_tokens.cancel()
except Exception as e:
logger.warning(f"Error cancelling cleanup task: {type(e).__name__}")

@tasks.loop(minutes=5)
async def cleanup_expired_tokens(self):
Expand All @@ -47,7 +52,9 @@ async def cleanup_expired_tokens(self):
@cleanup_expired_tokens.before_loop
async def before_cleanup(self):
"""Wait until the bot is ready before starting cleanup"""
print("--> Waiting for bot to be ready before starting cleanup task...")
await self.bot.wait_until_ready()
print("--> Bot is ready, starting cleanup task...")

@app_commands.command(name="reset", description="Reset your DevRel thread and memory.")
async def reset_thread(self, interaction: discord.Interaction):
Expand Down
Loading