Skip to content

Commit 443299d

Browse files
Dev NirwalDev Nirwal
authored and committed
Feature: add dynamic Gemini model selection in UI and backend
1 parent c7135e9 commit 443299d

3 files changed

Lines changed: 72 additions & 10 deletions

File tree

chat/templates/chat/index.html

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,15 @@
142142
color: #ff4d4d;
143143
background-color: rgba(255, 77, 77, 0.05);
144144
}
145+
146+
#model-selector {
147+
border-radius: 20px;
148+
padding-left: 15px;
149+
font-size: 0.85rem;
150+
border-color: #667eea;
151+
color: #667eea;
152+
font-weight: 600;
153+
}
145154
</style>
146155
</head>
147156
<body>
@@ -200,7 +209,18 @@ <h5 class="mb-3 mt-2"><i class="fas fa-book me-2"></i>My Library</h5>
200209
<div class="col-md-9 col-lg-9 chat-container">
201210
<div class="chat-header d-flex justify-content-between align-items-center">
202211
<h4 class="m-0 text-secondary" id="current-doc-title">Select a document to begin</h4>
203-
<span class="badge bg-primary rounded-pill"><i class="fas fa-robot me-1"></i>Gemini 1.5 Flash</span>
212+
<div class="d-flex align-items-center">
213+
<label for="model-selector" class="me-2 mb-0 small text-muted">Model:</label>
214+
<select id="model-selector" class="form-select form-select-sm shadow-sm">
215+
{% for model in available_models %}
216+
<option value="{{ model.name }}" {% if model.name == "models/gemini-1.5-flash" %}selected{% endif %}>
217+
{{ model.display_name }}
218+
</option>
219+
{% empty %}
220+
<option value="models/gemini-1.5-flash">Gemini 1.5 Flash</option>
221+
{% endfor %}
222+
</select>
223+
</div>
204224
</div>
205225

206226
<div id="chat-messages" class="chat-messages d-flex flex-column">
@@ -257,6 +277,8 @@ <h5>Welcome to DocuChat!</h5>
257277
async function sendMessage() {
258278
const input = document.getElementById('query-input');
259279
const query = input.value.trim();
280+
const modelName = document.getElementById('model-selector').value;
281+
260282
if (!query || !selectedDocId) return;
261283

262284
const chatMessages = document.getElementById('chat-messages');
@@ -283,7 +305,7 @@ <h5>Welcome to DocuChat!</h5>
283305
'Content-Type': 'application/x-www-form-urlencoded',
284306
'X-CSRFToken': '{{ csrf_token }}'
285307
},
286-
body: `query=${encodeURIComponent(query)}&doc_id=${selectedDocId}`
308+
body: `query=${encodeURIComponent(query)}&doc_id=${selectedDocId}&model_name=${encodeURIComponent(modelName)}`
287309
});
288310

289311
const data = await response.json();

chat/utils.py

Lines changed: 36 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import os
2+
import google.generativeai as genai
23
from django.conf import settings
34
from langchain_community.document_loaders import PyPDFLoader
45
from langchain_text_splitters import RecursiveCharacterTextSplitter
@@ -8,6 +9,32 @@
89
from langchain_classic.chains.combine_documents import create_stuff_documents_chain
910
from langchain_core.prompts import ChatPromptTemplate
1011

12+
def list_gemini_models():
    """
    Return the Gemini models that support content generation.

    Queries the Gemini API via ``google.generativeai`` and returns a list of
    dicts, one per usable model:

        'name'         -- full model id, e.g. "models/gemini-1.5-flash"
        'display_name' -- human-readable name shown in the UI

    Returns an empty list when ``GOOGLE_API_KEY`` is not configured or when
    the API call fails, so callers can always iterate the result safely.
    """
    # Local import keeps this module's import surface unchanged.
    import logging
    logger = logging.getLogger(__name__)

    api_key = getattr(settings, 'GOOGLE_API_KEY', None)
    if not api_key:
        # No key configured: nothing to list, and calling the API would fail.
        return []

    try:
        genai.configure(api_key=api_key)
        # Only models that can generate content are useful for answering
        # chat queries; filter the rest out.
        return [
            {'name': m.name, 'display_name': m.display_name}
            for m in genai.list_models()
            if 'generateContent' in m.supported_generation_methods
        ]
    except Exception as e:
        # Best-effort: a model-listing failure must not break page rendering.
        # Log (rather than print) so the error reaches Django's logging setup.
        logger.error("Error listing Gemini models: %s", e)
        return []
37+
1138
def process_pdf(file_path, vector_store_path):
1239
"""
1340
Loads a PDF, splits it into chunks, creates embeddings, and saves to a FAISS vector store.
@@ -33,19 +60,23 @@ def process_pdf(file_path, vector_store_path):
3360

3461
return vectorstore
3562

36-
def get_answer(query, vector_store_path):
63+
def get_answer(query, vector_store_path, model_name="models/gemini-1.5-flash"):
3764
"""
3865
Loads the FAISS vector store and uses Gemini to answer the query based on the context.
3966
"""
40-
print(f"--- Asking question: {query} ---")
67+
print(f"--- Asking question: {query} using model: {model_name} ---")
4168
# 1. Load embeddings and vector store
4269
embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
4370
vectorstore = FAISS.load_local(vector_store_path, embeddings, allow_dangerous_deserialization=True)
4471
print("Vector store loaded.")
4572

46-
# 2. Initialize Gemini LLM
47-
llm = ChatGoogleGenerativeAI(model="models/gemini-flash-latest", temperature=0.3)
48-
print("LLM initialized (models/gemini-flash-latest).")
73+
# 2. Initialize Gemini LLM with the selected model
74+
# Use a fallback if model_name is invalid or not provided
75+
if not model_name:
76+
model_name = "models/gemini-1.5-flash"
77+
78+
llm = ChatGoogleGenerativeAI(model=model_name, temperature=0.3)
79+
print(f"LLM initialized ({model_name}).")
4980

5081
# 3. Define the prompt
5182
system_prompt = (

chat/views.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,13 +4,19 @@
44
from django.http import JsonResponse
55
from .forms import PDFUploadForm
66
from .models import PDFDocument
7-
from .utils import process_pdf, get_answer
7+
from .utils import process_pdf, get_answer, list_gemini_models
88
from django.conf import settings
99

1010
def index(request):
    """
    Render the main chat page.

    Supplies the template with the uploaded document library (newest first),
    a fresh upload form, and the Gemini models available for selection.
    """
    # NOTE(review): list_gemini_models() appears to hit the Gemini API on
    # every page load — confirm whether this result should be cached.
    context = {
        'documents': PDFDocument.objects.all().order_by('-uploaded_at'),
        'form': PDFUploadForm(),
        'available_models': list_gemini_models(),
    }
    return render(request, 'chat/index.html', context)
1420

1521
import logging
1622

@@ -53,6 +59,7 @@ def ask_question(request):
5359
if request.method == 'POST':
5460
query = request.POST.get('query')
5561
doc_id = request.POST.get('doc_id')
62+
model_name = request.POST.get('model_name')
5663

5764
if not query or not doc_id:
5865
return JsonResponse({'error': 'Missing query or document selection'}, status=400)
@@ -62,11 +69,13 @@ def ask_question(request):
6269
if not pdf_doc.vector_store_path or not os.path.exists(pdf_doc.vector_store_path):
6370
return JsonResponse({'error': 'Vector store not found for this document'}, status=404)
6471

65-
answer = get_answer(query, pdf_doc.vector_store_path)
72+
# Pass the selected model_name to get_answer
73+
answer = get_answer(query, pdf_doc.vector_store_path, model_name=model_name)
6674
return JsonResponse({'answer': answer})
6775
except PDFDocument.DoesNotExist:
6876
return JsonResponse({'error': 'Document not found'}, status=404)
6977
except Exception as e:
78+
logger.error(f"Error in ask_question: {str(e)}")
7079
return JsonResponse({'error': str(e)}, status=500)
7180

7281
return JsonResponse({'error': 'Invalid request method'}, status=405)

0 commit comments

Comments
 (0)