quick_benchmark.py
#!/usr/bin/env python3
"""
Quick RAG System Performance Test

A lightweight benchmark script for rapid performance testing.
Focuses on the most critical performance metrics.
"""
import asyncio
import statistics
import sys
import time

try:
    from rag_system.config import RAGConfig
    from rag_system.document_processing.embedder import Embedder, EmbeddingConfig
    from rag_system.retrieval.retriever import Retriever
except ImportError as e:
    print(f"❌ Failed to import RAG system: {e}")
    print("Make sure to install with: pipx install .")
    sys.exit(1)
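

# NOTE: QuickBenchmark assumes this script is run from the project root so
# that the relative path "config/rag_config.yaml" resolves; adjust the path
# if running it from elsewhere.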
class QuickBenchmark:
    """Quick performance benchmark for RAG system."""

    def __init__(self):
        self.config = RAGConfig.from_file("config/rag_config.yaml")

        # Test data
        self.test_queries = [
            "What is machine learning?",
            "How do neural networks work?",
            "Explain deep learning",
            "What is natural language processing?",
            "How does computer vision work?",
        ]
        self.test_texts = [
            "Machine learning is a subset of artificial intelligence.",
            "Neural networks are computational models inspired by biology.",
            "Deep learning uses multiple layers to learn data representations.",
            "Natural language processing enables computers to understand human language.",
            "Computer vision allows machines to interpret visual information.",
        ]
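
    # The test corpora above are intentionally tiny, so the numbers this
    # script reports indicate the relative health of each subsystem rather
    # than production-scale performance.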
    def run_quick_test(self) -> dict:
        """Run quick performance test."""
        print("🚀 Quick RAG Performance Test")
        print("=" * 40)
        results = {}

        # Test 1: Embedding Performance
        print("\n1️⃣ Testing Embedding Performance...")
        results["embedding"] = self._test_embedding()

        # Test 2: Retrieval Performance
        print("\n2️⃣ Testing Retrieval Performance...")
        results["retrieval"] = self._test_retrieval()

        # Test 3: Cache Performance
        print("\n3️⃣ Testing Cache Performance...")
        results["cache"] = self._test_cache()

        # Test 4: Batch Performance
        print("\n4️⃣ Testing Batch Performance...")
        results["batch"] = self._test_batch()

        return results
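
    # Note: the first embed_text() call below also pays any one-time model
    # load / warm-up cost, so the reported "single embedding" time can look
    # pessimistic compared to steady-state latency.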
    def _test_embedding(self) -> dict:
        """Test embedding performance."""
        embedding_config = EmbeddingConfig(
            model_name=self.config.embedding_model,
            batch_size=self.config.embedding_batch_size,
            device=self.config.embedding_device,
            show_progress_bar=False,
        )
        embedder = Embedder(embedding_config)

        # Single embedding test
        start_time = time.time()
        embedder.embed_text(self.test_texts[0])
        single_time = time.time() - start_time

        # Batch embedding test
        start_time = time.time()
        batch_embeddings = embedder.embed_batch(self.test_texts)
        batch_time = time.time() - start_time

        successful = sum(1 for e in batch_embeddings if e is not None)
        # Guard against a zero-duration batch, mirroring the guards used in
        # the cache and batch tests below
        throughput = successful / batch_time if batch_time > 0 else float("inf")

        print(f" Single embedding: {single_time:.3f}s")
        print(f" Batch ({len(self.test_texts)}): {batch_time:.3f}s")
        print(f" Throughput: {throughput:.1f} texts/sec")

        return {
            "single_time": single_time,
            "batch_time": batch_time,
            "throughput": throughput,
            "batch_size": len(self.test_texts),
        }
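
    # Retrieval latency is averaged over the first three test queries; with
    # so few samples, statistics.median() would be more robust to a single
    # outlier if that matters for your setup.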
    def _test_retrieval(self) -> dict:
        """Test retrieval performance."""
        retriever = Retriever(self.config)
        times = []
        result_counts = []

        for query in self.test_queries[:3]:
            start_time = time.time()
            try:
                documents = retriever.retrieve(query, top_k=5)
                query_time = time.time() - start_time
                times.append(query_time)
                result_counts.append(len(documents))
                print(f" Query: {query_time:.3f}s ({len(documents)} results)")
            except Exception as e:
                print(f" Query failed: {e}")
                continue

        avg_time = statistics.mean(times) if times else 0
        avg_results = statistics.mean(result_counts) if result_counts else 0

        return {"avg_time": avg_time, "avg_results": avg_results, "successful_queries": len(times)}
    def _test_cache(self) -> dict:
        """Test cache performance."""
        retriever = Retriever(self.config)
        test_query = "What is machine learning?"

        # First query (cache miss)
        start_time = time.time()
        try:
            retriever.retrieve(test_query)
            miss_time = time.time() - start_time

            # Second query (cache hit)
            start_time = time.time()
            retriever.retrieve(test_query)
            hit_time = time.time() - start_time

            speedup = miss_time / hit_time if hit_time > 0 else float("inf")

            print(f" Cache miss: {miss_time:.3f}s")
            print(f" Cache hit: {hit_time:.3f}s")
            print(f" Speedup: {speedup:.1f}x")

            return {
                "miss_time": miss_time,
                "hit_time": hit_time,
                "speedup": speedup,
                "cache_working": speedup > 2.0,
            }
        except Exception as e:
            print(f" Cache test failed: {e}")
            return {"error": str(e)}
    def _test_batch(self) -> dict:
        """Test batch vs individual operations."""
        retriever = Retriever(self.config)
        queries = self.test_queries[:3]

        # Individual queries
        start_time = time.time()
        individual_results = []
        for query in queries:
            try:
                result = retriever.retrieve(query, top_k=3)
                individual_results.append(result)
            except Exception:
                continue
        individual_time = time.time() - start_time

        # Batch queries
        start_time = time.time()
        try:
            asyncio.run(retriever.batch_retrieve(queries, top_k=3))
            batch_time = time.time() - start_time

            speedup = individual_time / batch_time if batch_time > 0 else 0

            print(f" Individual: {individual_time:.3f}s")
            print(f" Batch: {batch_time:.3f}s")
            print(f" Speedup: {speedup:.1f}x")

            return {
                "individual_time": individual_time,
                "batch_time": batch_time,
                "speedup": speedup,
                "batch_working": speedup > 1.5,
            }
        except Exception as e:
            print(f" Batch test failed: {e}")
            return {"error": str(e)}
    def print_summary(self, results: dict):
        """Print benchmark summary."""
        print("\n" + "=" * 40)
        print("📊 QUICK BENCHMARK SUMMARY")
        print("=" * 40)

        # Performance scores
        scores = []

        if "embedding" in results:
            throughput = results["embedding"]["throughput"]
            if throughput > 50:
                scores.append("🟢 Embedding: Excellent")
            elif throughput > 20:
                scores.append("🟡 Embedding: Good")
            else:
                scores.append("🔴 Embedding: Needs optimization")

        if "retrieval" in results:
            avg_time = results["retrieval"]["avg_time"]
            if avg_time < 1.0:
                scores.append("🟢 Retrieval: Fast")
            elif avg_time < 3.0:
                scores.append("🟡 Retrieval: Moderate")
            else:
                scores.append("🔴 Retrieval: Slow")

        if "cache" in results and "speedup" in results["cache"]:
            speedup = results["cache"]["speedup"]
            if speedup > 10:
                scores.append("🟢 Cache: Excellent")
            elif speedup > 3:
                scores.append("🟡 Cache: Working")
            else:
                scores.append("🔴 Cache: Not effective")

        if "batch" in results and "speedup" in results["batch"]:
            speedup = results["batch"]["speedup"]
            if speedup > 3:
                scores.append("🟢 Batch: Excellent")
            elif speedup > 1.5:
                scores.append("🟡 Batch: Working")
            else:
                scores.append("🔴 Batch: Not effective")

        for score in scores:
            print(f" {score}")

        # Configuration info
        print("\n⚙️ Configuration:")
        print(f" • Embedding batch size: {self.config.embedding_batch_size}")
        print(f" • Cache enabled: {self.config.retrieval_enable_cache}")
        print(f" • gRPC enabled: {self.config.qdrant_prefer_grpc}")

        # Overall assessment
        green_count = sum(1 for s in scores if s.startswith("🟢"))
        total_tests = len(scores)

        if green_count == total_tests:
            print(
                f"\n🎉 Overall: Excellent performance ({green_count}/{total_tests} tests optimal)"
            )
        elif green_count >= total_tests * 0.7:
            print(f"\n👍 Overall: Good performance ({green_count}/{total_tests} tests optimal)")
        else:
            print(f"\n⚠️ Overall: Needs optimization ({green_count}/{total_tests} tests optimal)")

        print("=" * 40)


def main():
    """Run quick benchmark."""
    try:
        benchmark = QuickBenchmark()
        results = benchmark.run_quick_test()
        benchmark.print_summary(results)
        print("\n✅ Quick benchmark completed!")
        print("💡 For detailed analysis, run: python benchmark_rag_performance.py")
    except KeyboardInterrupt:
        print("\n⚠️ Benchmark interrupted by user")
    except Exception as e:
        print(f"\n❌ Benchmark failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()