-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathaccuracy.py
More file actions
351 lines (282 loc) · 10.3 KB
/
accuracy.py
File metadata and controls
351 lines (282 loc) · 10.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
import os
import base64
import json
import requests
import mss
import subprocess
import wave
import pyaudio
import vosk
import threading
import sys
import platform
import random
import tkinter as tk
from tkinter import messagebox, simpledialog
from pystray import Icon, MenuItem, Menu
from PIL import Image
# Determine if running as bundled executable or script
def get_base_dir():
    """Return the directory that holds the app's resources.

    Works both when running from source and when frozen into an
    executable (freezers such as PyInstaller set ``sys.frozen``).
    """
    if getattr(sys, 'frozen', False):
        # Frozen build: resources live next to the executable itself.
        return os.path.dirname(sys.executable)
    # Plain script: resources live next to this source file.
    return os.path.dirname(os.path.abspath(__file__))
BASE_DIR = get_base_dir()
# API Key management with first-run setup
def get_or_request_api_key():
    """Get API key from file or prompt user to enter it"""
    key_file = os.path.join(BASE_DIR, "gemini_key.txt")
    # Reuse a previously saved key when one is present and non-empty.
    if os.path.exists(key_file):
        with open(key_file, "r") as f:
            saved = f.read().strip()
        if saved:
            return saved
    # First run (or empty key file): walk the user through setup with Tk dialogs.
    root = tk.Tk()
    root.withdraw()  # dialogs only — no main window
    messagebox.showinfo(
        "Beacon Setup",
        "Welcome to Beacon!\n\n"
        "To use this assistant, you need a Google Gemini API key.\n\n"
        "Get your free API key at:\n"
        "https://aistudio.google.com/app/apikey\n\n"
        "You'll be prompted to enter it next."
    )
    entered = simpledialog.askstring(
        "Gemini API Key",
        "Please enter your Gemini API key:",
        parent=root
    )
    if not entered or not entered.strip():
        # No usable key supplied — the assistant cannot run without one.
        messagebox.showerror("Error", "API key is required. Application will exit.")
        root.destroy()
        sys.exit(1)
    # Persist the key so future launches skip this dialog (best effort: a
    # failed save is reported but does not abort the current session).
    try:
        with open(key_file, "w") as f:
            f.write(entered.strip())
        messagebox.showinfo("Success", "API key saved successfully!\n\nBeacon will now start.")
    except Exception as e:
        messagebox.showerror("Error", f"Failed to save API key: {e}")
    root.destroy()
    return entered.strip()
API_KEY = get_or_request_api_key()
# Gemini REST endpoint; the API key travels as a query parameter.
API_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key={API_KEY}"
# Microphone capture settings: 16 kHz mono, 16-bit samples, small buffer.
SAMPLE_RATE = 16000
CHANNELS = 1
FORMAT = pyaudio.paInt16
CHUNK = 256
# Text-to-speech defaults consumed by speak().
TTS_RATE = 1
TTS_VOLUME = 100
TTS_GENDER = "Female"
# External speech-to-text binary and models, expected beside the app directory.
WHISPER_PATH = os.path.join(BASE_DIR, "whisper.cpp", "whisper-cli.exe")
MODEL_PATH = os.path.join(BASE_DIR, "models", "ggml-tiny.en-q5_1.bin")
VOSK_MODEL_PATH = os.path.join(BASE_DIR, "models", "vosk-model-small-en-us-0.15")
# Standing instruction appended to every user query (user is blind; keep replies short).
CONSTANT_TEXT = "Assume iam blind and i cannot see desktop.give me reply in short as much you(depends upon question)."
history = []  # rolling Gemini conversation: alternating user/model entries
MAX_MEMORY = 5  # max remembered exchanges (each exchange = user + model entry)
# Randomized spoken acknowledgements used instead of beeps.
WAKE_REPLIES = ["Yes Sir", "I'm listening", "Go ahead Sir", "At your service"]
RECORD_REPLIES = ["Got it Sir", "Understood", "Alright Sir", "Noted"]
def _hidden_startupinfo():
if platform.system() == "Windows":
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
return si
return None
def speak(text: str):
    """Speak *text* aloud using the platform's native TTS facility.

    Windows: System.Speech via a hidden PowerShell process (honours
    TTS_VOLUME, TTS_RATE and TTS_GENDER). macOS: the `say` command.
    Linux: `espeak`. Anywhere else the text is just printed.
    """
    system = platform.system()
    print("🔊 Speaking:", text)
    if system == "Windows":
        # Escape for a single-quoted PowerShell string literal ('' == ')
        # and strip newlines so the text stays on one PS line.
        safe_text = text.replace("'", "''").replace("\r", " ").replace("\n", " ")
        ps_script = (
            "Add-Type -AssemblyName System.Speech;"
            "$speak = New-Object System.Speech.Synthesis.SpeechSynthesizer;"
            f"$speak.Volume = {TTS_VOLUME};"
            f"$speak.Rate = {TTS_RATE};"
            # Prefer an installed voice matching the configured gender, if any.
            f"$v = ($speak.GetInstalledVoices() | Where-Object {{ $_.VoiceInfo.Gender -eq '{TTS_GENDER}' }} | Select-Object -First 1);"
            "if ($v) { $speak.SelectVoice($v.VoiceInfo.Name) };"
            f"$speak.Speak('{safe_text}');"
        )
        # Run PowerShell without a visible console window.
        subprocess.run(
            ["powershell", "-NoProfile", "-Command", ps_script],
            check=False,
            startupinfo=_hidden_startupinfo(),
            creationflags=subprocess.CREATE_NO_WINDOW if system == "Windows" else 0
        )
    elif system == "Darwin":
        subprocess.run(["say", text], check=False)
    elif system == "Linux":
        # espeak voice variants encode gender; -s is words per minute.
        gender_flag = "en+m1" if TTS_GENDER.lower() == "male" else "en+f1"
        subprocess.run(
            ["espeak", "-v", gender_flag, "-s", str(int(150 * TTS_RATE)), text],
            check=False
        )
    else:
        # Unknown platform: fall back to printing the reply.
        print("Reply:", text)
def beep(freq=1000, duration_ms=200):
    """Play a short tone (Windows) or the terminal bell (elsewhere); never raises."""
    try:
        if platform.system() == "Windows":
            import winsound
            winsound.Beep(int(freq), int(duration_ms))
        else:
            # No portable tone API in the stdlib: emit the terminal bell.
            sys.stdout.write("\a")
            sys.stdout.flush()
    except Exception:
        # Audible feedback is cosmetic — swallow any failure.
        pass
def take_screenshot():
    """Capture the screen to BASE_DIR/screen.png and return the file path."""
    filename = os.path.join(BASE_DIR, "screen.png")
    with mss.mss() as sct:
        sct.shot(output=filename)
    print("📸 Screenshot taken:", filename)
    return filename
def record_audio(output_file="command.wav", record_seconds=3):
    """Record `record_seconds` of microphone audio into BASE_DIR/output_file.

    The capture uses the module-level SAMPLE_RATE/CHANNELS/FORMAT/CHUNK
    settings. Speaks a short confirmation phrase when done and returns the
    path of the written WAV file.
    """
    output_file = os.path.join(BASE_DIR, output_file)
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=CHANNELS,
                    rate=SAMPLE_RATE, input=True,
                    frames_per_buffer=CHUNK)
    print("🎤 Recording... Speak your command!")
    frames = []
    try:
        for _ in range(0, int(SAMPLE_RATE / CHUNK * record_seconds)):
            # exception_on_overflow=False: drop overflowed frames rather than crash.
            data = stream.read(CHUNK, exception_on_overflow=False)
            frames.append(data)
        print("✅ Done recording.")
    finally:
        # Fix: always release the audio device, even if a read fails mid-loop.
        stream.stop_stream()
        stream.close()
        p.terminate()
    # Fix: context manager guarantees the WAV file is closed on any error.
    with wave.open(output_file, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(SAMPLE_RATE)
        wf.writeframes(b''.join(frames))
    # Speak random reply instead of beep
    speak(random.choice(RECORD_REPLIES))
    return output_file
def transcribe_audio(audio_file):
    """Run whisper.cpp on a WAV file; return the transcription, or None on failure."""
    print("📝 Running Whisper.cpp...")
    cmd = [
        WHISPER_PATH,
        "-m", MODEL_PATH,
        "-f", audio_file,
        "--output-txt",
        "--threads", str(os.cpu_count()),
    ]
    # Run the CLI hidden; its exit status is ignored — success is judged by
    # whether the output .txt file appears.
    subprocess.run(
        cmd,
        capture_output=True,
        text=True,
        check=False,
        startupinfo=_hidden_startupinfo(),
        creationflags=subprocess.CREATE_NO_WINDOW if platform.system() == "Windows" else 0
    )
    # whisper-cli writes its transcript next to the input as "<audio>.txt".
    txt_file = audio_file + ".txt"
    if not os.path.exists(txt_file):
        print("⚠️ Whisper failed (no .txt produced).")
        return None
    with open(txt_file, "r", encoding="utf-8", errors="ignore") as f:
        transcription = f.read().strip()
    print("📖 You said:", transcription)
    return transcription
def ask_gemini(user_text, screenshot_path):
    """Send *user_text* plus the screenshot to Gemini; return the reply text.

    Appends both the request and the model reply to the module-level
    `history` list so follow-up questions keep conversational context.
    On an unexpected response shape, returns the whole payload as a
    pretty-printed JSON string prefixed with "Error:" instead of raising.
    """
    with open(screenshot_path, "rb") as f:
        img_bytes = f.read()
    # The REST API expects inline image data as base64 text.
    img_b64 = base64.b64encode(img_bytes).decode("utf-8")
    history.append({
        "role": "user",
        "parts": [
            {"text": user_text},
            {
                "inline_data": {
                    "mime_type": "image/png",
                    "data": img_b64
                }
            }
        ]
    })
    # Cap context at MAX_MEMORY exchanges (2 entries each); drop the oldest pair.
    if len(history) > MAX_MEMORY * 2:
        del history[0:2]
    payload = {"contents": history}
    headers = {"Content-Type": "application/json"}
    # NOTE(review): no timeout= here — a hung request blocks the assistant loop; consider adding one.
    response = requests.post(API_URL, headers=headers, json=payload)
    data = response.json()
    try:
        reply = data["candidates"][0]["content"]["parts"][0]["text"]
        history.append({"role": "model", "parts": [{"text": reply}]})
        print("🤖 Gemini:", reply)
        return reply
    except Exception:
        # Error or malformed response: surface the raw payload for debugging.
        return f"Error: {json.dumps(data, indent=2)}"
def listen_for_wake_word():
    """Block until the phrase "hey beacon" is heard, then return True.

    Exits the process if the Vosk model directory is missing. Uses a
    grammar restricted to the wake phrase for detection. The audio
    stream and PyAudio instance are released before returning — the
    original leaked one open microphone stream per wake cycle.
    """
    print("👂 Say 'hey beacon' to activate...")
    if not os.path.exists(VOSK_MODEL_PATH):
        print("⚠️ Vosk model not found! Please download and place in:", VOSK_MODEL_PATH)
        sys.exit(1)
    model = vosk.Model(VOSK_MODEL_PATH)
    pa = pyaudio.PyAudio()
    stream = pa.open(rate=16000, channels=1, format=pyaudio.paInt16,
                     input=True, frames_per_buffer=4000)
    stream.start_stream()
    # Constrain the recognizer's grammar to just the wake phrase.
    rec = vosk.KaldiRecognizer(model, 16000, '["hey beacon"]')
    try:
        while True:
            data = stream.read(4000, exception_on_overflow=False)
            if rec.AcceptWaveform(data):
                result = json.loads(rec.Result())
                text = result.get("text", "").lower()
                if "hey beacon" in text:
                    print("✅ Wake word detected!")
                    # Speak random wake response instead of beep
                    speak(random.choice(WAKE_REPLIES))
                    return True
    finally:
        # Fix: release the microphone; previously each call leaked an open stream.
        stream.stop_stream()
        stream.close()
        pa.terminate()
def start_screenshot_thread():
    """Kick off a screenshot capture in a background thread.

    Returns ``(thread, container)`` where ``container["path"]`` holds the
    saved screenshot path once the thread finishes, or None on failure.
    """
    result = {"path": None}

    def _capture():
        # Any failure is logged and leaves the path as None; the caller
        # decides how to recover.
        try:
            result["path"] = take_screenshot()
        except Exception as e:
            print("⚠️ Screenshot thread error:", e)
            result["path"] = None

    worker = threading.Thread(target=_capture, daemon=True)
    worker.start()
    return worker, result
def assistant_loop():
    """Main cycle: wake word -> screenshot + record -> transcribe -> Gemini -> speak."""
    while True:
        if listen_for_wake_word():
            # Capture the screen in parallel with recording so the screenshot
            # reflects what the user was looking at when they spoke.
            ss_thread, ss_container = start_screenshot_thread()
            audio_file = record_audio()
            ss_thread.join(timeout=2.0)
            # Fall back to the default screenshot path if the thread failed.
            screenshot_path = ss_container.get("path") or os.path.join(BASE_DIR, "screen.png")
            if not screenshot_path or not os.path.exists(screenshot_path):
                # Last resort: retry the capture synchronously.
                try:
                    screenshot_path = take_screenshot()
                except Exception:
                    screenshot_path = os.path.join(BASE_DIR, "screen.png")
            text = transcribe_audio(audio_file)
            if not text:
                # Nothing transcribed: go back to listening.
                continue
            # Append the standing instruction so replies stay short and blind-friendly.
            combined_text = f"{text}. {CONSTANT_TEXT}"
            print("📝 Final user input:", combined_text)
            reply = ask_gemini(combined_text, screenshot_path)
            speak(reply)
def create_icon():
    """Load logo.ico for the tray icon, or fall back to a plain black square."""
    logo_path = os.path.join(BASE_DIR, "logo.ico")
    if not os.path.exists(logo_path):
        # No bundled logo: use a 64x64 solid black placeholder image.
        return Image.new('RGB', (64, 64), (0, 0, 0))
    return Image.open(logo_path)
def on_quit(icon, item):
    """Tray 'Quit' handler: stop the tray loop, then exit the whole process."""
    print("👋 Assistant stopped.")
    icon.stop()
    # Exiting here also ends the daemon assistant thread with the process.
    sys.exit(0)
def run_tray():
    """Build the system-tray icon with a Quit item and run its event loop (blocks)."""
    quit_menu = Menu(MenuItem("Quit", on_quit))
    tray = Icon("Assistant", create_icon(), menu=quit_menu)
    tray.run()
if __name__ == "__main__":
    # Run the assistant loop on a daemon thread so the tray icon (which must
    # own the main thread's event loop) can run in the foreground.
    t = threading.Thread(target=assistant_loop, daemon=True)
    t.start()
    run_tray()