diff --git a/Backend/Automation.py b/Backend/Automation.py
new file mode 100644
index 00000000..4bbdc9dc
--- /dev/null
+++ b/Backend/Automation.py
@@ -0,0 +1,212 @@
+from webbrowser import open as webopen # Import web browser functionality.
+from pywhatkit import search, playonyt # Import functions for Google search and YouTube playback.
+from dotenv import dotenv_values # Import dotenv to manage environment variables.
+from bs4 import BeautifulSoup # Import BeautifulSoup for parsing HTML content.
+from rich import print # Import rich for styled console output.
+from groq import Groq # Import Groq for AI chat functionalities.
+import webbrowser # Import webbrowser for opening URLs.
+import subprocess # Import subprocess for interacting with the system.
+import requests # Import requests for making HTTP requests.
+import asyncio # Import asyncio for asynchronous programming.
+import os # Import os for operating system functionalities.
+
+# Load environment variables from the .env file
+env_vars = dotenv_values(".env")
+GroqAPIKey = env_vars.get("GroqAPIKey")  # Retrieve the Groq API key (None if missing from .env)
+
+# CSS class names for parsing specific elements in HTML content —
+# presumably Google answer-box widgets (clock, converter, knowledge panel);
+# TODO confirm which scraper consumes this list, it is unused in this file.
+classes = [
+    "zCubwf", "hgKElc", "LTKOO sY7ric", "ZOLcW", "gsrt vk_bk FzvWsb YwPhnf",
+    "pclqee", "tw-Data-text tw-text-small tw-ta", "1Z6rdc", "05uR6d LTKOO",
+    "V1zY6d", "webanswers-webanswers_table_webanswers-table", "dDoNo ikb4Bb gsrt",
+    "sXLaOe", "LwkfKe", "VQF4g", "qviWpe", "kno-rdesc", "SPZz6b"
+]
+
+# User-agent sent with scraping requests (macOS Chrome string, version truncated)
+useragent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/1'
+
+# Initialize the Groq client with the API key
+client = Groq(api_key=GroqAPIKey)
+
+# Predefined professional responses for user interactions (canned sign-offs).
+professional_responses = [
+    "Your satisfaction is my top priority; feel free to reach out if there's anything else I can help with.",
+    "I'm at your service for any additional questions or support you may need—don't hesitate to ask."
+]
+
+# List to store chatbot messages
+messages = []
+SystemChatBot = [{"role": "system", "content": f"Hello, I am {os.environ['Username']}, You're a content writer. You have to write content like letter,codes,applications,essays,notes,songs,poems etc "}]
+
+
+def GoogleSearch(Topic):
+ search(Topic) # Use pywhatkit's search function to perform a Google search
+ return True # Indicate success
+
+
+
+# Function to generate content using AI and save it to a file.
+def Content(Topic):
+ # Nested function to open a file in TextEdit (macOS equivalent of Notepad).
+ def OpenTextEdit(File):
+ subprocess.run(["open", "-a", "TextEdit", File]) # Open the file in TextEdit.
+
+ # Nested function to generate content using the AI chatbot
+ def ContentWriterAI(prompt):
+ messages.append({"role": "user", "content": f"{prompt}"}) # Add the user's prompt to messages
+ completion = client.chat.completions.create(
+ model="mixtral-8x7b-32768",
+ messages=SystemChatBot + messages,
+ max_tokens=2048,
+ temperature=0.7,
+ top_p=1,
+ stream=True,
+ stop=None
+ )
+ Answer = ""
+ for chunk in completion:
+ if chunk.choices[0].delta.content:
+ Answer += chunk.choices[0].delta.content
+ Answer = Answer.replace("", "")
+ messages.append({"role": "assistant", "content": Answer})
+ return Answer
+
+ Topic = Topic.replace("content ", "")
+ ContentByAI = ContentWriterAI(Topic)
+ os.makedirs("Data", exist_ok=True) # Ensure the "Data" directory exists
+ with open(f"Data/{Topic.lower().replace(' ', '')}.txt", "w", encoding="utf-8") as file:
+ file.write(ContentByAI)
+ OpenTextEdit(f"Data/{Topic.lower().replace(' ', '')}.txt")
+ return True
+
+
+# Function to search YouTube.
+def YouTubeSearch(Topic):
+ Url4Search = f"https://www.youtube.com/results?search_query={Topic}"
+ webbrowser.open(Url4Search)
+ return True
+
+
+# Function to play a video on YouTube.
+def PlayYouTube(query):
+ playonyt(query) # Use pywhatkit's playonyt function to play the video
+ return True # Indicate success
+
+
+# Function to open an application.
+def OpenApp(app):
+ try:
+ subprocess.run(["open", "-a", app]) # Open the application using macOS's `open` command
+ return True
+ except Exception as e:
+ print(f"Error opening app: {e}")
+ return False
+
+
+def extract_Links(html):
+ if html is None:
+ return []
+ soup = BeautifulSoup(html, 'html.parser') # Parse the HTML content
+ links = soup.find_all('a', {'jsname': 'UWckNb'}) # Find relevant links
+ return [link.get('href') for link in links] # Return the links
+
+
+# Function to perform a Google search and retrieve HTML.
+def search_google(query):
+ url = f"https://www.google.com/search?q={query}"
+ headers = {"User-Agent": useragent}
+ response = requests.get(url, headers=headers)
+ if response.status_code == 200:
+ return response.text
+ else:
+ print("Error: Unable to fetch search results.")
+ return None
+
+# Function to close an application.
+def CloseApp(app):
+ try:
+ subprocess.run(["pkill", app]) # Close the application using `pkill`
+ return True
+ except Exception as e:
+ print(f"Error closing app: {e}")
+ return False
+
+
+CloseApp("TextEdit")
+
+
+# Function to control system volume.
+def System(command):
+ # Nested function to mute the system volume
+ def mute():
+ subprocess.run(["osascript", "-e", "set Volume 0"]) # Mute volume using AppleScript
+
+ def unmute():
+ subprocess.run(["osascript", "-e", "set Volume 5"]) # Unmute and set volume to 50%
+
+ # Nested function to increase the system volume
+ def volume_up():
+ subprocess.run(["osascript", "-e", "set Volume output volume (output volume of (get volume settings) + 10)"])
+
+ # Nested function to decrease the system volume
+ def volume_down():
+ subprocess.run(["osascript", "-e", "set Volume output volume (output volume of (get volume settings) - 10)"])
+
+ if command == "mute":
+ mute()
+ elif command == "unmute":
+ unmute()
+ elif command == "volume up":
+ volume_up()
+ elif command == "volume down":
+ volume_down()
+ else:
+ return True
+
+
+async def TranslateAndExecute(commands: list[str]):
+ funcs = [] # List to store asynchronous tasks
+ for command in commands:
+ if command.startswith("open "): # Handle "open" commands
+ if "open file" in command: # Ignore "open file" commands
+ pass
+ else:
+ fun = asyncio.to_thread(OpenApp, command.removeprefix("open "))
+ funcs.append(fun)
+ elif command.startswith("general "):
+ pass
+ elif command.startswith("realtime "):
+ pass
+ elif command.startswith("close "): # Handle "close" commands
+ fun = asyncio.to_thread(CloseApp, command.removeprefix("close "))
+ funcs.append(fun)
+ elif command.startswith("play "): # Handle "play" commands
+ fun = asyncio.to_thread(PlayYouTube, command.removeprefix("play "))
+ funcs.append(fun)
+ elif command.startswith("content "): # Handle "content" commands
+ fun = asyncio.to_thread(Content, command.removeprefix("content "))
+ funcs.append(fun)
+ elif command.startswith("google search "): # Handle "google search" commands
+ fun = asyncio.to_thread(GoogleSearch, command.removeprefix("google search "))
+ funcs.append(fun)
+ elif command.startswith("youtube search "): # Handle "youtube search" commands
+ fun = asyncio.to_thread(YouTubeSearch, command.removeprefix("youtube search "))
+ funcs.append(fun)
+ elif command.startswith("system "): # Handle "system" commands
+ fun = asyncio.to_thread(System, command.removeprefix("system "))
+ funcs.append(fun)
+ else:
+ print(f"No function found for command: {command}")
+ results = await asyncio.gather(*funcs)
+ for result in results:
+ if isinstance(result, str):
+ yield result
+ else:
+ yield result
+
+
+# Asynchronous function to automate command execution.
+async def Automation(commands: list[str]):
+ async for result in TranslateAndExecute(commands):
+ pass
+ return True
diff --git a/Backend/Chatbot.py b/Backend/Chatbot.py
new file mode 100644
index 00000000..b06c183b
--- /dev/null
+++ b/Backend/Chatbot.py
@@ -0,0 +1,116 @@
+from groq import Groq # Importing the Groq library
+from json import load, dump # Importing JSON library for reading and writing JSON files
+import datetime # Importing datetime library for date and time operations
+from dotenv import dotenv_values # Importing dotenv library to load environment variables
+
+# Loading environment variables from .env file
+env_vars = dotenv_values(".env")
+
+# Getting the username, assistant name, and API key from the environment variables.
+# NOTE(review): .get returns None when a key is missing from .env; a None
+# GroqAPIKey would only surface later on the first API call — confirm the
+# .env file always defines all three keys.
+Username = env_vars.get("Username")
+Assistantname = env_vars.get("Assistantname")
+GroqAPIKey = env_vars.get("GroqAPIKey")
+
+# Initializing the Groq client with the API key
+client = Groq(api_key=GroqAPIKey)
+
+# List to store chatbot messages
+messages = []
+
+# Predefined system message for the chatbot
+System = f"""Hello, I am {Username}, You are a very accurate and advanced AI chatbot named {Assistantname} which also has real-time up-to-date information from the internet.
+*** Do not tell time until I ask, do not talk too much, just answer the question.***
+*** Reply in only English, even if the question is in Hindi, reply in English.***
+*** Do not provide notes in the output, just answer the question and never mention your training data. ***
+"""
+
+# System message in the format required by the chatbot
+SystemChatBot = [
+    {"role": "system", "content": System}
+]
+
+# Try to load the chat log from a JSON file, if it doesn't exist, create an empty one.
+# NOTE(review): this assumes the Data/ directory already exists — if it does
+# not, the "w" fallback below raises FileNotFoundError as well; confirm the
+# directory is created before this module is imported.
+try:
+    with open(r"Data/ChatLog.json", "r") as f:
+        messages = load(f)
+except FileNotFoundError:
+    with open(r"Data/ChatLog.json", "w") as f:
+        dump([], f)
+
+# Function to get real-time information
+def Realtimelnformation():
+ current_date_time = datetime.datetime.now()
+ day = current_date_time.strftime("%A")
+ date = current_date_time.strftime("%d")
+ month = current_date_time.strftime("%B")
+ year = current_date_time.strftime("%Y")
+ hour = current_date_time.strftime("%H")
+ minute = current_date_time.strftime("%M")
+ second = current_date_time.strftime("%S")
+
+ data = f"Please use this real-time information if needed,\n"
+ data += f"Day: {day}\nDate: {date}\nMonth: {month}\nYear: {year}\n"
+ data += f"Time: {hour} hours : {minute} minutes : {second} seconds.\n"
+ return data
+
+# Function to modify the answer by removing empty lines
+def AnswerModifier(Answer):
+ lines = Answer.split('\n')
+ non_empty_lines = [line for line in lines if line.strip()]
+ modified_answer = '\n'.join(non_empty_lines)
+ return modified_answer
+
+# Function to handle the chatbot interaction
+def ChatBot(Query):
+ """This function sends the user's query to the chatbot and returns the AI's response."""
+ try:
+ # Load the chat log from the JSON file
+ with open(r"Data/ChatLog.json", "r") as f:
+ messages = load(f)
+
+ # Append the user's query to the messages
+ messages.append({"role": "user", "content": f"{Query}"})
+
+ # Create a completion request to the Groq API
+ completion = client.chat.completions.create(
+ model="llama3-70b-8192",
+ messages=SystemChatBot + [{"role": "system", "content": Realtimelnformation()}] + messages,
+ max_tokens=1024,
+ temperature=0.7,
+ top_p=1,
+ stream=True,
+ stop=None
+ )
+
+ Answer = ""
+
+ # Collect the response from the API
+ for chunk in completion:
+ if chunk.choices[0].delta.content:
+ Answer += chunk.choices[0].delta.content
+
+ # Clean the answer
+ Answer = Answer.replace("", "")
+
+ # Append the assistant's response to the messages
+ messages.append({"role": "assistant", "content": Answer})
+
+ # Save the updated chat log to the JSON file
+ with open(r"Data/ChatLog.json", "w") as f:
+ dump(messages, f, indent=4)
+
+ # Return the modified answer
+ return AnswerModifier(Answer=Answer)
+
+ except Exception as e:
+ print(f"error: {e}")
+ # If an error occurs, reset the chat log and retry
+ with open(r"Data/ChatLog.json", "w") as f:
+ dump([], f, indent=4)
+ return ChatBot(Query)
+
+# Main loop to interact with the user when run as a script (not on import).
+if __name__ == "__main__":
+    while True:
+        user_input = input("Enter Your Question: ")
+        print(ChatBot(user_input))
diff --git a/Backend/ImageGeneration.py b/Backend/ImageGeneration.py
new file mode 100644
index 00000000..3454794f
--- /dev/null
+++ b/Backend/ImageGeneration.py
@@ -0,0 +1,85 @@
+import asyncio
+from random import randint
+from PIL import Image
+import requests
+from dotenv import get_key
+import os
+from time import sleep
+
+# Function to open and display images based on a given prompt
+def open_images(prompt):
+    """Show the four generated images for *prompt* from the Data/ folder."""
+    folder_path = r"Data"  # Folder where images are stored
+    prompt = prompt.replace(" ", "_")  # Filenames use underscores for spaces
+
+    # Generate the filenames for the images (suffixes 1..4, matching the
+    # {prompt}{i+1}.jpg names that generate_images writes).
+    Files = [f"{prompt}{i}.jpg" for i in range(1, 5)]
+
+    for jpg_file in Files:
+        image_path = os.path.join(folder_path, jpg_file)
+
+        try:
+            # Try to open the image with PIL; .show() spawns a viewer.
+            img = Image.open(image_path)
+            print(f"Opening image: {image_path}")
+            img.show()
+            sleep(1)  # Pause for 1 sec before showing next image
+
+        except IOError:
+            print(f"Unable to open {image_path}")
+
+# API details for the Hugging Face stable diffusion model
+API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
+headers = {"Authorization": f"Bearer {get_key('.env', 'HuggingFaceAPIKey')}"}
+
+# Async function to send queries to Hugging Face through API
+async def query(payload):
+ response = await asyncio.to_thread(requests.post, API_URL, headers=headers, json=payload)
+ return response.content
+
+# Async function to generate images based on a prompt
+async def generate_images(prompt: str):
+    """Request four images for *prompt* concurrently and save them to Data/.
+
+    NOTE(review): the raw response bytes are written unconditionally — when
+    the API returns a JSON error body (model loading, bad token) that error
+    is saved as a .jpg; consider validating the response before writing.
+    """
+    tasks = []
+
+    # Create 4 image generation tasks
+    for _ in range(4):
+        payload = {
+            "inputs": f"{prompt}, quality=2k, sharpness=maximum, Ultra High details, high resolution, seed = {randint(0, 1000000)}",
+        }
+        task = asyncio.create_task(query(payload))
+        tasks.append(task)
+
+    # Wait for all tasks to complete
+    image_bytes_list = await asyncio.gather(*tasks)
+
+    # Save the results as Data/{prompt}1.jpg .. {prompt}4.jpg
+    for i, image_bytes in enumerate(image_bytes_list):
+        with open(fr"Data/{prompt.replace(' ', '_')}{i + 1}.jpg", "wb") as f:
+            f.write(image_bytes)
+
+# Wrapper function to generate and open images
+def GenerateImages(prompt: str):
+ asyncio.run(generate_images(prompt))
+ open_images(prompt)
+
+# Main loop to check for image generation requests
+while True:
+ try:
+ # Read the prompt and status from the file
+ with open(r"Frontend/Files/ImageGeneration.data", "r") as f:
+ Data: str = f.read()
+ prompt, status = Data.split(",")
+
+ # If status indicates an image generation request
+ if status == "True":
+ print("Generating Images...")
+ GenerateImages(prompt=prompt)
+
+ # Reset the status in the file after generating image
+ with open(r"Frontend/Files/ImageGeneration.data", "w") as f:
+ f.write("False,False")
+ break # Exit the loop after generating image
+
+ else:
+ sleep(1) # Wait for 1 sec before checking again
+ except Exception as e:
+ print(e)
diff --git a/Backend/Model.py b/Backend/Model.py
new file mode 100644
index 00000000..18e6ba97
--- /dev/null
+++ b/Backend/Model.py
@@ -0,0 +1,113 @@
+import cohere # Import the Cohere library for AI services.
+from rich import print # Import the Rich library to enhance the output of the terminal.
+from dotenv import dotenv_values # Import the dotenv_values library to load environment variables from a .env file.
+
+# Load environment variables from a .env file.
+env_vars = dotenv_values('.env')
+# Retrieve the API key.
+CohereAPIKey = env_vars.get("CohereAPIKey")
+# Create a Cohere client using the provided API Key.
+co = cohere.Client(api_key=CohereAPIKey)
+
+# Define a list of recognized function keywords for task categorization.
+# FirstLayerDMM keeps only model outputs that start with one of these.
+funcs = [
+    "exit", "general", "realtime", "open", "close", "play",
+    "generate image", "system", "content", "google search",
+    "youtube search", "reminder"
+]
+
+# Initialize an empty list to store user messages.
+# NOTE(review): entries are appended in FirstLayerDMM but never read or sent
+# to the model, so this list only grows — confirm whether it is still needed.
+messages = []
+
+#Define the preamble that guides the AI model on how to categorize queries
+preamble = """
+You are a very accurate Decision-Making Model, which decides what kind of a query is given to you.
+You will decide whether a query is a 'general' query, a 'realtime' query, or is asking to perform any task or automation like 'open facebook, instagram', 'can you write a application and open it in notepad'
+*** Do not answer any query, just decide what kind of query is given to you. ***
+-> Respond with 'general ( query )' if a query can be answered by a llm model (conversational ai chatbot) and doesn't require any up to date information like if the query is 'who was akbar?' respond with 'general who was akbar?', if the query is 'how can i study more effectively?' respond with 'general how can i study more effectively?', if the query is 'can you help me with this math problem?' respond with 'general can you help me with this math problem?', if the query is 'Thanks, i really liked it.' respond with 'general thanks, i really liked it.' , if the query is 'what is python programming language?' respond with 'general what is python programming language?', etc. Respond with 'general (query)' if a query doesn't have a proper noun or is incomplete like if the query is 'who is he?' respond with 'general who is he?', if the query is 'what's his networth?' respond with 'general what's his networth?', if the query is 'tell me more about him.' respond with 'general tell me more about him.', and so on even if it require up-to-date information to answer. Respond with 'general (query)' if the query is asking about time, day, date, month, year, etc like if the query is 'what's the time?' respond with 'general what's the time?'.
+-> Respond with 'realtime ( query )' if a query can not be answered by a llm model (because they don't have realtime data) and requires up to date information like if the query is 'who is indian prime minister' respond with 'realtime who is indian prime minister', if the query is 'tell me about facebook's recent update.' respond with 'realtime tell me about facebook's recent update.', if the query is 'tell me news about coronavirus.' respond with 'realtime tell me news about coronavirus.', etc and if the query is asking about any individual or thing like if the query is 'who is akshay kumar' respond with 'realtime who is akshay kumar', if the query is 'what is today's news?' respond with 'realtime what is today's news?', if the query is 'what is today's headline?' respond with 'realtime what is today's headline?', etc.
+-> Respond with 'open (application name or website name)' if a query is asking to open any application like 'open facebook', 'open telegram', etc. but if the query is asking to open multiple applications, respond with 'open 1st application name, open 2nd application name' and so on.
+-> Respond with 'close (application name)' if a query is asking to close any application like 'close notepad', 'close facebook', etc. but if the query is asking to close multiple applications or websites, respond with 'close 1st application name, close 2nd application name' and so on.
+-> Respond with 'play (song name)' if a query is asking to play any song like 'play afsanay by ys', 'play let her go', etc. but if the query is asking to play multiple songs, respond with 'play 1st song name, play 2nd song name' and so on.
+-> Respond with 'generate image (image prompt)' if a query is requesting to generate a image with given prompt like 'generate image of a lion', 'generate image of a cat', etc. but if the query is asking to generate multiple images, respond with 'generate image 1st image prompt, generate image 2nd image prompt' and so on.
+-> Respond with 'reminder (datetime with message)' if a query is requesting to set a reminder like 'set a reminder at 9:00pm on 25th june for my business meeting.' respond with 'reminder 9:00pm 25th june business meeting'.
+-> Respond with 'system (task name)' if a query is asking to mute, unmute, volume up, volume down , etc. but if the query is asking to do multiple tasks, respond with 'system 1st task, system 2nd task', etc.
+-> Respond with 'content (topic)' if a query is asking to write any type of content like application, codes, emails or anything else about a specific topic but if the query is asking to write multiple types of content, respond with 'content 1st topic, content 2nd topic' and so on.
+-> Respond with 'google search (topic)' if a query is asking to search a specific topic on google but if the query is asking to search multiple topics on google, respond with 'google search 1st topic, google search 2nd topic' and so on.
+-> Respond with 'youtube search (topic)' if a query is asking to search a specific topic on youtube but if the query is asking to search multiple topics on youtube, respond with 'youtube search 1st topic, youtube search 2nd topic' and so on.
+*** If the query is asking to perform multiple tasks like 'open facebook, telegram and close whatsapp' respond with 'open facebook, open telegram, close whatsapp' ***
+*** If the user is saying goodbye or wants to end the conversation like 'bye jarvis.' respond with 'exit'.***
+*** Respond with 'general (query)' if you can't decide the kind of query or if a query is asking to perform a task which is not mentioned above. ***
+"""
+
+# Define a chat history with predefined user-chatbot interactions for context.
+ChatHistory = [
+ {"role": "User", "message": "how are you?"},
+ {"role": "Chatbot", "message": "general how are you?"},
+ {"role": "User", "message": "do you like pizza?"},
+ {"role": "Chatbot", "message": "general do you like pizza?"},
+ {"role": "User", "message": "open chrome and tell me about mahatma gandhi."},
+ {"role": "Chatbot", "message": "open chrome, general tell me about mahatma gandhi."},
+ {"role": "User", "message": "open chrome and firefox"},
+ {"role": "Chatbot", "message": "open chrome, open firefox"},
+ {"role": "User", "message": "what is today's date and by the way remind me that I have a dancing performance on 5th Aug at 11pm"},
+ {"role": "Chatbot", "message": "general what is today's date, reminder 11:00pm 5th Aug dancing performance"},
+ {"role": "User", "message": "chat with me."},
+ {"role": "Chatbot", "message": "general chat with me."},
+]
+
+# Define the main function for decision-making on queries.
+def FirstLayerDMM(prompt: str = "test"):
+ # Add the user's query to the message list.
+ messages.append({"role": "User", "content": f"{prompt}"})
+
+ try:
+ # Create a streaming chat session with the Cohere model.
+ stream = co.chat_stream(
+ model='command-r-plus', # Specify the Cohere model to use.
+ message=prompt, # Pass the user query to the model.
+ temperature=0.7, # Set the creativity level of the model.
+ chat_history=ChatHistory, # Pass the predefined chat history to the model.
+ prompt_truncation='OFF', # Turn off prompt truncation.
+ connectors=[], # No additional connectors are used.
+ preamble=preamble # Pass the preamble to the model.
+ )
+
+ # Initialize an empty string to store the chatbot response.
+ response = ""
+
+ # Iterate over events in the chat stream.
+ for event in stream:
+ if event.event_type == 'text-generation':
+ response += event.text # Append generated text to the response variable.
+
+ # Remove newline characters and split the response into individual tasks.
+ response = response.replace("\n", "")
+ response = response.split(",")
+
+ # Strip leading and trailing whitespaces from each task.
+ response = [i.strip() for i in response]
+
+ # Initialize an empty list to filter valid tasks.
+ temp = []
+
+ # Filter the tasks based on recognized function keywords.
+ for task in response:
+ for func in funcs:
+ if task.startswith(func):
+ temp.append(task) # Add valid tasks.
+
+ # Update the response with the filtered tasks.
+ response = temp
+
+ # Return the filtered response.
+ return response
+
+ finally:
+ # Ensure cleanup happens here (e.g., closing the generator).
+ pass
+
+# Entry point for the script: interactive classification loop.
+if __name__ == "__main__":
+    while True:
+        print(FirstLayerDMM(input(">>> "))) # Print the categorized response to the terminal.
\ No newline at end of file
diff --git a/Backend/RealtimeSearchEngine.py b/Backend/RealtimeSearchEngine.py
new file mode 100644
index 00000000..bd4efdf8
--- /dev/null
+++ b/Backend/RealtimeSearchEngine.py
@@ -0,0 +1,123 @@
+from googlesearch import search # pip install google
+from groq import Groq # pip install groq
+from json import load, dump # json library
+import datetime # datetime python library
+from dotenv import dotenv_values # dotenv library to load environment variables
+
+# Load environment variables from .env file
+env_vars = dotenv_values(".env")
+
+# Retrieve specific environment variables
+Username = env_vars.get("Username")
+Assistantname = env_vars.get("Assistantname")
+GroqAPIKey = env_vars.get("GroqAPIKey")
+
+# Initialize Groq client with API key
+client = Groq(api_key=GroqAPIKey)
+
+# System message template for the chatbot
+System = f"""Hello, I am {Username}, You are a very accurate and advanced AI chatbot named {Assistantname} which has real-time up-to-date information from the internet.
+*** Provide Answers In a Professional Way, make sure to add full stops, commas, question marks, and use proper grammar.***
+*** Just answer the question from the provided data in a professional way. ***"""
+
+# Load chat log from file or create a new one if it doesn't exist
+try:
+ with open(r"Data/ChatLog.json", "r") as f:
+ messages = load(f)
+except:
+ with open(r"Data/ChatLog.json", "w") as f:
+ dump([], f)
+
+# Function to perform Google search and format the results
+def GoogleSearch(query):
+    """Run a Google search (top 5 results) and return a [start]/[end]
+    delimited block of titles and descriptions for the LLM prompt."""
+    # advanced=True yields SearchResult objects exposing .title/.description.
+    results = list(search(query, advanced=True, num_results=5))
+    Answer = f"The search results for '{query}' are:\n[start]\n"
+
+    for i in results:
+        Answer += f"Title: {i.title}\nDescription: {i.description}\n\n"
+    Answer += "[end]"
+    return Answer
+
+# Function to remove empty lines from the answer
+def AnswerModifier(Answer):
+ lines = Answer.split('\n')
+ non_empty_lines = [line for line in lines if line.strip()]
+ modified_answer = '\n'.join(non_empty_lines)
+ return modified_answer
+
+# Initial system and user messages for the chatbot.
+# The canned Hi/Hello exchange primes the assistant's tone; per-query search
+# results are appended to (and later popped from) this list.
+SystemChatBot = [
+    {"role": "system", "content": System},
+    {"role": "user", "content": "Hi"},
+    {"role": "assistant", "content": "Hello, how can I help you?"}
+]
+
+# Function to get current date and time information
+def Information():
+ data = ""
+ current_date_time = datetime.datetime.now()
+ day = current_date_time.strftime("%A")
+ date = current_date_time.strftime("%d")
+ month = current_date_time.strftime("%B")
+ year = current_date_time.strftime("%Y")
+ hour = current_date_time.strftime("%H")
+ minute = current_date_time.strftime("%M")
+ second = current_date_time.strftime("%S")
+
+ data += f"Use This Real-time Information if needed:\n"
+ data += f"Day: {day}\n"
+ data += f"Date: {date}\n"
+ data += f"Month: {month}\n"
+ data += f"Year: {year}\n"
+ data += f"Time: {hour} hours : {minute} minutes : {second} seconds.\n"
+ return data
+
+# Main function to handle real-time search and generate responses
+def RealtimeSearchEngine(prompt):
+ global SystemChatBot, messages
+
+ # Load chat log from file
+ with open(r"Data/ChatLog.json", "r") as f:
+ messages = load(f)
+
+ # Append user prompt to messages
+ messages.append({"role": "user", "content": f"{prompt}"})
+
+ # Perform Google search and append the result to SystemChatBot
+ SystemChatBot.append({"role": "system", "content": GoogleSearch(prompt)})
+
+ # Generate response using Groq client
+ completion = client.chat.completions.create(
+ model="llama3-70b-8192",
+ messages=SystemChatBot + [{"role": "system", "content": Information()}] + messages,
+ max_tokens=2048,
+ temperature=0.7,
+ top_p=1,
+ stream=True,
+ stop=None
+ )
+
+ Answer = ""
+
+ # Collect response chunks
+ for chunk in completion:
+ if chunk.choices[0].delta.content:
+ Answer += chunk.choices[0].delta.content
+
+ # Clean up the answer
+ Answer = Answer.strip().replace("", "")
+ messages.append({"role": "assistant", "content": Answer})
+
+ # Save updated chat log to file
+ with open(r"Data/ChatLog.json", "w") as f:
+ dump(messages, f, indent=4)
+
+ # Remove the last system message from SystemChatBot
+ SystemChatBot.pop()
+ return AnswerModifier(Answer=Answer)
+
+# Main entry point of the program for interactive querying (script mode only).
+if __name__ == "__main__":
+    while True:
+        prompt = input("Enter your query: ")
+        print(RealtimeSearchEngine(prompt))
\ No newline at end of file
diff --git a/Backend/SpeechToText.py b/Backend/SpeechToText.py
new file mode 100644
index 00000000..2a00eac0
--- /dev/null
+++ b/Backend/SpeechToText.py
@@ -0,0 +1,131 @@
+from selenium import webdriver
+from selenium.webdriver.common.keys import Keys
+from time import sleep
+from selenium.webdriver.common.by import By
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.chrome.options import Options
+from webdriver_manager.chrome import ChromeDriverManager
+from dotenv import dotenv_values
+import os
+import mtranslate as mt
+
+# Load environment variables from the .env file
+env_vars = dotenv_values(".env")
+
+# Speech-recognition input language (e.g. "en"). NOTE(review): None if the
+# key is missing from .env — later .lower() calls would then fail; confirm
+# it is always defined.
+InputLanguage = env_vars.get("InputLanguage")
+
+HtmlCode = '''
+
+
+ Speech Recognition
+
+
+
+
+
+
+
+'''
+
+# Inject the configured recognition language into the page template.
+# NOTE(review): the HtmlCode template above (as committed) contains no
+# "recognition.lang = '';" token, so this replace is a no-op — confirm the
+# full HTML/JS (script tag, start/end buttons, output element used by
+# SpeechRecognition below) was not lost from the template.
+HtmlCode = str(HtmlCode).replace("recognition.lang = '';", f"recognition.lang = '{InputLanguage}';")
+
+with open(r"Data/voice.html", "w") as f:
+    f.write(HtmlCode)
+
+# Get the current directory
+current_dir = os.getcwd()
+
+# Absolute path of the generated recognition page
+link = f"{current_dir}/Data/voice.html"
+
+# Configure headless Chrome. The fake media-stream flags auto-grant the
+# page's microphone permission prompt so recognition can start unattended.
+chrome_options = Options()
+user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36"
+chrome_options.add_argument(f"user-agent={user_agent}")
+chrome_options.add_argument("--use-fake-ui-for-media-stream")
+chrome_options.add_argument("--use-fake-device-for-media-stream")
+chrome_options.add_argument("--headless=new")
+
+# Start the browser once at import time and reuse it for every recognition.
+service = Service(ChromeDriverManager().install())
+driver = webdriver.Chrome(service=service, options=chrome_options)
+
+# Directory holding the assistant's runtime status files for the frontend.
+TempDirPath = rf"{current_dir}/Frontend/Files"
+
+def SetAssistantStatus(Status):
+    """Publish the assistant's current status for the frontend to display."""
+    with open(rf"{TempDirPath}/Status.data", "w", encoding="utf-8") as file:
+        file.write(Status)
+
+def QueryModifier(Query):
+ new_query = Query.lower().strip()
+ query_words = new_query.split()
+ question_words = ["how", "what", "who", "where", "when", "why", "which", "whose", "whom", "can you", "what's", "where's", "how's", "can your"]
+ if any(word in new_query for word in question_words):
+ if query_words[-1][-1] in ["?", ".", "!"]:
+ new_query = new_query[:-1] + "?"
+ else:
+ new_query += "?"
+ else:
+ if query_words[-1][-1] in ["?", ".", "!"]:
+ new_query = new_query[:-1] + "."
+ else:
+ new_query += "."
+
+ return new_query.capitalize()
+
+def UniversalTranslator(Text):
+    """Translate *Text* to English via mtranslate (auto-detect source)
+    and capitalize the result."""
+    english_translation = mt.translate(Text,"en","auto")
+    return english_translation.capitalize()
+
+# Function to perform speech recognition
+def SpeechRecognition():
+    """Drive the voice.html page in headless Chrome and return the
+    recognized (and, if needed, translated) query text.
+
+    NOTE(review): this polls the output element in a tight loop with no
+    sleep (100% CPU while listening) and never times out when nothing is
+    recognized — consider a short delay and an upper bound.
+    """
+    # Open the HTML file in the browser
+    driver.get("file:///" + link)
+    # Start speech recognition
+    driver.find_element(By.ID, "start").click()
+
+    while True:
+        try:
+            # Get the recognized text from the HTML output element
+            text = driver.find_element(by=By.ID, value="output").text
+
+            if text:
+                driver.find_element(by=By.ID, value="end").click()
+                # If the input language is English, return the modified query.
+                # NOTE(review): the first condition is redundant (subsumed by
+                # the substring test), and "en" also matches codes like
+                # "fr-en"; kept as-is here.
+                if InputLanguage.lower() == "en" or "en" in InputLanguage.lower():
+                    return QueryModifier(text)
+                else:
+                    SetAssistantStatus("Translating...")
+                    return QueryModifier(UniversalTranslator(text))
+
+        except Exception as e:
+            print(f"Error: {e}")
+
+# Script entry point: continuously listen and print recognized queries.
+if __name__ == "__main__":
+    while True:
+        text = SpeechRecognition()
+        if text:
+            print(text)
\ No newline at end of file
diff --git a/Backend/TextToSpeech.py b/Backend/TextToSpeech.py
new file mode 100644
index 00000000..6bc1b731
--- /dev/null
+++ b/Backend/TextToSpeech.py
@@ -0,0 +1,92 @@
+import pygame # Import pygame library for handling audio playback
+import random # Import random for generating random choices
+import asyncio # Import asyncio for asynchronous operations
+import edge_tts # Import edge_tts for Text-to-speech functionality
+import os # Import os for file path handling
+from dotenv import dotenv_values # Import dotenv for reading environment variables from a .env file
+
# Load environment variables from a .env file
env_vars = dotenv_values(".env")
AssistantVoice = env_vars.get("AssistantVoice")  # edge-tts voice name; None if the key is absent
+
# Asynchronous function to convert Text to an audio file
async def TextToAudioFile(Text) -> None:
    """Synthesize *Text* with edge-tts and save it to Data/speech.mp3.

    Ensures the output directory exists and removes any stale file first so
    edge-tts writes a fresh recording.

    Args:
        Text: The text to convert to speech.
    """
    file_path = r"Data/speech.mp3"  # single source of truth for the output path

    # Robustness: create the Data/ directory if it is missing, then clear
    # any previous recording to avoid overwrite errors.
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    if os.path.exists(file_path):
        os.remove(file_path)

    # Build the TTS request with the configured voice, slightly raised
    # pitch and rate for a livelier delivery.
    communicate = edge_tts.Communicate(Text, AssistantVoice, pitch='+5Hz', rate='+13%')

    # Consistency fix: save to file_path instead of repeating the literal,
    # so the existence check and the save target can never drift apart.
    await communicate.save(file_path)
+
def TTS(Text, func=lambda r=None: True):
    """Speak *Text* aloud, blocking until playback finishes or is stopped.

    Args:
        Text: The text to synthesize and play.
        func: Callback polled during playback; returning a falsy value stops
              playback early. It is also called with False on exit to signal
              that TTS has finished.

    Returns:
        True if the audio played (or was stopped via *func*), False on error.
    """
    try:
        # Generate Data/speech.mp3 (runs the async synthesizer to completion).
        asyncio.run(TextToAudioFile(Text))

        # Initialize pygame mixer and start playback of the generated file.
        pygame.mixer.init()
        pygame.mixer.music.load(r"Data/speech.mp3")
        pygame.mixer.music.play()

        # Poll until playback finishes, or the callback asks us to stop.
        while pygame.mixer.music.get_busy():
            if not func():  # idiomatic falsy check instead of `== False`
                break
            pygame.time.Clock().tick(10)  # cap polling at 10 checks per second

        return True  # audio played (or was intentionally stopped)

    except Exception as e:
        # BUG FIX: the original wrapped this body in `while True` and the
        # except clause did not exit, so a persistent failure (missing voice,
        # no audio device, ...) retried forever. Report and fail cleanly.
        print(f"Error in TTS: {e}")
        return False

    finally:
        try:
            # Signal the end of TTS to the caller and release the mixer.
            func(False)
            pygame.mixer.music.stop()
            pygame.mixer.quit()
        except Exception as e:
            # Handle any exceptions during cleanup
            print(f"Error in finally block: {e}")
+
# Function to convert text to speech, shortening very long answers.
def TextToSpeech(Text, func=lambda r=None: True):
    """Speak *Text*; for long answers, speak only the first two sentences
    followed by a canned "see the chat screen" notice.

    Args:
        Text: Full text of the assistant's reply.
        func: Playback-control callback forwarded to TTS().
    """
    Data = str(Text).split(".")  # rough sentence segments, split on periods
    responses = [
        "The rest of the result has been printed to the chat screen, kindly check it out sir.",
        "The rest of the Text is now on the chat screen, sir, please check it.",
        "You can see the rest of the Text on the chat screen, sir.",
        "The remaining part of the Text is now on the chat screen, sir.",
        "Sir, you'll find more Text on the chat screen for you to see.",
        "The rest of the answer is now on the chat screen, sir.",
        "Sir, please look at the chat screen, the rest of the answer is there.",
        "You'll find the complete answer on the chat screen, sir.",
        "The next part of the Text is on the chat screen, sir.",
        "Sir, please check the chat screen for more information.",
        "There's more Text on the chat screen for you, sir.",
        "Sir, take a look at the chat screen for additional Text.",
        "You'll find more to read on the chat screen, sir.",
        "Sir, check the chat screen for the rest of the Text.",
        "The chat screen has the rest of the Text, sir.",
        "There's more to see on the chat screen, sir, please look.",
        "Sir, the chat screen holds the continuation of the Text.",
        "You'll find the complete answer on the chat screen, kindly check it out sir.",
        "Please review the chat screen for the rest of the Text, sir.",
        "Sir, look at the chat screen for the complete answer."
    ]

    if len(Data) > 4 and len(Text) >= 250:
        # Long answer: speak the first two sentences plus a notice pointing
        # at the chat screen.
        # BUG FIX: the original passed random.choice(responses) as TTS's
        # second positional argument (*func*), handing a string where a
        # callable is expected and crashing in the playback poll. The notice
        # belongs inside the spoken text; the callback stays *func*.
        TTS(" ".join(Data[0:2]) + ". " + random.choice(responses), func)
    else:
        # Short answer: speak everything as-is.
        TTS(Text, func)
+
# Main execution loop
if __name__ == "__main__":
    while True:
        # Prompt user for input and pass it to TextToSpeech
        # NOTE(review): the comment above says TextToSpeech but the code
        # calls TTS directly, bypassing the long-answer shortening logic —
        # confirm which behavior is intended.
        TTS(input("Enter the text: "))
diff --git a/Backend/__pycache__/Automation.cpython-313.pyc b/Backend/__pycache__/Automation.cpython-313.pyc
new file mode 100644
index 00000000..a01a94b7
Binary files /dev/null and b/Backend/__pycache__/Automation.cpython-313.pyc differ
diff --git a/Backend/__pycache__/Chatbot.cpython-313.pyc b/Backend/__pycache__/Chatbot.cpython-313.pyc
new file mode 100644
index 00000000..86593576
Binary files /dev/null and b/Backend/__pycache__/Chatbot.cpython-313.pyc differ
diff --git a/Backend/__pycache__/Model.cpython-313.pyc b/Backend/__pycache__/Model.cpython-313.pyc
new file mode 100644
index 00000000..e4faaf12
Binary files /dev/null and b/Backend/__pycache__/Model.cpython-313.pyc differ
diff --git a/Backend/__pycache__/RealtimeSearchEngine.cpython-313.pyc b/Backend/__pycache__/RealtimeSearchEngine.cpython-313.pyc
new file mode 100644
index 00000000..f37fd65d
Binary files /dev/null and b/Backend/__pycache__/RealtimeSearchEngine.cpython-313.pyc differ
diff --git a/Backend/__pycache__/SpeechToText.cpython-313.pyc b/Backend/__pycache__/SpeechToText.cpython-313.pyc
new file mode 100644
index 00000000..e87e0919
Binary files /dev/null and b/Backend/__pycache__/SpeechToText.cpython-313.pyc differ
diff --git a/Backend/__pycache__/TextToSpeech.cpython-313.pyc b/Backend/__pycache__/TextToSpeech.cpython-313.pyc
new file mode 100644
index 00000000..2ab7679b
Binary files /dev/null and b/Backend/__pycache__/TextToSpeech.cpython-313.pyc differ
diff --git a/Frontend/Files/Database.data b/Frontend/Files/Database.data
new file mode 100644
index 00000000..e69de29b
diff --git a/Frontend/Files/FormattedChatLog.txt b/Frontend/Files/FormattedChatLog.txt
new file mode 100644
index 00000000..fc7b33ec
--- /dev/null
+++ b/Frontend/Files/FormattedChatLog.txt
@@ -0,0 +1,31 @@
+Rahul Kumar : hii
+Bloom AI : Hello! It's nice to meet you, Rahul Kumar. How can I assist you today?
+Rahul Kumar : net worth of elon
+Bloom AI : According to the latest information available, Elon Musk's net worth is estimated to be around $393.3 billion as of February 21, 2025. However, it's worth noting that his net worth can fluctuate constantly and may have changed slightly since the last update.
+Rahul Kumar : your name
+Bloom AI : My name is Bloom AI, a highly advanced and accurate AI chatbot.
+Rahul Kumar : hii
+Bloom AI : It seems like you typed "hii" again! Based on the search results I have, "HII" stands for Huntington Ingalls Industries, Inc., which is the largest military shipbuilding company in the United States and a provider of professional services to partners in government and industry.
+Rahul Kumar : how are you
+Bloom AI : I'm doing well, thank you for asking! As a digital AI assistant, I don't have feelings or emotions like humans do, but I'm always ready to assist you with any questions or topics you'd like to discuss. By the way, I see that you asked "how are you" - I can provide you with some information on how to respond to this question or ask it in different ways, if you're interested!
+Rahul Kumar : exit
+Bloom AI : Based on the search results, "exit" can have multiple meanings. According to Merriam-Webster, an exit can refer to:
+1. A departure from a stage
+2. The act of going out or away (e.g., "made an early exit")
+3. Death
+4. A way out of an enclosed place or space
+Additionally, the Cambridge Dictionary defines an exit as "the door through which you might leave a room, building, or large vehicle, or a place on a main road where a vehicle can leave it by taking a smaller road."
+There's also a 2019 South Korean disaster comedy film titled "Exit" starring Jo Jong-suk and Im Yoon-ah.
+Which aspect of "exit" would you like to know more about?
+Rahul Kumar : Hello.
+Bloom AI : Hello again!
+Rahul Kumar : Hello can you give me answer?
+Bloom AI : I think you're asking me to respond to your "Hello". My answer is: Hello, Rahul Kumar!
+Rahul Kumar : Can you give me answer?
+Bloom AI : I'm not sure what you're asking for an answer to. Could you please clarify or ask a specific question? I'll do my best to assist you.
+Rahul Kumar : Who is rahul kumar?
+Bloom AI : According to the search results, Rahul Kumar is an Indian actor, model, and singer working in the Bollywood industry. He has been active since 2010 and is known for his performances in films such as "3 Idiots" and "Sandeep Aur Pinky Faraar".
+Rahul Kumar : Hello how are you?
+Bloom AI : I'm doing well, thank you for asking!
+Rahul Kumar : What is today's weather?
+Bloom AI : According to the search results, today's weather in Sonipat, Haryana, India is partly cloudy and hazy, with a high of 24°C and a low of 10°C. The current temperature is around 75°F (23.9°C) with hazy sunshine, and the RealFeel temperature is 74°F (23.3°C).
\ No newline at end of file
diff --git a/Frontend/Files/ImageGeneration.data b/Frontend/Files/ImageGeneration.data
new file mode 100644
index 00000000..73ee4ace
--- /dev/null
+++ b/Frontend/Files/ImageGeneration.data
@@ -0,0 +1 @@
+generate image alan mosque, True
\ No newline at end of file
diff --git a/Frontend/Files/Mic.data b/Frontend/Files/Mic.data
new file mode 100644
index 00000000..c1f22fbc
--- /dev/null
+++ b/Frontend/Files/Mic.data
@@ -0,0 +1 @@
+False
\ No newline at end of file
diff --git a/Frontend/Files/Responses.data b/Frontend/Files/Responses.data
new file mode 100644
index 00000000..321747ba
--- /dev/null
+++ b/Frontend/Files/Responses.data
@@ -0,0 +1 @@
+Rahul Kumar: Now generator image of a dog.
\ No newline at end of file
diff --git a/Frontend/Files/Status.data b/Frontend/Files/Status.data
new file mode 100644
index 00000000..44168eb3
--- /dev/null
+++ b/Frontend/Files/Status.data
@@ -0,0 +1 @@
+Thinking...
\ No newline at end of file
diff --git a/Frontend/GUI.py b/Frontend/GUI.py
new file mode 100644
index 00000000..0c293911
--- /dev/null
+++ b/Frontend/GUI.py
@@ -0,0 +1,435 @@
+from PyQt5.QtWidgets import QApplication, QMainWindow, QTextEdit, QStackedWidget, QWidget, QLineEdit, QGridLayout, QVBoxLayout, QHBoxLayout, QPushButton, QFrame, QLabel, QSizePolicy
+from PyQt5.QtGui import QIcon, QPainter, QMovie, QColor, QTextFormat, QFont, QPixmap, QTextBlockFormat, QTextCharFormat
+from PyQt5.QtCore import Qt, QSize, QTimer
+from dotenv import dotenv_values
+import sys
+import os
+
# Assistant display name and filesystem locations used throughout the GUI.
env_vars = dotenv_values(".env")
Assistantname = env_vars.get("Assistantname")  # None if the key is absent from .env
current_dir = os.getcwd()
old_chat_message = ""  # last chat text rendered; used to skip duplicate re-renders
TempDirPath = rf"{current_dir}/Frontend/Files"  # runtime state files (Mic/Status/Responses)
GraphicsDirPath = rf"{current_dir}/Frontend/Graphics"  # icons and GIF assets
+
def AnswerModifier(Answer):
    """Collapse *Answer* by removing blank and whitespace-only lines."""
    return '\n'.join(line for line in Answer.split('\n') if line.strip())
+
def QueryModifier(Query):
    """Normalize a query: lower-case, trim, terminate with "?" for questions
    or "." for statements, and capitalize the first letter.

    Args:
        Query: Raw user text.

    Returns:
        The normalized string; empty/whitespace input returns "" (previously
        this crashed on query_words[-1]).
    """
    new_query = Query.lower().strip()
    if not new_query:  # guard against IndexError on empty input
        return new_query
    query_words = new_query.split()
    question_words = ['how', 'what', 'who', 'where', 'when', 'why', 'which', 'whose', 'whom', 'can you', "what's", "where's", "how's"]

    # Whole-word matching fixes two defects of the old `word + " " in
    # new_query` test: it missed a question word at the very end of the
    # query (no trailing space), and it still matched substrings such as
    # "how " inside "show me".
    is_question = any(
        phrase in new_query if " " in phrase else phrase in query_words
        for phrase in question_words
    )
    terminator = "?" if is_question else "."
    if new_query[-1] in ['?', '.', '!']:
        new_query = new_query[:-1] + terminator
    else:
        new_query += terminator
    return new_query.capitalize()
+
def SetMicrophoneStatus(Command):
    """Write the microphone toggle state ("True"/"False") to Mic.data."""
    mic_path = rf"{TempDirPath}/Mic.data"
    with open(mic_path, "w", encoding="utf-8") as handle:
        handle.write(Command)
+
def GetMicrophoneStatus():
    """Read and return the microphone toggle state from Mic.data."""
    mic_path = rf"{TempDirPath}/Mic.data"
    with open(mic_path, "r", encoding="utf-8") as handle:
        return handle.read()
+
def SetAssistantStatus(Status):
    """Write the assistant's current status string to Status.data."""
    status_path = rf"{TempDirPath}/Status.data"
    with open(status_path, "w", encoding="utf-8") as handle:
        handle.write(Status)
+
def GetAssistantStatus():
    """Read and return the assistant's current status from Status.data."""
    status_path = rf"{TempDirPath}/Status.data"
    with open(status_path, "r", encoding="utf-8") as handle:
        return handle.read()
+
def MicButtonInitiated():
    # Record the mic as inactive ("False") when the mic button is engaged.
    # NOTE(review): the Initiated/Closed naming reads inverted relative to
    # the values written — confirm which state each event represents.
    SetMicrophoneStatus("False")
+
def MicButtonClosed():
    # Record the mic as active ("True") when the mic button is released.
    SetMicrophoneStatus("True")
+
def GraphicsDirectoryPath(Filename):
    """Return the full path of the graphics asset *Filename*."""
    return rf"{GraphicsDirPath}/{Filename}"
+
def TempDirectoryPath(Filename):
    """Return the full path of the runtime state file *Filename*."""
    return rf"{TempDirPath}/{Filename}"
+
def ShowTextToScreen(Text):
    """Publish *Text* to Responses.data, where the chat view picks it up."""
    responses_path = rf"{TempDirPath}/Responses.data"
    with open(responses_path, "w", encoding="utf-8") as handle:
        handle.write(Text)
+
class ChatSection(QWidget):
    """Chat page widget: a read-only chat transcript, the animated assistant
    GIF, and a status label, all refreshed from state files on a short timer.
    """

    def __init__(self):
        super(ChatSection, self).__init__()
        layout = QVBoxLayout(self)
        # NOTE(review): negative margins/spacing are used here to pull
        # widgets together — verify Qt renders the intended layout.
        layout.setContentsMargins(-10, 40, 40, 100)
        layout.setSpacing(-100)
        # Read-only transcript view; user cannot select or edit text.
        self.chat_text_edit = QTextEdit()
        self.chat_text_edit.setReadOnly(True)
        self.chat_text_edit.setTextInteractionFlags(Qt.NoTextInteraction)
        self.chat_text_edit.setFrameStyle(QFrame.NoFrame)
        layout.addWidget(self.chat_text_edit)
        self.setStyleSheet("background-color: black;")
        layout.setSizeConstraint(QVBoxLayout.SetDefaultConstraint)
        layout.setStretch(1, 1)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))

        # Default character color for newly inserted chat text.
        text_color = QColor(Qt.blue)
        text_color_text = QTextCharFormat()
        text_color_text.setForeground(text_color)
        self.chat_text_edit.setCurrentCharFormat(text_color_text)

        # Animated assistant avatar, scaled to 400x250, top-right aligned.
        self.gif_label = QLabel()
        self.gif_label.setStyleSheet("border: none;")
        movie = QMovie(GraphicsDirectoryPath("Jarvis.gif"))
        max_gif_size_H = 250
        max_gif_size_W = 400
        movie.setScaledSize(QSize(max_gif_size_W, max_gif_size_H))
        self.gif_label.setAlignment(Qt.AlignRight | Qt.AlignTop)
        self.gif_label.setMovie(movie)
        movie.start()
        layout.addWidget(self.gif_label)

        # Status label (mirrors Status.data, e.g. "Thinking...").
        self.label = QLabel("")
        self.label.setStyleSheet("color: white;font-size: 16px ;margin-right:175px;border:none;margin-top: -30px;")
        self.label.setAlignment(Qt.AlignRight)
        layout.addWidget(self.label)

        layout.setSpacing(-10)
        # NOTE(review): gif_label was already added above; adding the same
        # widget twice only moves it within the layout — confirm intended.
        layout.addWidget(self.gif_label)

        font = QFont()
        font.setPointSize(13)
        self.chat_text_edit.setFont(font)

        # Poll the Responses/Status files every 5 ms to refresh the UI.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.loadMessages)
        self.timer.timeout.connect(self.SpeechRecogText)
        self.timer.start(5)

        self.chat_text_edit.viewport().installEventFilter(self)
        # Dark scrollbar styling for the chat view.
        self.setStyleSheet("""
            QScrollBar:vertical {
            border: none;
            background: black;
            width: 10px;
            margin: 0px 0px 0px 0px;
            }
            QScrollBar::handle:vertical {
            background: white;
            min-height: 20px;
            }
            QScrollBar::add-line:vertical {
            background: black;
            height: 10px;
            subcontrol-position: bottom;
            subcontrol-origin: margin;
            }
            QScrollBar::sub-line:vertical {
            background: black;
            height: 10px;
            subcontrol-position: top;
            subcontrol-origin: margin;
            }
            QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {
            border: none;
            background: none;
            color: none;
            }
            QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
            background: none;
            }
        """)

    def loadMessages(self):
        """Append new chat text from Responses.data, skipping empty content
        and exact duplicates of the last rendered message."""
        global old_chat_message
        with open(TempDirectoryPath('Responses.data'), "r", encoding="utf-8") as file:
            messages = file.read()

        if messages is None:
            pass
        elif len(messages) <= 1:
            pass  # ignore empty or one-character content
        elif str(old_chat_message) == str(messages):
            pass  # unchanged since the last tick — nothing to render
        else:
            self.addMessage(message=messages, color='white')
            old_chat_message = messages

    def SpeechRecogText(self):
        """Mirror the assistant status from Status.data into the label."""
        with open(TempDirectoryPath('Status.data'), "r", encoding="utf-8") as file:
            messages = file.read()
        self.label.setText(messages)

    def load_icon(self, path, width=60, height=60):
        """Scale the image at *path* and display it on self.icon_label.

        NOTE(review): ChatSection never creates self.icon_label, so calling
        this (or toggle_icon) on a ChatSection instance raises
        AttributeError — confirm these methods belong on InitialScreen only.
        """
        pixmap = QPixmap(path)
        new_pixmap = pixmap.scaled(width, height)
        self.icon_label.setPixmap(new_pixmap)

    def toggle_icon(self, event = None):
        """Flip the mic icon and persist the new microphone state."""
        if self.toggled:
            self.load_icon(GraphicsDirectoryPath('voice.png'),60,60)
            MicButtonInitiated()

        else:
            self.load_icon(GraphicsDirectoryPath('mic.png'),60,60)
            MicButtonClosed()
        self.toggled = not self.toggled

    def addMessage(self, message, color):
        """Append *message* in *color* to the chat view with block margins."""
        cursor = self.chat_text_edit.textCursor()
        format = QTextCharFormat()
        formatm = QTextBlockFormat()
        formatm.setTopMargin(10)
        formatm.setLeftMargin(10)
        format.setForeground(QColor(color))
        cursor.setCharFormat(format)
        cursor.setBlockFormat(formatm)
        cursor.insertText(message + "\n")
        self.chat_text_edit.setTextCursor(cursor)
+
class InitialScreen(QWidget):
    """Home page: full-screen assistant GIF, a clickable microphone toggle
    icon, and a status label refreshed from Status.data on a timer."""

    def __init__(self, parent=None):
        super().__init__(parent)
        desktop = QApplication.desktop()
        screen_width = desktop.screenGeometry().width()
        screen_height = desktop.screenGeometry().height()
        content_layout = QVBoxLayout()
        content_layout.setContentsMargins(0,0,0,0)

        # Assistant animation scaled to full screen width (16:9 height).
        gif_label = QLabel()
        movie = QMovie(GraphicsDirectoryPath("Jarvis.gif"))
        gif_label.setMovie(movie)
        max_gif_size_H = int(screen_width / 16*9)  # evaluates as (width/16)*9

        movie.setScaledSize(QSize(screen_width,max_gif_size_H))
        gif_label.setAlignment(Qt.AlignCenter)
        movie.start()
        gif_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        # Clickable microphone icon; clicking toggles the Mic.data state.
        self.icon_label = QLabel()
        pixmap = QPixmap(GraphicsDirectoryPath("Mic_on.png"))
        new_pixmap = pixmap.scaled(60, 60)
        self.icon_label.setPixmap(new_pixmap)
        self.icon_label.setFixedSize(150,150)
        self.icon_label.setAlignment(Qt.AlignCenter)
        self.toggled = True
        self.toggle_icon()  # establish the initial icon and Mic.data value
        self.icon_label.mousePressEvent = self.toggle_icon
        self.label = QLabel("")

        self.label.setStyleSheet("color: white; font-size: 16px;margin-bottom:0;")
        content_layout.addWidget(gif_label, alignment=Qt.AlignCenter)
        content_layout.addWidget(self.label, alignment=Qt.AlignCenter)
        content_layout.addWidget(self.icon_label, alignment=Qt.AlignCenter)
        content_layout.setContentsMargins(0, 0, 0, 150)
        self.setLayout(content_layout)
        self.setFixedHeight(screen_height)
        self.setFixedWidth(screen_width)
        self.setStyleSheet("background-color: black;")
        # Poll Status.data every 5 ms so the label tracks the backend state.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.SpeechRecogText)
        self.timer.start(5)

    def SpeechRecogText(self):
        """Mirror the assistant status from Status.data into the label."""

        with open(TempDirectoryPath('Status.data'), "r", encoding="utf-8") as file:
            messages = file.read()
        self.label.setText(messages)

    def load_icon(self, path, width=60, height=60):
        """Scale the image at *path* and display it on the mic icon label."""
        pixmap = QPixmap(path)
        new_pixmap = pixmap.scaled(width, height)
        self.icon_label.setPixmap(new_pixmap)

    def toggle_icon(self, event=None):
        """Flip between the on/off mic icons and persist the new state.

        NOTE(review): the off icon is requested as 'mic_off.png' but the
        repository ships 'Mic_off.png' — confirm this resolves on
        case-sensitive filesystems.
        """
        if self.toggled:
            self.load_icon(GraphicsDirectoryPath('Mic_on.png'), 60, 60)
            MicButtonInitiated()
        else:
            self.load_icon(GraphicsDirectoryPath('mic_off.png'), 60, 60)
            MicButtonClosed()
        self.toggled = not self.toggled
+
class MessageScreen(QWidget):
    """Full-screen page that hosts the ChatSection chat view."""

    def __init__(self, parent=None):
        super().__init__(parent)

        # Size the page to the whole screen.
        geometry = QApplication.desktop().screenGeometry()

        # A spacer label above the chat section, matching the page layout.
        page_layout = QVBoxLayout()
        page_layout.addWidget(QLabel(""))
        page_layout.addWidget(ChatSection())
        self.setLayout(page_layout)

        self.setStyleSheet("background-color: black;")
        self.setFixedHeight(geometry.height())
        self.setFixedWidth(geometry.width())
+
class CustomTopBar(QWidget):
    """Custom window chrome for the frameless main window: title label,
    Home/Chat navigation, minimize/maximize/close buttons, and click-drag
    window moving."""

    def __init__(self, parent, stacked_widget):
        super().__init__(parent)
        self.stacked_widget = stacked_widget  # pages switched by the nav buttons
        self.initUI()

    def initUI(self):
        """Create all bar widgets, wire their actions, and lay them out."""
        self.setFixedHeight(50)
        layout = QHBoxLayout(self)
        layout.setAlignment(Qt.AlignRight)

        # Navigation buttons: switch pages in the stacked widget.
        home_button = QPushButton()
        home_icon = QIcon(GraphicsDirectoryPath("Home.png"))
        home_button.setIcon(home_icon)
        home_button.setText("Home")
        home_button.setStyleSheet("height: 40px; line-height: 40px; background-color: black; color: black;")

        message_button = QPushButton()
        message_icon = QIcon(GraphicsDirectoryPath("Chats.png"))
        message_button.setIcon(message_icon)
        message_button.setText("Chat")
        message_button.setStyleSheet("height: 40px; line-height: 40px; background-color: black; color: black;")
        # Window control buttons.
        minimize_button = QPushButton()
        minimize_icon = QIcon(GraphicsDirectoryPath('Minimize2.png'))
        minimize_button.setIcon(minimize_icon)
        minimize_button.setStyleSheet("background-color: white")
        minimize_button.clicked.connect(self.minimizeWindow)

        self.maximize_button = QPushButton()
        self.maximize_icon = QIcon(GraphicsDirectoryPath('Maximize.png'))
        self.restore_icon = QIcon(GraphicsDirectoryPath('Minimize.png'))
        self.maximize_button.setIcon(self.maximize_icon)
        self.maximize_button.setFlat(True)
        self.maximize_button.setStyleSheet("background-color: white")
        self.maximize_button.clicked.connect(self.maximizeWindow)

        close_button = QPushButton()
        close_icon = QIcon(GraphicsDirectoryPath("Close.png"))
        close_button.setIcon(close_icon)
        close_button.setStyleSheet("background-color: white")
        close_button.clicked.connect(self.closeWindow)

        # Horizontal separator. NOTE(review): created and styled but never
        # added to the layout — confirm whether it should be.
        line_frame = QFrame()
        line_frame.setFixedHeight(1)
        line_frame.setFrameShape(QFrame.HLine)
        line_frame.setFrameShadow(QFrame.Sunken)
        line_frame.setStyleSheet("border-color: black;")

        title_label = QLabel(f"{str(Assistantname).capitalize()} AI ")
        title_label.setStyleSheet("color: black; font-size: 18px; background-color:white;")

        # Home -> stacked index 0, Chat -> stacked index 1.
        home_button.clicked.connect(lambda: self.stacked_widget.setCurrentIndex(0))
        message_button.clicked.connect(lambda: self.stacked_widget.setCurrentIndex(1))

        layout.addWidget(title_label)
        layout.addStretch(1)
        layout.addWidget(home_button)
        layout.addWidget(message_button)
        layout.addStretch(1)
        layout.addWidget(minimize_button)
        layout.addWidget(self.maximize_button)
        layout.addWidget(close_button)
        # Drag state used by mousePressEvent / mouseMoveEvent.
        self.draggable = True
        self.offset = None

    def paintEvent(self,event):
        """Paint the bar's white background, then do default painting."""
        painter = QPainter(self)
        painter.fillRect(self.rect(),Qt.white)
        super().paintEvent(event)

    def minimizeWindow(self):
        """Minimize the parent (main) window."""
        self.parent().showMinimized()

    def maximizeWindow(self):
        """Toggle the parent window between maximized and normal size,
        swapping the button icon to match the new state."""
        if self.parent().isMaximized():
            self.parent().showNormal()
            self.maximize_button.setIcon(self.maximize_icon)
        else:
            self.parent().showMaximized()
            self.maximize_button.setIcon(self.restore_icon)

    def closeWindow(self):
        """Close the parent (main) window."""
        self.parent().close()

    def mousePressEvent(self, event):
        # Remember where inside the bar the drag started.
        if self.draggable:
            self.offset = event.pos()

    def mouseMoveEvent(self, event):
        # Move the whole window so the grab point stays under the cursor.
        if self.draggable and self.offset:
            new_pos = event.globalPos() - self.offset
            self.parent().move(new_pos)

    def showMessageScreen(self):
        """Replace the current screen with a fresh MessageScreen.

        NOTE(review): self.current_screen is never initialized in this
        class, so the first call raises AttributeError; these two show*
        methods also appear unused (navigation goes through the stacked
        widget) — confirm whether they are dead code.
        """
        if self.current_screen is not None:
            self.current_screen.hide()

        message_screen = MessageScreen(self)
        layout = self.parent().layout()
        if layout is not None:
            layout.addWidget(message_screen)
        self.current_screen = message_screen

    def showInitialScreen(self):
        """Replace the current screen with a fresh InitialScreen.

        NOTE(review): relies on self.current_screen, which is never
        initialized in this class — see showMessageScreen.
        """
        if self.current_screen is not None:
            self.current_screen.hide()

        initial_screen = InitialScreen(self)
        layout = self.parent().layout()
        if layout is not None:
            layout.addWidget(initial_screen)
        self.current_screen = initial_screen
+
class MainWindow(QMainWindow):
    """Frameless full-screen main window: custom top bar over a stacked
    Home/Chat page pair."""

    def __init__(self):
        super().__init__()
        self.setWindowFlags(Qt.FramelessWindowHint)  # we draw our own top bar
        self.initUI()

    def initUI(self):
        """Build the stacked screens, size the window, and install the bar."""
        desktop = QApplication.desktop()
        screen_width = desktop.screenGeometry().width()
        screen_height = desktop.screenGeometry().height()

        stacked_widget = QStackedWidget(self)
        initial_screen = InitialScreen()
        message_screen = MessageScreen()
        # BUG FIX: CustomTopBar maps Home -> index 0 and Chat -> index 1,
        # but the screens were added in the opposite order (message first),
        # so the two navigation buttons showed each other's pages. Add the
        # home (initial) screen first so the indices line up.
        stacked_widget.addWidget(initial_screen)
        stacked_widget.addWidget(message_screen)
        self.setGeometry(0, 0, screen_width, screen_height)
        self.setStyleSheet("background-color: black;")
        top_bar = CustomTopBar(self, stacked_widget)
        self.setMenuWidget(top_bar)
        self.setCentralWidget(stacked_widget)
+
def GraphicalUserInterface():
    """Create the Qt application, show the main window, and run the event
    loop until the window closes (exiting the process with Qt's status)."""
    application = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    sys.exit(application.exec_())
+
if __name__ == "__main__":
    # Launch the GUI when this module is executed directly.
    GraphicalUserInterface()
\ No newline at end of file
diff --git a/Frontend/Graphics/Chats.png b/Frontend/Graphics/Chats.png
new file mode 100644
index 00000000..fdc909bc
Binary files /dev/null and b/Frontend/Graphics/Chats.png differ
diff --git a/Frontend/Graphics/Close.png b/Frontend/Graphics/Close.png
new file mode 100644
index 00000000..dd621196
Binary files /dev/null and b/Frontend/Graphics/Close.png differ
diff --git a/Frontend/Graphics/Home.png b/Frontend/Graphics/Home.png
new file mode 100644
index 00000000..4e8a279f
Binary files /dev/null and b/Frontend/Graphics/Home.png differ
diff --git a/Frontend/Graphics/Jarvis.gif b/Frontend/Graphics/Jarvis.gif
new file mode 100644
index 00000000..448aeb75
Binary files /dev/null and b/Frontend/Graphics/Jarvis.gif differ
diff --git a/Frontend/Graphics/Maximize.png b/Frontend/Graphics/Maximize.png
new file mode 100644
index 00000000..591059b5
Binary files /dev/null and b/Frontend/Graphics/Maximize.png differ
diff --git a/Frontend/Graphics/Mic_off.png b/Frontend/Graphics/Mic_off.png
new file mode 100644
index 00000000..d2845299
Binary files /dev/null and b/Frontend/Graphics/Mic_off.png differ
diff --git a/Frontend/Graphics/Mic_on.png b/Frontend/Graphics/Mic_on.png
new file mode 100644
index 00000000..c4770773
Binary files /dev/null and b/Frontend/Graphics/Mic_on.png differ
diff --git a/Frontend/Graphics/Minimize.png b/Frontend/Graphics/Minimize.png
new file mode 100644
index 00000000..bf23e4e6
Binary files /dev/null and b/Frontend/Graphics/Minimize.png differ
diff --git a/Frontend/Graphics/Minimize2.png b/Frontend/Graphics/Minimize2.png
new file mode 100644
index 00000000..863519e6
Binary files /dev/null and b/Frontend/Graphics/Minimize2.png differ
diff --git a/Frontend/Graphics/Settings.png b/Frontend/Graphics/Settings.png
new file mode 100644
index 00000000..20b273b6
Binary files /dev/null and b/Frontend/Graphics/Settings.png differ
diff --git a/Frontend/__pycache__/GUI.cpython-313.pyc b/Frontend/__pycache__/GUI.cpython-313.pyc
new file mode 100644
index 00000000..71b864a9
Binary files /dev/null and b/Frontend/__pycache__/GUI.cpython-313.pyc differ
diff --git a/Frontend/gui-dumy.txt b/Frontend/gui-dumy.txt
new file mode 100644
index 00000000..aec5f382
--- /dev/null
+++ b/Frontend/gui-dumy.txt
@@ -0,0 +1,435 @@
+from PyQt5.QtWidgets import QApplication, QMainWindow, QTextEdit, QStackedWidget, QWidget, QLineEdit, QGridLayout, QVBoxLayout, QHBoxLayout, QPushButton, QFrame, QLabel, QSizePolicy
+from PyQt5.QtGui import QIcon, QPainter, QMovie, QColor, QTextFormat, QFont, QPixmap, QTextBlockFormat, QTextCharFormat
+from PyQt5.QtCore import Qt, QSize, QTimer
+from dotenv import dotenv_values
+import sys
+import os
+
+env_vars = dotenv_values(".env")
+Assistantname = env_vars.get("Assistantname")
+current_dir = os.getcwd()
+old_chat_message = ""
+TempDirPath = rf"{current_dir}/Frontend/Files"
+GraphicsDirPath = rf"{current_dir}/Frontend/Graphics"
+
+def AnswerModifier(Answer):
+ lines = Answer.split('\n')
+ non_empty_lines = [line for line in lines if line.strip()]
+ modified_answer = '\n'.join(non_empty_lines)
+ return modified_answer
+
+def QueryModifier(Query):
+ new_query = Query.lower().strip()
+ query_words = new_query.split()
+ question_words = ['how', 'what', 'who', 'where', 'when', 'why', 'which', 'whose', 'whom', 'can you', "what's", "where's", "how's"]
+
+ if any(word + " " in new_query for word in question_words):
+ if query_words[-1][-1] in ['?', '.', '!']:
+ new_query = new_query[:-1] + "?"
+ else:
+ new_query += "?"
+ else:
+ if query_words[-1][-1] in ['?', '.', '!']:
+ new_query = new_query[:-1] + "."
+ else:
+ new_query += "."
+ return new_query.capitalize()
+
+def SetMicrophoneStatus(Command):
+ with open(rf"{TempDirPath}/Mic.data", "w", encoding="utf-8") as file:
+ file.write(Command)
+
+def GetMicrophoneStatus():
+ with open(rf"{TempDirPath}/Mic.data", "r", encoding="utf-8") as file:
+ Status = file.read()
+ return Status
+
+def SetAssistantStatus(Status):
+ with open(rf"{TempDirPath}/Status.data", "w", encoding="utf-8") as file:
+ file.write(Status)
+
+def GetAssistantStatus():
+ with open(rf"{TempDirPath}/Status.data", "r", encoding="utf-8") as file:
+ Status = file.read()
+ return Status
+
+def MicButtonInitiated():
+ SetMicrophoneStatus("False")
+
+def MicButtonClosed():
+ SetMicrophoneStatus("True")
+
+def GraphicsDirectoryPath(Filename):
+ Path = rf"{GraphicsDirPath}/{Filename}"
+ return Path
+
+def TempDirectoryPath(Filename):
+ Path = rf"{TempDirPath}/{Filename}"
+ return Path
+
+def ShowTextToScreen(Text):
+ with open(rf"{TempDirPath}/Responses.data", "w", encoding="utf-8") as file:
+ file.write(Text)
+
+class ChatSection(QWidget):
+
+ def __init__(self):
+ super(ChatSection, self).__init__()
+ layout = QVBoxLayout(self)
+ layout.setContentsMargins(-10, 40, 40, 100)
+ layout.setSpacing(-100)
+ self.chat_text_edit = QTextEdit()
+ self.chat_text_edit.setReadOnly(True)
+ self.chat_text_edit.setTextInteractionFlags(Qt.NoTextInteraction)
+ self.chat_text_edit.setFrameStyle(QFrame.NoFrame)
+ layout.addWidget(self.chat_text_edit)
+ self.setStyleSheet("background-color: black;")
+ layout.setSizeConstraint(QVBoxLayout.SetDefaultConstraint)
+ layout.setStretch(1, 1)
+ self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
+
+ text_color = QColor(Qt.blue)
+ text_color_text = QTextCharFormat()
+ text_color_text.setForeground(text_color)
+ self.chat_text_edit.setCurrentCharFormat(text_color_text)
+
+ self.gif_label = QLabel()
+ self.gif_label.setStyleSheet("border: none;")
+ movie = QMovie(GraphicsDirectoryPath("Jarvis.gif"))
+ max_gif_size_H = 480
+ max_gif_size_W = 270
+ movie.setScaledSize(QSize(max_gif_size_W, max_gif_size_H))
+ self.gif_label.setAlignment(Qt.AlignRight | Qt.AlignTop)
+ self.gif_label.setMovie(movie)
+ movie.start()
+ layout.addWidget(self.gif_label)
+
+ self.label = QLabel("")
+ self.label.setStyleSheet("color: white;font-size: 16px ;margin-right:195px;border:none;margin-top: -30px;")
+ self.label.setAlignment(Qt.AlignRight)
+ layout.addWidget(self.label)
+
+ layout.setSpacing(-10)
+ layout.addWidget(self.gif_label)
+
+ font = QFont()
+ font.setPointSize(13)
+ self.chat_text_edit.setFont(font)
+
+ self.timer = QTimer(self)
+ self.timer.timeout.connect(self.loadMessages)
+ self.timer.timeout.connect(self.SpeechRecogText)
+ self.timer.start(5)
+
+ self.chat_text_edit.viewport().installEventFilter(self)
+ self.setStyleSheet("""
+ QScrollBar:vertical {
+ border: none;
+ background: black;
+ width: 10px;
+ margin: 0px 0px 0px 0px;
+ }
+ QScrollBar::handle:vertical {
+ background: white;
+ min-height: 20px;
+ }
+ QScrollBar::add-line:vertical {
+ background: black;
+ height: 10px;
+ subcontrol-position: bottom;
+ subcontrol-origin: margin;
+ }
+ QScrollBar::sub-line:vertical {
+ background: black;
+ height: 10px;
+ subcontrol-position: top;
+ subcontrol-origin: margin;
+ }
            QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {
                border: none;
                background: none;
                color: none;
            }

            QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
                background: none;
            }
+ """)
+
+ def loadMessages(self):
+ global old_chat_message
+ with open(TempDirectoryPath('Responses.data'), "r", encoding="utf-8") as file:
+ messages = file.read()
+
+ if messages is None:
+ pass
+ elif len(messages) <= 1:
+ pass
+ elif str(old_chat_message) == str(messages):
+ pass
+ else:
                self.addMessage(message=messages, color='white')
+ old_chat_message = messages
+
+ def SpeechRecogText(self):
+ with open(TempDirectoryPath('Status.data'), "r", encoding="utf-8") as file:
+ messages = file.read()
+ self.label.setText(messages)
+
+ def load_icon(self, path, width=60, height=60):
+ pixmap = QPixmap(path)
+ new_pixmap = pixmap.scaled(width, height)
+ self.icon_label.setPixmap(new_pixmap)
+
+ def toggle_icon(self, event = None):
+ if self.toggled:
+ self.load_icon(GraphicsDirectoryPath('voice.png'),60,60)
+ MicButtonInitiated()
+
+ else:
+ self.load_icon(GraphicsDirectoryPath('mic.png'),60,60)
+ MicButtonClosed()
+ self.toggled = not self.toggled
+
+ def addMessage(self, message, color):
+ cursor = self.chat_text_edit.textCursor()
+ format = QTextCharFormat()
+ formatm = QTextBlockFormat()
+ formatm.setTopMargin(10)
+ formatm.setLeftMargin(10)
+ format.setForeground(QColor(color))
+ cursor.setCharFormat(format)
+ cursor.setBlockFormat(formatm)
+ cursor.insertText(message + "\n")
+ self.chat_text_edit.setTextCursor(cursor)
+
+class InitialScreen(QWidget):
+
+ def __init__(self, parent=None):
+ super().__init__(parent)
+ desktop = QApplication.desktop()
+ screen_width = desktop.screenGeometry().width()
+ screen_height = desktop.screenGeometry().height()
+ content_layout = QVBoxLayout()
+ content_layout.setContentsMargins(0,0,0,0)
+
+ gif_label = QLabel()
+ movie = QMovie(GraphicsDirectoryPath("Jarvis.gif"))
+ gif_label.setMovie(movie)
+ max_gif_size_H = int(screen_width / 16*9)
+
+ movie.setScaledSize(QSize(screen_width,max_gif_size_H))
+ gif_label.setAlignment(Qt.AlignCenter)
+ movie.start()
+ gif_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
+ self.icon_label = QLabel()
+ pixmap = QPixmap(GraphicsDirectoryPath("Mic_on.png"))
+ new_pixmap = pixmap.scaled(60, 60)
+ self.icon_label.setPixmap(new_pixmap)
+ self.icon_label.setFixedSize(150,150)
+ self.icon_label.setAlignment(Qt.AlignCenter)
+ self.toggled = True
+ self.toggle_icon()
+ self.icon_label.mousePressEvent = self.toggle_icon
+ self.label = QLabel("")
+
+ self.label.setStyleSheet("color: white; font-size: 16px;margin-bottom:0;")
+ content_layout.addWidget(gif_label, alignment=Qt.AlignCenter)
+ content_layout.addWidget(self.label, alignment=Qt.AlignCenter)
+ content_layout.addWidget(self.icon_label, alignment=Qt.AlignCenter)
+ content_layout.setContentsMargins(0, 0, 0, 150)
+ self.setLayout(content_layout)
+ self.setFixedHeight(screen_height)
+ self.setFixedWidth(screen_width)
+ self.setStyleSheet("background-color: black;")
+ self.timer = QTimer(self)
+ self.timer.timeout.connect(self.SpeechRecogText)
+ self.timer.start(5)
+
+ def SpeechRecogText(self):
+
+ with open(TempDirectoryPath('Status.data'), "r", encoding="utf-8") as file:
+ messages = file.read()
+ self.label.setText(messages)
+
+ def load_icon(self, path, width=60, height=60):
+ pixmap = QPixmap(path)
+ new_pixmap = pixmap.scaled(width, height)
+ self.icon_label.setPixmap(new_pixmap)
+
+ def toggle_icon(self, event=None):
+ if self.toggled:
+ self.load_icon(GraphicsDirectoryPath('Mic_on.png'), 60, 60)
+ MicButtonInitiated()
+ else:
+ self.load_icon(GraphicsDirectoryPath('mic_off.png'), 60, 60)
+ MicButtonClosed()
+ self.toggled = not self.toggled
+
+class MessageScreen(QWidget):
+ def __init__(self, parent = None):
+ super().__init__(parent)
+ desktop = QApplication.desktop()
+
+ screen_width = desktop.screenGeometry().width()
+ screen_height = desktop.screenGeometry().height()
+
+ layout = QVBoxLayout()
+
+ label = QLabel("")
+
+ layout.addWidget(label)
+
+ chat_section = ChatSection()
+
+ layout.addWidget(chat_section)
+
+ self.setLayout(layout)
+
+ self.setStyleSheet("background-color: black;")
+
+ self.setFixedHeight(screen_height)
+ self.setFixedWidth(screen_width)
+
+class CustomTopBar(QWidget):
+ def __init__(self, parent, stacked_widget):
+ super().__init__(parent)
+ self.stacked_widget = stacked_widget
+ self.initUI()
+
+ def initUI(self):
+ self.setFixedHeight(50)
+ layout = QHBoxLayout(self)
+ layout.setAlignment(Qt.AlignRight)
+
+ home_button = QPushButton()
+ home_icon = QIcon(GraphicsDirectoryPath("Home.png"))
+ home_button.setIcon(home_icon)
+ home_button.setText("Home")
+ home_button.setStyleSheet("height: 40px; line-height: 40px; background-color: black; color: black;")
+
+ message_button = QPushButton()
+ message_icon = QIcon(GraphicsDirectoryPath("Chats.png"))
+ message_button.setIcon(message_icon)
+ message_button.setText("Chat")
+ message_button.setStyleSheet("height: 40px; line-height: 40px; background-color: black; color: black;")
+ minimize_button = QPushButton()
+ minimize_icon = QIcon(GraphicsDirectoryPath('Minimize2.png'))
+ minimize_button.setIcon(minimize_icon)
+ minimize_button.setStyleSheet("background-color: white")
+ minimize_button.clicked.connect(self.minimizeWindow)
+
+ self.maximize_button = QPushButton()
+ self.maximize_icon = QIcon(GraphicsDirectoryPath('Maximize.png'))
+ self.restore_icon = QIcon(GraphicsDirectoryPath('Minimize.png'))
+ self.maximize_button.setIcon(self.maximize_icon)
+ self.maximize_button.setFlat(True)
+ self.maximize_button.setStyleSheet("background-color: white")
+ self.maximize_button.clicked.connect(self.maximizeWindow)
+
+ close_button = QPushButton()
+ close_icon = QIcon(GraphicsDirectoryPath("Close.png"))
+ close_button.setIcon(close_icon)
+ close_button.setStyleSheet("background-color: white")
+ close_button.clicked.connect(self.closeWindow)
+
+ line_frame = QFrame()
+ line_frame.setFixedHeight(1)
+ line_frame.setFrameShape(QFrame.HLine)
+ line_frame.setFrameShadow(QFrame.Sunken)
+ line_frame.setStyleSheet("border-color: black;")
+
+ title_label = QLabel(f"{str(Assistantname).capitalize()} AI ")
+ title_label.setStyleSheet("color: black; font-size: 18px; background-color:white;")
+
+ home_button.clicked.connect(lambda: self.stacked_widget.setCurrentIndex(0))
+ message_button.clicked.connect(lambda: self.stacked_widget.setCurrentIndex(1))
+
+ layout.addWidget(title_label)
+ layout.addStretch(1)
+ layout.addWidget(home_button)
+ layout.addWidget(message_button)
+ layout.addStretch(1)
+ layout.addWidget(minimize_button)
+ layout.addWidget(self.maximize_button)
+ layout.addWidget(close_button)
+ self.draggable = True
+ self.offset = None
+
+ def paintEvent(self,event):
+ painter = QPainter(self)
+ painter.fillRect(self.rect(),Qt.white)
+ super().paintEvent(event)
+
+ def minimizeWindow(self):
+ self.parent().showMinimized()
+
+ def maximizeWindow(self):
+ if self.parent().isMaximized():
+ self.parent().showNormal()
+ self.maximize_button.setIcon(self.maximize_icon)
+ else:
+ self.parent().showMaximized()
+ self.maximize_button.setIcon(self.restore_icon)
+
+ def closeWindow(self):
+ self.parent().close()
+
+ def mousePressEvent(self, event):
+ if self.draggable:
+ self.offset = event.pos()
+
+ def mouseMoveEvent(self, event):
+ if self.draggable and self.offset:
+ new_pos = event.globalPos() - self.offset
+ self.parent().move(new_pos)
+
+ def showMessageScreen(self):
+ if self.current_screen is not None:
+ self.current_screen.hide()
+
+ message_screen = MessageScreen(self)
+ layout = self.parent().layout()
+ if layout is not None:
+ layout.addWidget(message_screen)
+ self.current_screen = message_screen
+
+ def showInitialScreen(self):
+ if self.current_screen is not None:
+ self.current_screen.hide()
+
+ initial_screen = InitialScreen(self)
+ layout = self.parent().layout()
+ if layout is not None:
+ layout.addWidget(initial_screen)
+ self.current_screen = initial_screen
+
+class MainWindow(QMainWindow):
+ def __init__(self):
+ super().__init__()
+ self.setWindowFlags(Qt.FramelessWindowHint)
+ self.initUI()
+
+ def initUI(self):
+ desktop = QApplication.desktop()
+ screen_width = desktop.screenGeometry().width()
+ screen_height = desktop.screenGeometry().height()
+
+ stacked_widget = QStackedWidget(self)
+ initial_screen = InitialScreen()
+ message_screen = MessageScreen()
+ stacked_widget.addWidget(message_screen)
+ stacked_widget.addWidget(initial_screen)
+ self.setGeometry(0, 0, screen_width, screen_height)
+ self.setStyleSheet("background-color: black;")
+ top_bar = CustomTopBar(self, stacked_widget)
+ self.setMenuWidget(top_bar)
+ self.setCentralWidget(stacked_widget)
+
+def GraphicalUserInterface():
+ app = QApplication(sys.argv)
+ window = MainWindow()
+ window.show()
+ sys.exit(app.exec_())
+
+if __name__ == "__main__":
+    # Launch the GUI when this module is run directly.
+    GraphicalUserInterface()
\ No newline at end of file
diff --git a/Main.py b/Main.py
new file mode 100644
index 00000000..d60d282c
--- /dev/null
+++ b/Main.py
@@ -0,0 +1,203 @@
+from Frontend.GUI import (
+ GraphicalUserInterface,
+ SetAssistantStatus,
+ ShowTextToScreen,
+ TempDirectoryPath,
+ SetMicrophoneStatus,
+ AnswerModifier,
+ QueryModifier,
+ GetMicrophoneStatus,
+ GetAssistantStatus
+)
+from Backend.Model import FirstLayerDMM
+from Backend.RealtimeSearchEngine import RealtimeSearchEngine
+from Backend.Automation import Automation
+from Backend.SpeechToText import SpeechRecognition
+from Backend.Chatbot import ChatBot
+from Backend.TextToSpeech import TextToSpeech
+
+# Ensure the audio output is configured correctly
+import pyaudio
+import wave
+
+def play_audio(file_path):
+ chunk = 1024
+ wf = wave.open(file_path, 'rb')
+ pa = pyaudio.PyAudio()
+
+ stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
+ channels=wf.getnchannels(),
+ rate=wf.getframerate(),
+ output=True)
+
+ data = wf.readframes(chunk)
+
+ while data:
+ stream.write(data)
+ data = wf.readframes(chunk)
+
+ stream.stop_stream()
+ stream.close()
+ pa.terminate()
+from dotenv import dotenv_values
+from asyncio import run
+from time import sleep
+import subprocess
+import threading
+import json
+import os
+
+env_vars = dotenv_values(".env")
+Username = env_vars.get("Username")
+Assistantname = env_vars.get("Assistantname")
+DefaultMessage = f'''{Username}: Hello {Assistantname}, How may I help you?'''
+subprocesses = []
+
+Functions = ["open", "close", "play", "system", "content", "google search"]
+
+def ShowDefaultChatIfNoChats():
+ File = open(r'Data/ChatLog.json', "r", encoding="utf-8")
+ if len(File.read()) < 5:
+ with open(TempDirectoryPath('Database.data'), 'w', encoding="utf-8") as file:
+ file.write("")
+ with open(TempDirectoryPath('Responses.data'), 'w', encoding='utf-8') as file:
+ file.write(DefaultMessage)
+
+def ReadChatLogJson():
+ with open(r'Data/ChatLog.json', "r", encoding="utf-8") as file:
+ chatlog_data = json.load(file)
+ return chatlog_data
+
+def ChatLogIntegration():
+ json_data = ReadChatLogJson()
+ formatted_chatlog = ""
+ for entry in json_data:
+ if entry["role"] == "user":
+ formatted_chatlog += f"User: {entry['content']}\n"
+ elif entry["role"] == "assistant":
+ formatted_chatlog += f"Assistant: {entry['content']}\n"
+ formatted_chatlog = formatted_chatlog.replace("User", Username + " ")
+ formatted_chatlog = formatted_chatlog.replace("Assistant", Assistantname + " ")
+ with open(TempDirectoryPath('FormattedChatLog.txt'), 'w', encoding='utf-8') as file:
+ file.write(AnswerModifier(formatted_chatlog))
+
+def ShowChatsOnGUI():
+ File = open(TempDirectoryPath('Database.data'), "r", encoding="utf-8")
+ Data = File.read()
+ if len(str(Data)) > 0:
+ lines = Data.split('\n')
+ result = '\n'.join(lines)
+ File.close()
+ File = open(TempDirectoryPath('Responses.data'), "w", encoding="utf-8")
+ File.write(result)
+ File.close()
+
+def InitialExecution():
+    # One-time startup: mute the mic flag, clear the screen, seed the default
+    # greeting if there is no history, then render the existing chat log.
+    # NOTE(review): order matters — the default chat must be seeded before
+    # the log is formatted and shown.
+    SetMicrophoneStatus("False")
+    ShowTextToScreen("")
+    ShowDefaultChatIfNoChats()
+    ChatLogIntegration()
+    ShowChatsOnGUI()
+
+# Run the startup sequence at import time so the GUI has content immediately.
+InitialExecution()
+
+def MainExecution():
+ TaskExecution = False
+ ImageExecution = False
+ ImageGenerationQuery = ""
+
+ SetAssistantStatus("Listening...")
+ Query = SpeechRecognition()
+ ShowTextToScreen(f"{Username}: {Query}")
+ SetAssistantStatus("Thinking...")
+ Decision = FirstLayerDMM(Query)
+
+ print("")
+ print(f"Decision: {Decision}")
+ print("")
+
+ G = any([i for i in Decision if i.startswith("general")])
+ R = any([i for i in Decision if i.startswith("realtime")])
+
+
+ Merged_query = " and ".join([" ".join(i.split()[1:]) for i in Decision if i.startswith("general") or i.startswith("realtime")])
+
+ for queries in Decision:
+ if "generate" in queries:
+ ImageGenerationQuery = str(queries)
+ audio_file = TextToSpeech(Answer)
+ play_audio(audio_file)
+
+ for queries in Decision:
+ if TaskExecution == False:
+ if any(queries.startswith(func) for func in Functions):
+ run(Automation(list(Decision)))
+ TaskExecution = True
+
+ if ImageExecution == True:
+ with open(r'Frontend/Files/ImageGeneration.data', "w") as file:
+ file.write(f"{ImageGenerationQuery}, True")
+
+ try:
+ p1 = subprocess.Popen(['python', r'Backend/ImageGeneration.py'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, shell=False)
+ subprocesses.append(p1)
+ except Exception as e:
+ print(f"Error starting ImageGeneration.py: {e}")
+
+ if G and R or R:
+
+ SetAssistantStatus("Searching...")
+ Answer = RealtimeSearchEngine(QueryModifier(Merged_query))
+ ShowTextToScreen(f"{Assistantname}: {Answer}")
+ SetAssistantStatus("Answering...")
+ TextToSpeech(Answer)
+ return True
+ else:
+ for Queries in Decision:
+
+ if "general" in Queries:
+ SetAssistantStatus("Thinking...")
+ QueryFinal = Queries.replace("general", "")
+ Answer = ChatBot(QueryModifier(QueryFinal))
+ ShowTextToScreen(f"{Assistantname}: {Answer}")
+ SetAssistantStatus("Answering...")
+ TextToSpeech(Answer)
+ return True
+
+ elif "realtime" in Queries:
+ SetAssistantStatus("Searching...")
+ QueryFinal = Queries.replace("realtime", "")
+ Answer = RealtimeSearchEngine(QueryModifier(QueryFinal))
+ ShowTextToScreen(f"{Assistantname}: {Answer}")
+ SetAssistantStatus("Answering...")
+ TextToSpeech(Answer)
+ return True
+
+ elif "exit" in Queries:
+ QueryFinal = "Okay, Bye!"
+ Answer = ChatBot(QueryModifier(QueryFinal))
+ ShowTextToScreen(f"{Assistantname}: {Answer}")
+ SetAssistantStatus("Answering...")
+ TextToSpeech(Answer)
+ SetAssistantStatus("Answering...")
+ os._exit(1)
+
+def FirstThread():
+ while True:
+ Currentstatus = GetMicrophoneStatus()
+ if Currentstatus == "True":
+ MainExecution()
+ else:
+ ATStatus = GetAssistantStatus()
+ if "Available..." in ATStatus:
+ sleep(0.1)
+ else:
+ SetAssistantStatus("Available...")
+
+def SecondThread():
+    # Run the Qt GUI; blocks the calling thread until the window closes.
+    GraphicalUserInterface()
+
+if __name__ == "__main__":
+    # Backend loop runs as a daemon thread; the GUI owns the main thread, so
+    # the process ends when the window is closed.
+    thread2 = threading.Thread(target=FirstThread, daemon=True)
+    thread2.start()
+    SecondThread()
\ No newline at end of file