forked from Concept-Bytes/Jarvis
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathassist.py
109 lines (94 loc) · 3.62 KB
/
assist.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
from openai import OpenAI
import time
from dotenv import load_dotenv
from pathlib import Path
from pygame import mixer # Load the popular external library
import time
import os
# Global flag: whether spoken (TTS) replies are enabled.
tts_enabled = True
load_dotenv()  # Load environment variables from .env file
# Initialize the client with environment variables
# NOTE(review): the calls below run at import time and hit the OpenAI API;
# importing this module therefore requires network access and valid env vars.
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
# Start pygame's audio mixer once, at import time, for later playback.
mixer.init()
# Retrieve the assistant using environment variable
assistant = client.beta.assistants.retrieve(os.getenv('ASSISTANT_ID'))
# Create and retrieve the thread using environment variable
# (CHAT_THREAD_ID must name an existing thread; presumably created once
# out-of-band — confirm against setup docs.)
jarvis_thread = os.getenv('CHAT_THREAD_ID')
thread = client.beta.threads.retrieve(jarvis_thread)
# Function to ask a question to the assistant
def ask_question_standard(question):
    """Ask Jarvis a one-off question (no conversation memory).

    Sends the question to the chat-completions endpoint with a fixed
    Jarvis persona prompt. The current local date/time is appended to
    the system prompt because LLMs do not know the time unless told.

    Args:
        question: The user's question as plain text.

    Returns:
        The assistant's reply as a string.
    """
    # This is an example of how you can feed in context.
    # Hint: LLMs won't know the time or date unless you tell them.
    date_and_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    # Fix: the original prompt was garbled mid-sentence
    # ("do so. nd your sentence with #lamp-1...") — reconstructed so the
    # device-command instruction reads as one coherent sentence.
    context = """
    You are an assistant named Jarvis like from the ironman movies.
    You are to act like him and provide help as best you can.
    Be funny and witty. Keep it brief and serious.
    Be a little sassy in your responses.
    You have a variety of smart devices to control.
    You can control them by ending your sentence with #light1-off like this.
    Only use commands like this if I tell you to do so. End your sentence with #lamp-1 for on and #lamp-0 for off.
    Response in less than 80 words.
    """ + date_and_time
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        messages=[
            {"role": "system", "content": context},
            {"role": "user", "content": question}
        ]
    )
    return response.choices[0].message.content
# Try this if you want Jarvis to remember the conversation
def ask_question_memory(question):
    """Ask Jarvis a question on the persistent Assistants-API thread.

    Appends the question to the shared thread, starts a run, polls until
    the run reaches a terminal state, and returns the newest message text.
    Because the thread is reused across calls, Jarvis "remembers" the
    conversation.

    Args:
        question: The user's question as plain text.

    Returns:
        The assistant's latest reply text, or an error string if the run
        did not complete successfully.
    """
    global thread
    # Kept global so other modules can inspect the last submitted message
    # (preserves the original module-level side effect).
    global thread_message
    thread_message = client.beta.threads.messages.create(
        thread.id,
        role="user",
        content=question,
    )
    # Create a run for the thread
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
    )
    # Poll until the run reaches a terminal state. The original loop only
    # checked 'completed'/'failed', so a run ending 'cancelled', 'expired',
    # or parked at 'requires_action' would spin forever.
    while True:
        run_status = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id
        )
        if run_status.status == 'completed':
            break
        if run_status.status == 'failed':
            return "The run failed."
        if run_status.status in ('cancelled', 'expired', 'requires_action'):
            return f"The run ended with status: {run_status.status}."
        time.sleep(1)  # Wait for 1 second before checking again
    # Retrieve messages after the run has succeeded; data[0] is the newest.
    messages = client.beta.threads.messages.list(
        thread_id=thread.id
    )
    return messages.data[0].content[0].text.value
# Function to ask a question to the assistant with an image
def play_sound(file_path):
    """Load an audio file into the pygame mixer and start playback.

    Playback is non-blocking; callers that need to wait should poll
    ``mixer.music.get_busy()``.

    Args:
        file_path: Path to the audio file to play.
    """
    music = mixer.music
    music.load(file_path)
    music.play()
# Function to generate TTS for each sentence and play them
def TTS(text):
    """Speak *text* aloud: synthesize via OpenAI TTS, play, then clean up.

    Generates ``speech.mp3`` in the working directory, plays it through
    the pygame mixer, blocks until playback finishes, then unloads and
    deletes the file.

    Args:
        text: The text to synthesize and speak.

    Returns:
        The literal string "done" once playback has finished and the
        temporary file is removed.
    """
    # Fix: was f"speech.mp3" — an f-string with no placeholder.
    speech_file_path = Path("speech.mp3")
    speech_file_path = generate_tts(text, speech_file_path)
    play_sound(speech_file_path)
    # Block until the mixer finishes so we don't delete the file mid-play.
    while mixer.music.get_busy():
        time.sleep(1)
    mixer.music.unload()
    # Delete the file after playing
    os.remove(speech_file_path)
    return "done"
# Function to generate TTS and return the file path
def generate_tts(sentence, speech_file_path):
    """Synthesize *sentence* to speech and write it to *speech_file_path*.

    Uses the OpenAI ``tts-1`` model with the ``echo`` voice.

    Args:
        sentence: Text to convert to speech.
        speech_file_path: Destination path for the generated audio.

    Returns:
        The destination path as a string.
    """
    speech = client.audio.speech.create(
        input=sentence,
        model="tts-1",
        voice="echo",
    )
    # NOTE(review): stream_to_file is deprecated in newer openai SDKs in
    # favor of with_streaming_response — confirm against the pinned version.
    speech.stream_to_file(speech_file_path)
    return str(speech_file_path)