klotbot_novi.py
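"""KlotBot - Streamlit chat assistant.

Streams GPT-4o answers over RAG context (rag_tool_answer), accepts voice
questions via whisper-1 transcription, can read the answer aloud and suggest
follow-up questions, supports image/document uploads, and persists every
conversation thread through ConversationDatabase.
"""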
import base64
import io
import os
import uuid

import mysql.connector
import streamlit as st
from openai import OpenAI
from streamlit_mic_recorder import mic_recorder

from myfunc.embeddings import rag_tool_answer
from myfunc.mojafunkcija import positive_login, initialize_session_state, check_openai_errors, read_txts, copy_to_clipboard
from myfunc.mssql import ConversationDatabase, work_prompts
from myfunc.pyui_javascript import chat_placeholder_color, st_fixed_container
from myfunc.various_tools import play_audio_from_stream_s, predlozeni_odgovori, process_request
mprompts = work_prompts()

default_values = {
    "prozor": st.query_params.get('prozor', "d"),
    "_last_speech_to_text_transcript_id": 0,
    "_last_speech_to_text_transcript": None,
    "success": False,
    "toggle_state": False,
    "button_clicks": False,
    "prompt": '',
    "vrsta": False,
    "messages": {},
    "image_ai": None,
    "thread_id": 'ime',
    "filtered_messages": "",
    "selected_question": None,
    "username": "positive",
    "openai_model": "gpt-4o",
    "azure_filename": "altass.csv",
    "app_name": "KlotBot",
    "upload_key": 0,
}
initialize_session_state(default_values)

if st.session_state.thread_id not in st.session_state.messages:
    st.session_state.messages[st.session_state.thread_id] = [{'role': 'system', 'content': mprompts["sys_ragbot"]}]
api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI()

# Set chat input placeholder color
chat_placeholder_color("#f1f1f1")

avatar_bg = "botbg.png"
avatar_ai = "bot.png"
avatar_user = "user.webp"
avatar_sys = "positivelogo.jpg"

global phglob
phglob = st.empty()
# Function to get image as base64
@st.cache_data
def get_img_as_base64(file):
    with open(file, "rb") as f:
        data = f.read()
    return base64.b64encode(data).decode()


# Apply background image
def apply_background_image(img_path):
    img = get_img_as_base64(img_path)
    page_bg_img = f"""
    <style>
    [data-testid="stAppViewContainer"] > .main {{
        background-image: url("data:image/png;base64,{img}");
        background-size: auto;
        background-position: center;
        background-repeat: no-repeat;
        background-attachment: fixed;
    }}
    </style>
    """
    st.markdown(page_bg_img, unsafe_allow_html=True)
def custom_streamlit_style():
    custom_streamlit_style = """
    <style>
    div[data-testid="stHorizontalBlock"] {
        display: flex;
        flex-direction: row;
        width: 100%;
        flex-wrap: nowrap;
        align-items: center;
        justify-content: flex-start;
    }
    .horizontal-item {
        margin-right: 5px; /* Adjust spacing as needed */
    }
    /* Mobile styles */
    @media (max-width: 640px) {
        div[data-testid="stHorizontalBlock"] {
            width: 160px; /* Fixed width for mobile */
        }
    }
    </style>
    """
    st.markdown(custom_streamlit_style, unsafe_allow_html=True)
# Callback function for audio recorder
def callback():
    if st.session_state.my_recorder_output:
        return st.session_state.my_recorder_output['bytes']


custom_streamlit_style()
apply_background_image(avatar_bg)


def reset_memory():
    st.session_state.messages[st.session_state.thread_id] = [{'role': 'system', 'content': mprompts["sys_ragbot"]}]
    st.session_state.filtered_messages = ""
def main():
    if "thread_id" not in st.session_state:
        def get_thread_ids():
            with ConversationDatabase() as db:
                return db.list_threads(st.session_state.app_name, st.session_state.username)

        new_thread_id = str(uuid.uuid4())
        thread_name = f"Thread_{new_thread_id}"
        conversation_data = [{'role': 'system', 'content': mprompts["sys_ragbot"]}]
        if thread_name not in get_thread_ids():
            with ConversationDatabase() as db:
                try:
                    db.add_sql_record(st.session_state.app_name, st.session_state.username, thread_name, conversation_data)
                except mysql.connector.IntegrityError as e:
                    if e.errno == 1062:  # Duplicate entry for key
                        st.error("Thread ID already exists. Please try again with a different ID.")
                    else:
                        raise  # Re-raise the exception if it's not related to a duplicate entry
        st.session_state.thread_id = thread_name
        st.session_state.messages[thread_name] = []
        try:
            if "Thread_" in st.session_state.thread_id:
                contains_system_role = any(message.get('role') == 'system' for message in st.session_state.messages[thread_name])
                if not contains_system_role:
                    st.session_state.messages[thread_name].append({'role': 'system', 'content': mprompts["sys_ragbot"]})
        except:
            pass
    if st.session_state.thread_id is None:
        st.info("Start a conversation by selecting a new or existing conversation.")
    else:
        current_thread_id = st.session_state.thread_id
        try:
            if "Thread_" in st.session_state.thread_id:
                contains_system_role = any(message.get('role') == 'system' for message in st.session_state.messages[current_thread_id])
                if not contains_system_role:
                    st.session_state.messages[current_thread_id].append({'role': 'system', 'content': mprompts["sys_ragbot"]})
        except:
            pass
        # Check if there's an existing conversation in the session state
        if current_thread_id not in st.session_state.messages:
            # If not, initialize it with the conversation from the database or as an empty list
            with ConversationDatabase() as db:
                st.session_state.messages[current_thread_id] = db.query_sql_record(st.session_state.app_name, st.session_state.username, current_thread_id) or []

        if current_thread_id in st.session_state.messages:
            # apply the matching avatar for each message role
            for message in st.session_state.messages[current_thread_id]:
                if message["role"] == "assistant":
                    with st.chat_message(message["role"], avatar=avatar_ai):
                        st.markdown(message["content"])
                elif message["role"] == "user":
                    with st.chat_message(message["role"], avatar=avatar_user):
                        st.markdown(message["content"])
                elif message["role"] == "system":
                    pass
                else:
                    with st.chat_message(message["role"], avatar=avatar_sys):
                        st.markdown(message["content"])
            # Options
            col1, col2, col3 = st.columns(3)
            with col1:
                # Use the fixed container and apply the horizontal layout
                with st_fixed_container(mode="fixed", position="bottom", border=False, margin='10px'):
                    with st.popover("Više opcija", help="Snimanje pitanja, Slušanje odgovora, Priloži sliku"):
                        # voice input
                        audio = mic_recorder(
                            key='my_recorder',
                            callback=callback,
                            start_prompt="🎤 Počni snimanje pitanja",
                            stop_prompt="⏹ Završi snimanje i pošalji ",
                            just_once=False,
                            use_container_width=False,
                            format="webm",
                        )
                        # suggested questions/answers
                        st.session_state.toggle_state = st.toggle('✎ Predlozi pitanja/odgovora', key='toggle_button_predlog', help="Predlaže sledeće pitanje")
                        # spoken answer
                        st.session_state.button_clicks = st.toggle('🔈 Slušaj odgovor', key='toggle_button', help="Glasovni odgovor asistenta")
                        # image / document upload
                        st.session_state.image_ai, st.session_state.vrsta = read_txts()

            # main conversation prompt
            st.session_state.prompt = st.chat_input("Kako vam mogu pomoći?")
            if st.session_state.selected_question is not None:
                st.session_state.prompt = st.session_state['selected_question']
                st.session_state['selected_question'] = None

            if st.session_state.prompt is None:
                # recorded question (speech-to-text)
                if audio is not None:
                    id = audio['id']
                    if id > st.session_state._last_speech_to_text_transcript_id:
                        st.session_state._last_speech_to_text_transcript_id = id
                        audio_bio = io.BytesIO(audio['bytes'])
                        audio_bio.name = 'audio.webm'
                        st.session_state.success = False
                        err = 0
                        while not st.session_state.success and err < 3:
                            try:
                                transcript = client.audio.transcriptions.create(
                                    model="whisper-1",
                                    file=audio_bio,
                                    language="sr"
                                )
                            except Exception as e:
                                st.error(f"Neočekivana greška: {str(e)} pokušajte malo kasnije.")
                                err += 1
                            else:
                                st.session_state.success = True
                                st.session_state.prompt = transcript.text
            # Main conversation answer
            if st.session_state.prompt:
                # Original processing to generate complete_prompt
                result = rag_tool_answer(st.session_state.prompt, phglob)

                if result == "CALENDLY":
                    full_prompt = ""
                    full_response = ""
                    temp_full_prompt = {"role": "user", "content": [{"type": "text", "text": st.session_state.prompt}]}

                elif st.session_state.image_ai:
                    if st.session_state.vrsta:
                        full_prompt = st.session_state.prompt + st.session_state.image_ai
                        temp_full_prompt = {
                            "role": "user",
                            "content": [
                                {"type": "text", "text": full_prompt},
                            ]
                        }
                        st.session_state.messages[current_thread_id].append(
                            {"role": "user", "content": st.session_state.prompt}
                        )
                        with st.chat_message("user", avatar=avatar_user):
                            st.markdown(st.session_state.prompt)

                    if 3 > 5:  # image-description branch, intentionally unreachable (disabled)
                        pre_prompt = """Describe the uploaded image in detail, focusing on the key elements such as objects, colors, sizes,
                        positions, actions, and any notable characteristics or interactions. Provide a clear and vivid description
                        that captures the essence and context of the image. """
                        full_prompt = pre_prompt + st.session_state.prompt
                        temp_full_prompt = {
                            "role": "user",
                            "content": [
                                {"type": "text", "text": full_prompt},
                                {"type": "image_url", "image_url": {"url": st.session_state.image_ai}}
                            ]
                        }
                        st.session_state.messages[current_thread_id].append(
                            {"role": "user", "content": st.session_state.prompt}
                        )
                        with st.chat_message("user", avatar=avatar_user):
                            st.markdown(st.session_state.prompt)

                else:
                    temp_full_prompt = {"role": "user", "content": [{"type": "text", "text": f"""Using the following context:
{result}
answer the question:
{st.session_state.prompt} :
"""}]}
                    # Append only the user's original prompt to the actual conversation log
                    st.session_state.messages[current_thread_id].append({"role": "user", "content": st.session_state.prompt})
                    # Display user prompt in the chat
                    with st.chat_message("user", avatar=avatar_user):
                        st.markdown(st.session_state.prompt)
                # everything below runs only when the answer is not CALENDLY
                if result != "CALENDLY":
                    # Generate and display the assistant's response using the temporary messages list
                    with st.chat_message("assistant", avatar=avatar_ai):
                        message_placeholder = st.empty()
                        full_response = ""
                        for response in client.chat.completions.create(
                            model="gpt-4o",
                            temperature=0,
                            messages=st.session_state.messages[current_thread_id] + [temp_full_prompt],
                            stream=True,
                            stream_options={"include_usage": True},
                        ):
                            try:
                                full_response += (response.choices[0].delta.content or "")
                                message_placeholder.markdown(full_response + "▌")
                            except Exception:
                                pass
                        message_placeholder.markdown(full_response)
                        copy_to_clipboard(full_response)

                    # Append assistant's response to the conversation
                    st.session_state.messages[current_thread_id].append({"role": "assistant", "content": full_response})

                    st.session_state.filtered_messages = ""
                    filtered_data = [entry for entry in st.session_state.messages[current_thread_id] if entry['role'] in ["user", 'assistant']]
                    for item in filtered_data:  # list used for the conversation download
                        st.session_state.filtered_messages += (f"{item['role']}: {item['content']}\n")
                    # if both toggles are on, process them asynchronously; otherwise handle each one on its own
                    if st.session_state.button_clicks and st.session_state.toggle_state:
                        process_request(client, temp_full_prompt, full_response, api_key)
                    else:
                        if st.session_state.button_clicks:  # only read the answer aloud
                            play_audio_from_stream_s(full_response)
                        if st.session_state.toggle_state:  # only show suggested follow-up questions
                            predlozeni_odgovori(temp_full_prompt)

                    if st.session_state.vrsta:
                        st.info(f"Dokument je učitan ({st.session_state.vrsta}) - uklonite ga iz uploadera kada ne želite više da pričate o njegovom sadržaju.")

                # save the conversation (and token usage) to the SQL database
                with ConversationDatabase() as db:
                    db.update_sql_record(st.session_state.app_name, st.session_state.username, current_thread_id, st.session_state.messages[current_thread_id])
            with col2:  # save the conversation to a txt file
                with st_fixed_container(mode="fixed", position="bottom", border=False, margin='10px'):
                    st.download_button(
                        "⤓ Preuzmi",
                        st.session_state.filtered_messages,
                        file_name="istorija.txt",
                        help="Čuvanje istorije ovog razgovora"
                    )
            with col3:
                with st_fixed_container(mode="fixed", position="bottom", border=False, margin='10px'):
                    st.button("🗑 Obriši", on_click=reset_memory)
def main_wrap_for_st():
    check_openai_errors(main)


deployment_environment = os.environ.get("DEPLOYMENT_ENVIRONMENT")

if deployment_environment == "Streamlit":
    name, authentication_status, username = positive_login(main_wrap_for_st, " ")
else:
    if __name__ == "__main__":
        check_openai_errors(main)
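# A minimal local-run sketch (assuming the myfunc package is installed and the
# OPENAI_API_KEY / DEPLOYMENT_ENVIRONMENT environment variables are set):
#   streamlit run klotbot_novi.py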