From 78ff3d361b9f8b74cfb8de5616c02f8920d67723 Mon Sep 17 00:00:00 2001
From: Moodbot11
Date: Fri, 21 Jun 2024 14:10:49 -0500
Subject: [PATCH 1/7] Update Chatbot.py

changed model
---
 Chatbot.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Chatbot.py b/Chatbot.py
index 0a4f2df45..d93243aa3 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -23,7 +23,7 @@
     client = OpenAI(api_key=openai_api_key)
     st.session_state.messages.append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
-    response = client.chat.completions.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
+    response = client.chat.completions.create(model="gpt-4o", messages=st.session_state.messages)
     msg = response.choices[0].message.content
     st.session_state.messages.append({"role": "assistant", "content": msg})
     st.chat_message("assistant").write(msg)
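Reviewer note on [PATCH 1/7]: the one-line swap from "gpt-3.5-turbo" to "gpt-4o" is fine on its own, but [PATCH 2/7] below rewrites the same call with a different hard-coded name ("gpt-4"), so the model string drifts across the series. A small sketch of pinning it in one constant instead (the constant name is illustrative, not part of the series):

    from openai import OpenAI

    # One source of truth for the model, so two call sites cannot disagree
    # the way "gpt-4o" (this patch) and "gpt-4" (PATCH 2/7) do.
    OPENAI_MODEL = "gpt-4o"

    client = OpenAI(api_key="sk-...")  # placeholder key
    response = client.chat.completions.create(
        model=OPENAI_MODEL,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)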
From cfbc31622e2ca6a12bf8ea173dd3f847bddddff8 Mon Sep 17 00:00:00 2001
From: Moodbot11
Date: Sat, 22 Jun 2024 00:42:41 -0500
Subject: [PATCH 2/7] Update Chatbot.py

---
 Chatbot.py | 73 +++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 64 insertions(+), 9 deletions(-)

diff --git a/Chatbot.py b/Chatbot.py
index d93243aa3..e9bcd1946 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -1,29 +1,84 @@
+Such a simple little tinyimport streamlit as st
+import os
 from openai import OpenAI
-import streamlit as st
+from pydub import AudioSegment
+from pydub.playback import play
+import requests
+from io import BytesIO
+
+# Initialize OpenAI client instance
+def init_openai():
+    openai_api_key = st.session_state.get("openai_api_key")
+    if openai_api_key:
+        return OpenAI(api_key=openai_api_key)
+    return None
+
+# Function to generate speech from text
+def generate_speech(text, client):
+    response = client.audio.speech.create(
+        model="tts-1",
+        voice="alloy",
+        input=text,
+    )
+    audio_content = response.data
+    st.audio(audio_content, format="audio/mp3")
+
+# Function to transcribe speech to text
+def transcribe_speech(file_buffer, client):
+    response = client.audio.transcriptions.create(
+        file=file_buffer,
+        model="whisper-1"
+    )
+    return response.get("text")
 
 with st.sidebar:
-    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
+    openai_api_key = st.text_input("OpenAI API Key", key="openai_api_key", type="password")
+    if openai_api_key:
+        st.session_state["openai_api_key"] = openai_api_key
+
     "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
     "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
     "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
 
 st.title("💬 Chatbot")
 st.caption("🚀 A Streamlit chatbot powered by OpenAI")
+
+# Initialize session state for messages
 if "messages" not in st.session_state:
     st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
 
-for msg in st.session_state.messages:
+# Display message history
+for msg in st.session_state["messages"]:
     st.chat_message(msg["role"]).write(msg["content"])
 
+openai_client = init_openai()
+
 if prompt := st.chat_input():
-    if not openai_api_key:
+    if not openai_client:
         st.info("Please add your OpenAI API key to continue.")
         st.stop()
 
-    client = OpenAI(api_key=openai_api_key)
-    st.session_state.messages.append({"role": "user", "content": prompt})
+    st.session_state["messages"].append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
-    response = client.chat.completions.create(model="gpt-4o", messages=st.session_state.messages)
-    msg = response.choices[0].message.content
-    st.session_state.messages.append({"role": "assistant", "content": msg})
+
+    response = openai_client.chat.completions.create(
+        model="gpt-4",
+        messages=st.session_state["messages"]
+    )
+
+    msg = response.choices[0]["message"]["content"]
+    st.session_state["messages"].append({"role": "assistant", "content": msg})
     st.chat_message("assistant").write(msg)
+
+    # Provide options to generate speech or upload voice for transcription
+    if st.button("Generate Speech"):
+        if openai_client:
+            generate_speech(msg, openai_client)
+    st.file_uploader("Upload Audio for Transcription", type=["mp3", "wav"])
+
+    uploaded_file = st.file_uploader("Upload an audio file for transcription")
+    if uploaded_file is not None:
+        file_buffer = BytesIO(uploaded_file.read())
+        if openai_client:
+            transcribed_text = transcribe_speech(file_buffer, openai_client)
+            st.write("Transcribed text: ", transcribed_text) but
From 31397a3159e2568543e3cdbcf7d79310901228ce Mon Sep 17 00:00:00 2001
From: Moodbot11
Date: Sat, 22 Jun 2024 00:44:34 -0500
Subject: [PATCH 3/7] Update Chatbot.py

---
 Chatbot.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Chatbot.py b/Chatbot.py
index e9bcd1946..eb4628f72 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -1,4 +1,4 @@
-Such a simple little tinyimport streamlit as st
+import streamlit as st
 import os
 from openai import OpenAI
 from pydub import AudioSegment
@@ -81,4 +81,4 @@ def transcribe_speech(file_buffer, client):
         file_buffer = BytesIO(uploaded_file.read())
         if openai_client:
             transcribed_text = transcribe_speech(file_buffer, openai_client)
-            st.write("Transcribed text: ", transcribed_text) but
+            st.write("Transcribed text: ", transcribed_text)
From ee5cf9a9036c38e5555308d026e1aca19f9ca2d2 Mon Sep 17 00:00:00 2001
From: Moodbot11
Date: Sat, 22 Jun 2024 01:02:15 -0500
Subject: [PATCH 4/7] Create packages.txt

---
 packages.txt | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 packages.txt

diff --git a/packages.txt b/packages.txt
new file mode 100644
index 000000000..1f6e863ed
--- /dev/null
+++ b/packages.txt
@@ -0,0 +1,2 @@
+libportaudio2
+ffmpeg
From cd55d73ff3ff278fe03cbb181e2c7c53ce92f6f3 Mon Sep 17 00:00:00 2001
From: Moodbot11
Date: Sat, 22 Jun 2024 01:03:38 -0500
Subject: [PATCH 5/7] Update requirements.txt

---
 requirements.txt | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 07fad9589..028381359 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,5 @@
-streamlit>=1.28
-langchain>=0.0.217
-openai>=1.2
-duckduckgo-search
-anthropic>=0.3.0
-trubrics>=1.4.3
-streamlit-feedback
+streamlit
+numpy
+openai
+pydub
+requests
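Reviewer note on [PATCH 2/7]: three response-handling lines do not match the openai>=1.0 client that `from openai import OpenAI` implies. client.audio.speech.create(...) returns binary audio content, not an object with a .data attribute; client.audio.transcriptions.create(...) returns a Transcription object, so .get("text") raises AttributeError; and chat choices are typed objects, so response.choices[0]["message"]["content"] raises TypeError. A minimal sketch of the three calls under the 1.x SDK (the API key and file name are placeholders):

    from io import BytesIO
    from openai import OpenAI

    client = OpenAI(api_key="sk-...")  # placeholder key

    # Chat: choices are objects, so use attribute access.
    chat = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    msg = chat.choices[0].message.content

    # Text-to-speech: read() yields the raw MP3 bytes that
    # st.audio(..., format="audio/mp3") expects.
    speech = client.audio.speech.create(model="tts-1", voice="alloy", input=msg)
    audio_bytes = speech.read()

    # Transcription: a BytesIO needs a filename so the API can infer the
    # container format; the result exposes the text as an attribute.
    buffer = BytesIO(audio_bytes)
    buffer.name = "speech.mp3"
    transcript = client.audio.transcriptions.create(model="whisper-1", file=buffer)
    print(transcript.text)

Separately, the st.button and st.file_uploader widgets created inside the `if prompt := st.chat_input():` branch vanish on the rerun their first interaction triggers (chat_input returns None on that run), so the speech and transcription controls are effectively unreachable; widgets that must survive a click normally live at the top level of the script. And since [PATCH 5/7] removes every version pin from requirements.txt, including on openai itself, the SDK generation these calls assume is no longer guaranteed at deploy time, which matters for [PATCH 6/7] below.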
From 4920e5a690e86480d584d4d1a9611eca6497ae69 Mon Sep 17 00:00:00 2001
From: Moodbot11
Date: Sat, 22 Jun 2024 01:04:40 -0500
Subject: [PATCH 6/7] Update Chatbot.py

---
 Chatbot.py | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/Chatbot.py b/Chatbot.py
index eb4628f72..4ff41898e 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -1,35 +1,34 @@
 import streamlit as st
-import os
-from openai import OpenAI
+import openai
 from pydub import AudioSegment
 from pydub.playback import play
-import requests
 from io import BytesIO
 
 # Initialize OpenAI client instance
 def init_openai():
     openai_api_key = st.session_state.get("openai_api_key")
     if openai_api_key:
-        return OpenAI(api_key=openai_api_key)
+        openai.api_key = openai_api_key
+        return openai
     return None
 
 # Function to generate speech from text
 def generate_speech(text, client):
-    response = client.audio.speech.create(
+    response = client.Audio.create(
         model="tts-1",
         voice="alloy",
         input=text,
     )
-    audio_content = response.data
+    audio_content = response["audio"]
     st.audio(audio_content, format="audio/mp3")
 
 # Function to transcribe speech to text
 def transcribe_speech(file_buffer, client):
-    response = client.audio.transcriptions.create(
+    response = client.Audio.transcriptions.create(
         file=file_buffer,
         model="whisper-1"
     )
-    return response.get("text")
+    return response["text"]
 
 with st.sidebar:
     openai_api_key = st.text_input("OpenAI API Key", key="openai_api_key", type="password")
@@ -61,7 +60,7 @@ def transcribe_speech(file_buffer, client):
     st.session_state["messages"].append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
 
-    response = openai_client.chat.completions.create(
+    response = openai_client.ChatCompletion.create(
         model="gpt-4",
         messages=st.session_state["messages"]
     )
@@ -74,9 +73,8 @@ def transcribe_speech(file_buffer, client):
     if st.button("Generate Speech"):
         if openai_client:
             generate_speech(msg, openai_client)
-    st.file_uploader("Upload Audio for Transcription", type=["mp3", "wav"])
 
-    uploaded_file = st.file_uploader("Upload an audio file for transcription")
+    uploaded_file = st.file_uploader("Upload Audio for Transcription", type=["mp3", "wav"])
     if uploaded_file is not None:
         file_buffer = BytesIO(uploaded_file.read())
         if openai_client:
From 8070164d6c18306b6d9d9356b5157319fac79321 Mon Sep 17 00:00:00 2001
From: Moodbot11
Date: Sat, 22 Jun 2024 03:46:52 -0500
Subject: [PATCH 7/7] Update Chatbot.py

---
 Chatbot.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Chatbot.py b/Chatbot.py
index 4ff41898e..f9b2cef43 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -1,5 +1,6 @@
 import streamlit as st
 import openai
+import os
 from pydub import AudioSegment
 from pydub.playback import play
 from io import BytesIO
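Reviewer note on [PATCH 6/7]: this revision moves every call onto the module-level openai.ChatCompletion / openai.Audio interface, which belongs to the pre-1.0 SDK and was removed in 1.0. With the unpinned openai from [PATCH 5/7], a fresh install gets a 1.x release and these calls fail on the first request; openai.Audio also never offered a create() text-to-speech method even on the legacy SDK (it exposed transcribe and translate, and had no TTS support at all). If the legacy style is really intended, the pin and the calls would look roughly like this (a sketch, assuming openai<1.0 in requirements.txt; the tts-1 feature would have to be dropped):

    # requirements.txt would need: openai<1.0
    import openai

    openai.api_key = "sk-..."  # placeholder key

    # Legacy chat call: the response supports dict-style access.
    completion = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(completion["choices"][0]["message"]["content"])

    # Legacy transcription: positional model name plus an open file handle.
    with open("speech.mp3", "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    print(transcript["text"])

The simpler fix runs the other direction: pin openai>=1.0, keep the client-based calls from [PATCH 2/7] with the corrections sketched earlier, and the text-to-speech path stays workable.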