diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..f735c81
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,3 @@
+OPENAI_API_KEY="your_api_key"
+LANGSMITH_API_KEY="your_langsmith_api_key" # Find it here: https://smith.langchain.com
+PORT="3000"
\ No newline at end of file
diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
new file mode 100644
index 0000000..0b9b3ec
--- /dev/null
+++ b/.github/workflows/cd.yml
@@ -0,0 +1,43 @@
+name: CD
+
+on:
+  workflow_run:
+    workflows: ["CI"]
+    types:
+      - completed
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    # Only runs if CI was successful
+    if: ${{ github.event.workflow_run.conclusion == 'success' }}
+
+    steps:
+      # Checkout the repository
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Log in to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract short SHA
+        id: git_sha
+        run: echo "GIT_SHA=$(git rev-parse --short $GITHUB_SHA)" >> $GITHUB_ENV
+
+      - name: Build and Push Docker Backend Image
+        run: |
+          docker build -t ghcr.io/cogitontnu/jarvis-core:${{ env.GIT_SHA }} ./core
+          docker push ghcr.io/cogitontnu/jarvis-core:${{ env.GIT_SHA }}
+
+## Add Build and Push for Docker Frontend Image when it becomes relevant
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..9b037a9
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,20 @@
+name: CI
+
+on:
+  push:
+    branches: ["main"]
+  pull_request:
+    branches: ["main"]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: true
+    steps:
+      - uses: actions/checkout@v3
+      - name: Build docker image and boot services
+        run: |
+          docker compose build
+          docker compose up -d
+          docker compose down
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 0e8f19d..f8b6f1d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
+# Custom Ignores
+user_data
+
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/benchmarking/readme.md b/benchmarking/readme.md
index 1e59d9c..2f21e31 100644
--- a/benchmarking/readme.md
+++ b/benchmarking/readme.md
@@ -1,2 +1,3 @@
 ### Benchmarking
-Standardized tasks and tests for Jarvis to evaluate performance.
\ No newline at end of file
+Standardized tasks and tests for Jarvis to evaluate performance.
+
diff --git a/core/agent.py b/core/agent.py
index e60dfe3..ed73e4d 100644
--- a/core/agent.py
+++ b/core/agent.py
@@ -10,6 +10,15 @@
 
 
 
+class Agent1:
+    llm = ChatOpenAI(
+        model=Model.gpt_4o,
+        temperature=0,
+        max_tokens=512,
+        # streaming=True,  # can't use because of metadata
+    )
+
+
 class Agent:
     def __init__(self, model_type) -> None:
         #Langsmith Tracing
@@ -33,6 +42,7 @@ def __init__(self, model_type) -> None:
         # Defining edges between nodes
         self.workflow.add_edge(START, "chatbot")
         self.workflow.add_edge("tools", "chatbot")
+        self.workflow.add_edge("chatbot", END)
 
         # Defining conditional edges
         self.workflow.add_conditional_edges(
@@ -75,19 +85,16 @@ def run(self, user_prompt: str) -> tuple[str, int]:
         response and the total amount of tokens used.
         """
         first = True
         for event in self.graph.stream({"messages": [("user", user_prompt)]}):
             for value in event.values():
                 messages = value["messages"][-1]
                 gathered = ""
                 # if messages.content and not isinstance(messages, HumanMessage):
                 #     print(messages.content, end="|", flush=True)
 
-                if isinstance(messages, AIMessageChunk):
-                    if first:
-                        gathered = messages
-                        first = False
-                    else:
-                        gathered += messages
+                if isinstance(messages, AIMessageChunk):
+                    gathered = messages if first else gathered + messages
+                    first = False
 
                 if isinstance(messages, BaseMessage):
                     if hasattr(messages, 'usage_metadata'):
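Side note on the token accounting `run()` relies on: recent `langchain-core` releases (0.2+) attach a `usage_metadata` dict to `AIMessage` objects for supported providers. A minimal standalone sketch — the model name and prompt are illustrative, not taken from this repo:

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o", temperature=0)
message = llm.invoke("One sentence about orcas.")

# usage_metadata looks like {"input_tokens": ..., "output_tokens": ..., "total_tokens": ...}
if message.usage_metadata:
    print(message.usage_metadata.get("total_tokens", 0))
```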
""" first = True - for event in self.graph.stream({"messages": [("user", user_prompt)]}): + for event in self.graph.stream("tell me about orcas?"): for value in event.values(): messages = value["messages"][-1] gathered = "" # if messages.content and not isinstance(messages, HumanMessage): # print(messages.content, end="|", flush=True) - if isinstance(messages, AIMessageChunk): - if first: - gathered = messages - first = False - else: - gathered += messages + gathered += messages if isinstance(messages, BaseMessage): if hasattr(messages, 'usage_metadata'): diff --git a/core/ai_message.py b/core/ai_message.py index aabaf40..19e0934 100644 --- a/core/ai_message.py +++ b/core/ai_message.py @@ -1,4 +1,4 @@ class Ai_message: def __init__(self, message:str, token_count:int) -> None: self.message = message - self.token_count = token_count + self.token_count = token_count \ No newline at end of file diff --git a/core/config.py b/core/config.py index b562f0e..b9cd51a 100644 --- a/core/config.py +++ b/core/config.py @@ -7,11 +7,7 @@ PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY") #add langsmith api to env as LANGSMITH_API_KEY = "your_api_key" on EU server -LANGSMITH_API_KEY: str -if os.getenv(key="LANGSMITH_API_KEY"): - LANGSMITH_API_KEY: str = os.getenv(key="LANGSMITH_API_KEY") -else: - LANGSMITH_API_KEY: str = "lmao" +LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY", "no_key") os.environ["LANGCHAIN_TRACING_V2"] = "true" os.environ["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com" diff --git a/core/graph.py b/core/graph.py deleted file mode 100644 index fb1e594..0000000 --- a/core/graph.py +++ /dev/null @@ -1,85 +0,0 @@ -from langchain_openai import ChatOpenAI -from graphstate import GraphState -from tools.tools import get_tools -from langgraph.graph import StateGraph, START, END -from langgraph.prebuilt import ToolNode, tools_condition -from langchain_core.messages import BaseMessage, AIMessageChunk, HumanMessage -from models import Model -import json -from config import OPENAI_API_KEY -from Agents.simpleagent import SimpleAgent - - -class Graph: - def __init__(self): - LANGCHAIN_TRACING_V2: str = "true" - - self.llm = SimpleAgent.llm - - self.llm_with_tools = self.llm.bind_tools(get_tools()) - self.workflow = StateGraph(GraphState) - # Adding nodes to the workflow - self.workflow.add_node("chatbot", self.chatbot) - self.workflow.add_node("tools", ToolNode(get_tools())) - # TODO: Visualize these tools - - # Defining edges between nodes - self.workflow.add_edge(START, "chatbot") - self.workflow.add_edge("tools", "chatbot") - - # Defining conditional edges - self.workflow.add_conditional_edges( - "chatbot", - tools_condition - ) - self.graph = self.workflow.compile() - - #with open("core/graph_node_network.png", 'wb') as f: - #f.write(self.graph.get_graph().draw_mermaid_png()) - - def chatbot(self, state: GraphState): - """ - Simple bot that invokes the list of previous messages - and returns the result which will be added to the list of messages. - """ - return {"messages": [self.llm_with_tools.invoke(state["messages"])]} - - -# UNFINISHED - def run_stream_only(self, user_prompt: str): - """ - Run the agent, returning a token stream. 
- """ - print('Running stream...') - print(user_prompt) - print(type(user_prompt)) - for chunk in self.llm.stream(user_prompt): - yield chunk.content - - #for running the agent comment out for testing in terminal - def run(self, user_prompt: str) -> tuple[str, int]: - """ - Run the agent with a user prompt and return a tuple containing the llm - response and the total amount of tokens used. - """ - first = True - for event in self.graph.stream({"messages": [("user", user_prompt)]}): - #print(event) - for value in event.values(): - messages = value["messages"][-1] - gathered = "" - # if messages.content and not isinstance(messages, HumanMessage): - # print(messages.content, end="|", flush=True) - - if isinstance(messages, AIMessageChunk): - if first: - gathered = messages - first = False - else: - gathered += messages - - if isinstance(messages, BaseMessage): - total_tokens = messages.usage_metadata.get('total_tokens', 0) - - - return messages.content, total_tokens \ No newline at end of file diff --git a/core/graphAgent.py b/core/graphAgent.py new file mode 100644 index 0000000..cb3e9ab --- /dev/null +++ b/core/graphAgent.py @@ -0,0 +1,116 @@ +from langchain_openai import ChatOpenAI +from graphstate import GraphState +from tools.tools import get_tools +from langgraph.graph import StateGraph, START, END +from langgraph.prebuilt import ToolNode, tools_condition +from langchain_core.messages import BaseMessage, AIMessageChunk, HumanMessage, AIMessage +from models import Model +import json +from config import OPENAI_API_KEY +from Agents.simpleagent import SimpleAgent +#from agent import Agent, Agent1 +import asyncio +from time import sleep + + +class Graph: + def __init__(self): + LANGCHAIN_TRACING_V2: str = "true" + + self.llm = SimpleAgent.llm + + self.llm_with_tools = self.llm.bind_tools(get_tools()) + self.workflow = StateGraph(GraphState) + # Adding nodes to the workflow + self.workflow.add_node("chatbot", self.chatbot) + self.workflow.add_node("tools", ToolNode(get_tools())) + # TODO: Visualize these tools + + # Defining edges between nodes + self.workflow.add_edge(START, "chatbot") + self.workflow.add_edge("tools", "chatbot") + self.workflow.add_edge("chatbot", END) + + # Defining conditional edges + self.workflow.add_conditional_edges( + "chatbot", + tools_condition + ) + self.graph = self.workflow.compile() + + #with open("core/graph_node_network.png", 'wb') as f: + #f.write(self.graph.get_graph().draw_mermaid_png()) + + def chatbot(self, state: GraphState): + """ + Simple bot that invokes the list of previous messages + and returns the result which will be added to the list of messages. + """ + return {"messages": [self.llm_with_tools.invoke(state["messages"])]} + + +# UNFINISHED + def run_stream_only(self, user_prompt: str): + """ + Run the agent, returning a token stream. 
+ """ + print('Running stream...') + print(user_prompt) + print(type(user_prompt)) + for chunk in self.llm.stream(user_prompt): + yield chunk.content + + #for running the agent comment out for testing in terminal + async def run(self, user_prompt: str, socketio): + """ + Run the agent with a user prompt and emit the response and total tokens via socket + """ + try: + input = {"messages": [("human", user_prompt)]} + socketio.emit("start_message", " ") + async for event in self.graph.astream_events(input, version='v2'): + event_type = event.get('event') + + # Passes over events that are start events + if event_type == 'on_chain_start': + print("This event is on_chain_start") + continue + + # Returns the AI response + # //TODO Fix that it streams chuncks it rather than AIMessage + if event_type == 'on_chain_end': + print(event['data']) + for message in event['data']['output']['messages']: + if isinstance(message, AIMessage): + data = message.content + socketio.emit("chunk", data) + + if hasattr(message, 'usage_metadata'): + usage_metadata = message.usage_metadata + if usage_metadata: + total_tokens = usage_metadata.get('total_tokens') + socketio.emit("tokens", total_tokens) + + return "success" + except Exception as e: + print(e) + return "error" + + # for event in self.graph.stream(input): + #print(event) + # for value in event.values(): + # messages = value["messages"][-1] + # gathered = "" + # # if messages.content and not isinstance(messages, HumanMessage): + # # print(messages.content, end="|", flush=True) + + # if isinstance(messages, AIMessageChunk): + # if first: + # gathered = messages + # first = False + # else: + # gathered += messages + + # if isinstance(messages, BaseMessage): + # total_tokens = messages.usage_metadata.get('total_tokens', 0) + # return messages.content, total_tokens \ No newline at end of file diff --git a/core/graphtools.py b/core/graphtools.py index 446eb4e..1d3e4df 100644 --- a/core/graphtools.py +++ b/core/graphtools.py @@ -16,7 +16,7 @@ class AgentState(TypedDict): messages: Annotated[Sequence[BaseMessage], operator.add] sender: str -class gaphtool: +class graphtool: def __init__(self, graph): self.graph = graph self.nodes = graph.nodes() diff --git a/core/main.py b/core/main.py index cd67d6f..8f39685 100644 --- a/core/main.py +++ b/core/main.py @@ -1,5 +1,5 @@ from flask import Flask, request, url_for, jsonify -from graph import Graph +from graphAgent import Graph from models import Model from summarize_chat import summarize_chat from rag import embed_and_store @@ -7,6 +7,18 @@ from flask_cors import CORS from config import PORT import asyncio +from modules.user_data_setup import check_folders +from modules.chat import read_chat +import logging +log = logging.getLogger('werkzeug') +log.setLevel(logging.ERROR) + +# +# Setup +# +print("J is booting up....") +check_folders() # Check directories are made for user data +read_chat("1") # # Server config @@ -22,18 +34,16 @@ # # # HTTP(S) routes below -#s +# # @app.route("/") def hello_world(): return app.send_static_file('index.html') - -@app.route('/send_message', methods=['POST', 'GET']) -def llm_request(): - if(request.method == 'POST'): - data = request.json - ai_message = jarvis.run(data['prompt']) - return {"message": ai_message} + +# Route to get metadata like name, id, descriptions of all user chats +@app.route("/chats/metadata") +def get_chats(): + return "lmao" @app.route('/vectorize_chat', methods=['POST']) def summarize_store(): @@ -58,37 +68,27 @@ def summarize_store(): @socketio.on('connect') def 
diff --git a/core/modules/chat.py b/core/modules/chat.py
new file mode 100644
index 0000000..5292ec3
--- /dev/null
+++ b/core/modules/chat.py
@@ -0,0 +1,48 @@
+import os
+import json
+
+def read_chat(id: str) -> list:
+    '''
+    Uses the chat id to locate the chat file and returns the chat as a Python list of message dicts.
+    '''
+    try:
+        dirname = os.path.dirname(os.path.dirname(__file__)) # Resolves the core folder, which holds user_data
+        filepath = os.path.join(dirname, f'user_data/chats/{id}.txt')
+        # Open and read the chat txt file
+        with open(filepath, 'r') as file:
+            raw_text = file.read()
+        chat = json.loads("[" + raw_text + "]") # The file holds comma-separated JSON objects; wrapping them in brackets makes the whole thing valid JSON.
+        return chat
+    except Exception as e:
+        return e
+
+def append_message_to_chat(message: dict, id: str):
+    try:
+        dirname = os.path.dirname(os.path.dirname(__file__)) # Resolves the core folder, which holds user_data
+        filepath = os.path.join(dirname, f'user_data/chats/{id}.txt')
+        # Serialize the message and prepare it for appending
+        message_txt = json.dumps(message)
+        message_txt = f"\n,{message_txt}"
+        # Open the chat file
+        with open(filepath, 'a') as file:
+            file.write(message_txt)
+    except Exception as e:
+        return e
+
+# def upsert_chat(chat_object: dict, id: str):
+#     '''
+#     Upserts a chat dictionary object, saving it as a JSON file in the user_data folder.
+#     Upserting means update, or create if the file doesn't exist yet, overwriting previous data.
+#     '''
+#     try:
+#         dirname = os.path.dirname(os.path.dirname(__file__))
+#         filepath = os.path.join(dirname, f'user_data/chats/{id}.txt')
+#         # Open and write the JSON file
+#         with open(filepath, 'w') as file:
+#             file.write(json.dumps(chat_object))
+#         return True # Returns True if successful
+#     except Exception as e:
+#         return e
+
+# json.dumps() - from a Python object to a JSON string
+# json.loads() - from a JSON string to a Python object
\ No newline at end of file
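To make the storage format concrete: `read_chat` assumes `user_data/chats/<id>.txt` contains JSON objects separated by `\n,`, which becomes a valid JSON array once wrapped in brackets — note this only parses if the first message was written without the leading comma. A sketch with made-up message shapes:

```python
import json

# Hypothetical contents of user_data/chats/1.txt after two appends:
raw_text = '{"role": "human", "content": "hi"}\n,{"role": "ai", "content": "hello"}'

chat = json.loads("[" + raw_text + "]")
print(chat[1]["content"])  # -> hello
```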
+# ''' +# try: +# dirname = os.path.dirname(os.path.dirname(__file__)) # Creates folder in core named user_data +# filepath = os.path.join(dirname, f'user_data/chats/{id}.txt') +# # Open and write the JSON file +# with open(filepath, 'w') as file: +# file.write(json.dump(chat_object)) +# return True # Returns true if successfull +# except Exception as e: +# return e + +# json.dumps() - From python to json +# json.load() - From json to python \ No newline at end of file diff --git a/core/modules/user_data_setup.py b/core/modules/user_data_setup.py new file mode 100644 index 0000000..7b5721b --- /dev/null +++ b/core/modules/user_data_setup.py @@ -0,0 +1,18 @@ +import os + +def check_folders(): + main_folder = "user_data/" + pathsToCheck = ["chats", "chats_metadata"] + for directory in pathsToCheck: + path = main_folder + directory # creates path user_data/chats for example. Everything should be under user_data as its gitignored. + check_and_create_folder(path) # Does a relative folder check, and builds the directory if it doesn't exist + +def check_and_create_folder(path): + dirname = os.path.dirname(os.path.dirname(__file__)) # Creates folder in core named user_data + relativedir = os.path.join(dirname, path) + if not os.path.exists(relativedir): + try: + print("Created user_data director under core folder. This is first-time setup.") + os.makedirs(path) + except Exception as e: + print(e) \ No newline at end of file diff --git a/core/static/chat.js b/core/static/chat.js index a3deec0..edd9acd 100644 --- a/core/static/chat.js +++ b/core/static/chat.js @@ -1,5 +1,5 @@ // When user sends a message (pressing send button) this funciton runs -sendMessage = async () => { +sendMessage = () => { let userInput = "" try{ let chat_text_field = document.getElementById('chat_input_text') @@ -8,13 +8,17 @@ sendMessage = async () => { chat_text_field.value = "" chat_history = document.getElementById("chat_history") chat_history.scrollTop = chat_history.scrollHeight; - }catch(e){ + } catch(e){ console.log(e) } // Send the message via the open socket try{ - let res = await socket.emit('user_prompt', {prompt: userInput, conversation_id: state.activeConversationId}) + console.log("User promt is: " + userInput); + const payload = {prompt: userInput, conversation_id: state.activeConversationId} + console.log("Payload is: ", payload); + let res = socket.emit('user_prompt', payload) + console.log("Prompt sent to backend"); // Stream to the current active AI chat box }catch(e){ console.log("Something went wrong", e) @@ -30,7 +34,7 @@ addStreamedChunk = (messagePart) => { } } -let endStreamedAIMessage = () => { +endStreamedAIMessage = () => { if (state.activeAIMessage) { console.log("Message end") let output = state.activeAIMessage.innerHTML @@ -40,14 +44,7 @@ let endStreamedAIMessage = () => { } else { console.log("No active AI message to end.") } - -} -let startStreamedAIMessage = (uuid) => { - console.log("Message start") - addMessage(uuid); // Create an AI message when it begins streaming. - let ai_message = document.getElementById(uuid) - state.activeAIMessage = ai_message // Active element gets added to the state. 
diff --git a/core/static/index.css b/core/static/index.css
index e5e5324..03a8065 100644
--- a/core/static/index.css
+++ b/core/static/index.css
@@ -70,6 +70,8 @@ body {
     border: 1px solid #050505;
     background-color: #2d2d2d;
     overflow-y: scroll;
+    display: flex;
+    flex-direction: row;
 }
 /* Hide scrollbar for Chrome, Safari and Opera */
 #chat_input::-webkit-scrollbar {
@@ -83,13 +85,22 @@ body {
 }
 
 #chat_input_text {
-    width: 100%;
+    width: 90%;
     height: 9vh;
     border: none;
     background-color: #2d2d2d;
     color: var(--onSurface);
 }
 
+#voice_button {
+    width: 10%;
+    height: 9vh;
+    background-color: #4d4d4d;
+    border-radius: 10px;
+    font-size: 24px;
+
+}
+
 .chat_input_container{
     display: flex;
     flex-direction: column;
@@ -146,11 +157,11 @@ body {
 }
 
 .chat{
-    width: 65%;
+    width: 100%;
 }
 
 .chatHistory {
-    width: 250px;
+    width: 400px;
     height: 80vh;
     margin: 6px;
     margin-top: 8px;
@@ -205,7 +216,7 @@ p{
 }
 
 .processesContainer{
-    width: 250px;
+    width: 400px;
     height: 80vh;
     margin-top: 8px;
     border-radius: 10px;
diff --git a/core/static/index.html b/core/static/index.html
index 7d7eaf6..701e97d 100644
--- a/core/static/index.html
+++ b/core/static/index.html
@@ -44,6 +44,7 @@
 <… >
 
         Jarvis
 
+        <button id="voice_button"></button>
 <… >
 <… >
diff --git a/core/static/index.js b/core/static/index.js
index b26adfe..c3196e5 100644
--- a/core/static/index.js
+++ b/core/static/index.js
@@ -58,7 +58,7 @@ let setLoading = (newLoadingVal) => {
 async function addMessage(message, uuid) {
     let html = /*html*/`
     <… >
-    <img … >
+    <img … >
     ${message}
     <… >
     `;
@@ -80,7 +80,7 @@ async function addStreamedMessage(uuid, messagePart) {
 addUserMessage = (message) => {
     let html = /*html*/`
     <… >
-    <img … >
+    <img … >
     ${message}
     <… >
     `
diff --git a/core/static/rickroll-roll.gif b/core/static/rickroll-roll.gif
new file mode 100644
index 0000000..b404e3e
Binary files /dev/null and b/core/static/rickroll-roll.gif differ
diff --git a/core/static/socketEvents.js b/core/static/socketEvents.js
index 5f26638..d5bf7b6 100644
--- a/core/static/socketEvents.js
+++ b/core/static/socketEvents.js
@@ -12,7 +12,13 @@ let uuid = 0
 
 // prints chunks that are streamed to the console and adds them to the chat
 socket.on("chunk", async (chunk)=>{
-    console.log(chunk);
+    if(!state.activeAIMessage){
+        console.log("STARTED MESSAGE")
+        uuid = generateUUID();
+        await addStreamedMessage(uuid, "");
+        ai_message = document.getElementById(uuid)
+        state.activeAIMessage = ai_message
+    }
     await addStreamedMessage(uuid, chunk);
 })
 
@@ -23,9 +29,7 @@ socket.on("tokens", async (tokens) => {
 })
 
 socket.on("start_message", async () => {
-    uuid = generateUUID();
-    console.log(uuid);
-    await addStreamedMessage(uuid, "");
+
 })
 
 // Remember to parse the streamed response
diff --git a/docker-compose.yml b/docker-compose.yml
index f344eaa..d3a6956 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,23 +1,12 @@
 services:
-  # ui-service:
-  #   build: ./ui
-  #   env_file: .env
-  #   restart: unless-stopped
-  #   volumes:
-  #     - ./ui:/app # Mount the application code
-  #     - /app/node_modules
-  #   networks:
-  #     - backend
-  #   stop_signal: SIGINT
-  #   ports:
-  #     - "3000:3000"
-
   llm-service:
     build: ./core
-    env_file: .env
     restart: unless-stopped
     environment:
-      - FLASK_ENV=development # Auto-restarts Flask when code changes are detected
+      FLASK_ENV: ${FLASK_ENV} # Auto-restarts Flask when code changes are detected
+      OPENAI_API_KEY: ${OPENAI_API_KEY}
+      LANGSMITH_API_KEY: ${LANGSMITH_API_KEY}
+      PORT: ${PORT}
     volumes:
       - ./core:/app # Mount the application code to detect live changes
     networks:
@@ -24,19 +13,7 @@
       - backend
     stop_signal: SIGINT
     ports:
       - "3000:3000"
 
-# speech-service:
-#   build: ./speech
-#   env_file: .env
-#   restart: unless-stopped
-#   environment:
-#   volumes:
-#     - ./speech:/app
-#   networks:
-#     - backend
-#   stop_signal: SIGINT
-#   ports:
-#     - "3069:3069" #nice
 
 networks:
   backend:
diff --git a/src/speech/audioAnalyse.py b/src/speech/audioAnalyse.py
new file mode 100644
index 0000000..04650fa
--- /dev/null
+++ b/src/speech/audioAnalyse.py
@@ -0,0 +1,39 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.io import wavfile
+import seaborn as sns
+import librosa
+import librosa.display
+
+def analyze_audio(audio_path):
+    # Load the WAV file
+    sr, audio_data = wavfile.read(audio_path)
+
+    # If stereo, take only one channel (convert to mono)
+    if len(audio_data.shape) == 2:
+        audio_data = audio_data.mean(axis=1)
+
+    # Normalize audio data to range between -1 and 1
+    audio_data = audio_data / np.max(np.abs(audio_data))
+
+    # Plot the spectrogram
+    audio_data_librosa, _ = librosa.load(audio_path, sr=sr)
+    D = librosa.amplitude_to_db(np.abs(librosa.stft(audio_data_librosa)), ref=np.max)
+    librosa.display.specshow(D, sr=sr, x_axis='time', y_axis='log')
+    plt.colorbar(format='%+2.0f dB')
+    plt.title('Spectrogram')
+
+    plt.tight_layout()
+    plt.show()
+
+if __name__ == "__main__":
+    import sys
+    if len(sys.argv) < 2:
+        print("Usage: python audio_analysis.py <audio_path> [<audio_path2>]")
+        sys.exit(1)
+
+    audio_path = sys.argv[1]
+    analyze_audio(audio_path)
+    if len(sys.argv) > 2:
+        audio_path2 = sys.argv[2]
+        analyze_audio(audio_path2)
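Usage sketch for the new speech script — the file names below are placeholders:

```python
# From a shell:
#   python src/speech/audioAnalyse.py recording.wav       # one file
#   python src/speech/audioAnalyse.py a.wav b.wav         # two files, plotted one after the other
# Or programmatically, assuming src/speech is on sys.path:
from audioAnalyse import analyze_audio

analyze_audio("recording.wav")  # opens a log-frequency spectrogram window
```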