Merge branch 'astream_experiment'
WilliamMRS committed Oct 17, 2024
2 parents c97d566 + 41ce9ab · commit c906612
Showing 8 changed files with 67 additions and 66 deletions.
3 changes: 2 additions & 1 deletion benchmarking/readme.md
@@ -1,2 +1,3 @@
### Benchmarking
-Standardized tasks and tests for Jarvis to evaluate performance.
+Standardized tasks and tests for Jarvis to evaluate performance.
+
17 changes: 10 additions & 7 deletions core/agent.py
@@ -10,6 +10,14 @@



+class Agent1:
+    llm = ChatOpenAI(
+        model = Model.gpt_4o,
+        temperature=0,
+        max_tokens=512,
+    )
+
+
class Agent:
    def __init__(self, model_type) -> None:
        #Langsmith Tracing
@@ -76,19 +84,14 @@ def run(self, user_prompt: str) -> tuple[str, int]:
        response and the total amount of tokens used.
        """
-        first = True
-        for event in self.graph.stream({"messages": [("user", user_prompt)]}):
+        for event in self.graph.stream("tell me about orcas?"):
            for value in event.values():
                messages = value["messages"][-1]
                gathered = ""
                # if messages.content and not isinstance(messages, HumanMessage):
                # print(messages.content, end="|", flush=True)

                if isinstance(messages, AIMessageChunk):
-                    if first:
-                        gathered = messages
-                        first = False
-                    else:
-                        gathered += messages
+                    gathered += messages

                if isinstance(messages, BaseMessage):
                    if hasattr(messages, 'usage_metadata'):
56 changes: 37 additions & 19 deletions core/graph.py → core/graphAgent.py
@@ -8,6 +8,9 @@
import json
from config import OPENAI_API_KEY
from Agents.simpleagent import SimpleAgent
+from agent import Agent, Agent1
+import asyncio
+from time import sleep


class Graph:
@@ -57,29 +60,44 @@ def run_stream_only(self, user_prompt: str):
            yield chunk.content

    #for running the agent comment out for testing in terminal
-    def run(self, user_prompt: str) -> tuple[str, int]:
+    async def run(self, user_prompt: str, socketio) -> tuple[str, int]:
        """
        Run the agent with a user prompt and return a tuple containing the llm
        response and the total amount of tokens used.
        """
-        first = True
-        for event in self.graph.stream({"messages": [("user", user_prompt)]}):
-            #print(event)
-            for value in event.values():
-                messages = value["messages"][-1]
-                gathered = ""
-                # if messages.content and not isinstance(messages, HumanMessage):
-                # print(messages.content, end="|", flush=True)
+        try:
+            input = {"messages": [("human", user_prompt)]}
+            socketio.emit("start_message", " ")
+            async for chunk in self.graph.astream(input, stream_mode="values"):
+                if type(chunk["messages"][-1]) == HumanMessage:
+                    continue
+                event_message = chunk["messages"][-1].content
+                event_message = event_message.split(" ")
+                for word in event_message:
+                    sleep(0.05)
+                    socketio.emit("chunk", word+" ")
+                socketio.emit("chunk", "<br>")
+            socketio.emit("tokens", 0) # a way to emit ending of the message
+            return "success"
+        except Exception as e:
+            print(e)
+            return "error"

-                if isinstance(messages, AIMessageChunk):
-                    if first:
-                        gathered = messages
-                        first = False
-                    else:
-                        gathered += messages
+        # for event in self.graph.stream(input):
+        #print(event)
+        # for value in event.values():
+        # messages = value["messages"][-1]
+        # gathered = ""
+        # # if messages.content and not isinstance(messages, HumanMessage):
+        # # print(messages.content, end="|", flush=True)

-                if isinstance(messages, BaseMessage):
-                    total_tokens = messages.usage_metadata.get('total_tokens', 0)
+        # if isinstance(messages, AIMessageChunk):
+        # if first:
+        # gathered = messages
+        # first = False
+        # else:
+        # gathered += messages

-        return messages.content, total_tokens
+        # if isinstance(messages, BaseMessage):
+        # total_tokens = messages.usage_metadata.get('total_tokens', 0)
+        # return messages.content, total_tokens
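Taken on its own, the streaming pattern the new run() introduces can be sketched roughly as follows. This is an illustrative sketch, not the committed code: stream_response, graph, and socketio are placeholder names standing in for the method above, the compiled LangGraph graph (self.graph), and the Flask-SocketIO server passed in by the caller.

    from time import sleep

    from langchain_core.messages import HumanMessage

    async def stream_response(graph, socketio, user_prompt: str) -> str:
        """Stream an agent reply word by word over Socket.IO (sketch)."""
        try:
            inputs = {"messages": [("human", user_prompt)]}
            socketio.emit("start_message", " ")
            # stream_mode="values" yields the full graph state after each step,
            # so the newest message is always the last element of "messages".
            async for state in graph.astream(inputs, stream_mode="values"):
                message = state["messages"][-1]
                if isinstance(message, HumanMessage):
                    continue  # skip echoing the user's own prompt
                for word in message.content.split(" "):
                    sleep(0.05)  # crude pacing so the client gets a typing effect
                    socketio.emit("chunk", word + " ")
                socketio.emit("chunk", "<br>")
            socketio.emit("tokens", 0)  # signals the end of the message to the client
            return "success"
        except Exception as exc:
            print(exc)
            return "error"

One design note: time.sleep blocks the event loop while the coroutine is paced; asyncio.sleep would be the non-blocking equivalent if pacing is wanted inside async code.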
31 changes: 7 additions & 24 deletions core/main.py
@@ -1,5 +1,5 @@
from flask import Flask, request, url_for, jsonify
-from graph import Graph
+from graphAgent import Graph
from models import Model
from summarize_chat import summarize_chat
from rag import embed_and_store
@@ -22,18 +22,11 @@
#
#
# HTTP(S) routes below
-#s
#
#
@app.route("/")
def hello_world():
    return app.send_static_file('index.html')

-@app.route('/send_message', methods=['POST', 'GET'])
-def llm_request():
-    if(request.method == 'POST'):
-        data = request.json
-        ai_message = jarvis.run(data['prompt'])
-        return {"message": ai_message}

@app.route('/vectorize_chat', methods=['POST'])
def summarize_store():
@@ -49,6 +42,8 @@ def summarize_store():

    return {"status": "success", "summary": summary}

+
+
#
#
# Socket.IO events below
@@ -68,24 +63,12 @@ def disconnect():
# Custom event. Fired when the user sends a prompt.
@socketio.on('user_prompt')
def handle_prompt(data):
+    print("huh")
    try:
        conversation_id = data['conversation_id'] # grabs the conversation ID
-        socketio.emit("start_message")
-        stream, tokens = jarvis.run(data['prompt']) # prompts Jarvis
-        #stream = jarvis.run_stream_only(data['prompt'])
-        chunk = ""
-        for char in stream:
-            if len(chunk) > 4:
-                socketio.emit("chunk", chunk)
-                chunk = char
-            else:
-                chunk += char
-            #asyncio.sleep(500)
-        socketio.emit("chunk", chunk)
-        socketio.emit("tokens", tokens)
-
-        return jsonify({"status": "success"})
+        asyncio.run(jarvis.run(data['prompt'], socketio), debug=True) # prompts Jarvis and hands off emitting to the graphAgent.
+
+        return jsonify({"status": response})
    except Exception as e:
        print(f'Something very bad happened: {e}')
        return jsonify({"status": "error"})
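The handler above now only drives the async agent to completion and leaves all emitting to graphAgent.py. A minimal sketch of that delegation, with the coroutine's return value captured for the acknowledgement; jarvis is assumed to be the Graph instance and socketio the Flask-SocketIO server, as elsewhere in main.py, and this is not the committed code.

    import asyncio

    from flask import jsonify

    @socketio.on("user_prompt")
    def handle_prompt(data):
        try:
            # The synchronous Socket.IO handler runs the async agent to completion;
            # all "chunk"/"tokens" emits happen inside jarvis.run().
            status = asyncio.run(jarvis.run(data["prompt"], socketio))
            return jsonify({"status": status})
        except Exception as exc:
            print(f"Something very bad happened: {exc}")
            return jsonify({"status": "error"})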
7 changes: 0 additions & 7 deletions core/static/chat.js
@@ -47,13 +47,6 @@ endStreamedAIMessage = () => {

}

-let startStreamedAIMessage = (uuid) => {
-    console.log("Message start")
-    addMessage(uuid); // Create an AI message when it begins streaming.
-    let ai_message = document.getElementById(uuid)
-    state.activeAIMessage = ai_message // Active element gets added to the state.
-}
-
// Generates unique id on socket.on("start_message")
let generateUUID = () => {
    return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, c =>
6 changes: 3 additions & 3 deletions core/static/index.css
@@ -146,11 +146,11 @@ body {
}

.chat{
-    width: 65%;
+    width: 100%;
}

.chatHistory {
-    width: 250px;
+    width: 400px;
    height: 80vh;
    margin: 6px;
    margin-top: 8px;
@@ -205,7 +205,7 @@ p{
}

.processesContainer{
-    width: 250px;
+    width: 400px;
    height: 80vh;
    margin-top: 8px;
    border-radius: 10px;
12 changes: 8 additions & 4 deletions core/static/socketEvents.js
@@ -12,7 +12,13 @@ let uuid = 0

// prints chunks that are streamed to the console and adds them to the chat
socket.on("chunk", async (chunk)=>{
-    console.log(chunk);
+    if(!state.activeAIMessage){
+        console.log("STARTED MESSAGE")
+        uuid = generateUUID();
+        await addStreamedMessage(uuid, "");
+        ai_message = document.getElementById(uuid)
+        state.activeAIMessage = ai_message
+    }
    await addStreamedMessage(uuid, chunk);
})

@@ -23,9 +29,7 @@ socket.on("tokens", async (tokens) => {
})

socket.on("start_message", async () => {
-    uuid = generateUUID();
-    console.log(uuid);
-    await addStreamedMessage(uuid, "");
+
})

// Remember to parse the streamed response
1 change: 0 additions & 1 deletion docker-compose.yml
@@ -1,5 +1,4 @@
services:
-
  llm-service:
    build: ./core
    restart: unless-stopped
