Skip to content

Commit

Permalink
Merge pull request #6 from ArjunBakhale/main
Browse files Browse the repository at this point in the history
Slack App Suggested Features
  • Loading branch information
krokicki authored Jul 29, 2024
2 parents 72566d9 + 5ff48c5 commit d10674a
Show file tree
Hide file tree
Showing 3 changed files with 34 additions and 66 deletions.
2 changes: 1 addition & 1 deletion docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ services:
SLACK_TOKEN:

slack-bot:
image: ghcr.io/janeliascicomp/gpt-semantic-search-slack-bot:0.1.4
image: ghcr.io/janeliascicomp/gpt-semantic-search-slack-bot:0.1.5
restart: unless-stopped
environment:
WEAVIATE_URL: 'http://weaviate:8080'
Expand Down
4 changes: 2 additions & 2 deletions generate_full_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ def get_slack_client(self):

def get_query_engine(self):
# Assuming settings like model, class_prefix, etc., are set elsewhere or passed as parameters
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
llm = OpenAI(model="gpt-4o", temperature=0)
embed_model = OpenAIEmbedding(model="text-embedding-3-large")
prompt_helper = PromptHelper(4096, 256, 0.1)

Expand All @@ -82,7 +82,7 @@ def get_query_engine(self):

retriever = VectorIndexRetriever(
index,
similarity_top_k=5,
similarity_top_k=3,
vector_store_query_mode=VectorStoreQueryMode.HYBRID,
alpha=0.5,
)
Expand Down
94 changes: 31 additions & 63 deletions slack_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,20 +6,18 @@
from threading import Thread, Event
from concurrent.futures import ThreadPoolExecutor, TimeoutError

# Import your SemanticSearchService
from generate_full_response import SemanticSearchService
import logging
import re

logging.basicConfig(level=logging.INFO)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
app = App(token=os.environ["SLACK_BOT_TOKEN"])

# # Initialize the SemanticSearchService
weaviate_url = os.environ["WEAVIATE_URL"]
service = SemanticSearchService(weaviate_url)

# # Conversation history (we'll keep this for future use)
# conversation_history = {}
service = SemanticSearchService(weaviate_url)

def update_message(client, channel, timestamp, text, blocks=None):
client.chat_update(
Expand All @@ -29,24 +27,19 @@ def update_message(client, channel, timestamp, text, blocks=None):
blocks=blocks
)

def generate_response_with_animation(event, say, client):
user_id = event['user']
channel = event['channel']
text = event['text'].split('>')[1].strip()

# Initialize or update conversation history
# if user_id not in conversation_history:
# conversation_history[user_id] = []
# conversation_history[user_id].append(f"Human: {text}")

def generate_response_with_animation(text, channel, thread_ts, client):
# Send an initial message with a random thinking phrase
thinking_phrases = [
"Thinking...",
"Processing your request...",
"Hmmmmm..."
]
initial_message = random.choice(thinking_phrases)
result = say(initial_message)
result = client.chat_postMessage(
channel=channel,
text=initial_message,
thread_ts=thread_ts # This ensures the message is posted in the thread if there is one
)
message_ts = result['ts']

# Start the loading animation
Expand Down Expand Up @@ -82,60 +75,35 @@ def animate():
stop_event.set()
animation_thread.join()

# Update conversation history
# conversation_history[user_id].append(f"Assistant: {response}")

# # Truncate conversation history if it gets too long
# if len(conversation_history[user_id]) > 10:
# conversation_history[user_id] = conversation_history[user_id][-10:]

# Create a formatted response with markdown
formatted_response = f"*Here's what I found:*\n\n{response}"

# Update the message with the final response
# blocks = [
# {
# "type": "section",
# "text": {"type": "mrkdwn", "text": formatted_response}
# },
# {
# "type": "context",
# "elements": [
# {
# "type": "mrkdwn",
# "text": "If you found this helpful, react with :thumbsup:. If not, react with :thumbsdown:."
# }
# ]
# }
# ]
update_message(client, channel, message_ts, formatted_response)

@app.event("app_mention")
def handle_mention(event, say, client):
Thread(target=generate_response_with_animation, args=(event, say, client)).start()

# @app.event("reaction_added")
# def handle_reaction(event, say):
# logger.debug(f"Reaction event received: {event}")
def process_message(event, client):
    """Extract the user's query from a Slack message event and answer it asynchronously.

    Strips the bot's own @-mention from the message text, then hands the
    query off to a background thread so this event handler can return
    immediately (Slack re-delivers events that are not acknowledged quickly).

    Args:
        event: Slack event payload; must contain 'text', 'channel', and 'ts'.
        client: Slack WebClient used both to resolve the bot's user id and,
            downstream, to post/update the reply.
    """
    text = event['text']
    channel = event['channel']
    # Reply inside the existing thread if there is one; otherwise root a new
    # thread at this message.
    thread_ts = event.get('thread_ts', event['ts'])

    # Remove the bot mention if present so only the question remains.
    # NOTE(review): auth_test() is a network round-trip on every message —
    # consider resolving the bot user id once at startup and caching it.
    bot_id = client.auth_test()["user_id"]
    text = re.sub(f'<@{bot_id}>', '', text).strip()

    # Generate the answer (with a "thinking" animation) off the handler thread.
    Thread(target=generate_response_with_animation, args=(text, channel, thread_ts, client)).start()

# if reaction == "thumbsup":
# say(text="Thank you for the positive feedback! I'm glad I could help.", channel=channel, thread_ts=ts)
# logger.info("Positive feedback received")
# elif reaction == "thumbsdown":
# say(text="I'm sorry my response wasn't helpful. Could you provide more details about what you're looking for?", channel=channel, thread_ts=ts)
# logger.info("Negative feedback received")
# else:
# logger.info(f"Reaction {reaction} received but not handled")
@app.event("app_mention")
def handle_mention(event, client):
    # When the bot is @-mentioned in a channel, treat the message as a query.
    # Parameter names ('event', 'client') are significant: Bolt injects
    # listener arguments by name.
    process_message(event, client)

@app.event("message")
def handle_message(event, client):
    """Answer plain direct messages sent to the bot.

    Ignores messages authored by bots (including this bot's own replies,
    which would otherwise cause a feedback loop) and subtyped events such as
    message_changed / message_deleted, which carry no 'text' field and would
    crash process_message. Only DM ('im') channels are handled here; channel
    messages must @-mention the bot (see handle_mention).
    """
    # Ignore messages from bots.
    if "bot_id" in event:
        return
    # Skip subtyped events (edits, deletions, joins, ...) that lack 'text'.
    if event.get("subtype"):
        return
    logger.info(event)
    # Use .get() — not every message event is guaranteed a channel_type key.
    if event.get('channel_type') == 'im':
        process_message(event, client)

if __name__ == "__main__":
    # Socket Mode keeps a persistent connection to Slack, so no public HTTP
    # endpoint is needed for event delivery. start() blocks until shutdown,
    # so it must be called exactly once.
    handler = SocketModeHandler(app, str(os.environ["SLACK_APP_TOKEN"]))
    handler.start()

0 comments on commit d10674a

Please sign in to comment.