diff --git a/.github/workflows/hugging_face.yml b/.github/workflows/hugging_face.yml index 9c33b50..7fd237c 100644 --- a/.github/workflows/hugging_face.yml +++ b/.github/workflows/hugging_face.yml @@ -12,11 +12,10 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - lfs: true - - name: Navigate to frontend directory - run: cd ./frontend + lfs: true - name: Push to hub env: HF_TOKEN: ${{ secrets.HF_TOKEN }} - run: git push https://salgadev:$HF_TOKEN@huggingface.co/spaces/salgadev/docverifyrag main + HF_USER: ${{ vars.HF_USER }} + run: git push https://$HF_USER:$HF_TOKEN@huggingface.co/spaces/$HF_USER/docverifyrag main diff --git a/app.py b/app.py index 02c3d47..cd10c67 100644 --- a/app.py +++ b/app.py @@ -1,9 +1,9 @@ -import streamlit as st import io import os +import streamlit as st import tempfile from scripts import generate_metadata, ingest st.title('DocVerifyRAG') @@ -19,8 +19,9 @@ st.write(f'Created temporary file {file_path}') docs = ingest(file_path) + st.write('## Querying Together.ai API') metadata = generate_metadata(docs) - st.write('## Converted Text') + st.write('## Metadata Generated by mistralai/Mixtral-8x7B-Instruct-v0.1') st.write(metadata) # Clean up the temporary file diff --git a/scripts.py b/scripts.py index d4060c8..a1c7616 100644 --- a/scripts.py +++ b/scripts.py @@ -72,8 +72,9 @@ def generate_metadata(docs): ) # Call the LLM with the JSON schema + model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1" chat_completion = client.chat.completions.create( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", + model=model_name, messages=[ { "role": "system",