
Feat: GPT4All Model Integration #16

Open · wants to merge 9 commits into base: main
13 changes: 8 additions & 5 deletions README.md
@@ -14,8 +14,11 @@ To set up your environment, you will need to generate a `utils.py` file that con
### Step 1. Generate Utils File
In the `reverie/backend_server` folder (where `reverie.py` is located), create a new file titled `utils.py` and copy and paste the content below into the file:
```
-# Copy and paste your OpenAI API Key
-openai_api_key = "<Your OpenAI API>"
+# Select the GPT4All Model you'll use for the simulation. See: https://observablehq.com/@simonw/gpt4all-models
+gpt4all_model = "orca-mini-3b.ggmlv3.q4_0.bin"
+max_tokens = 30
+temperature = 0.5

# Put your name
key_owner = "<Name>"

@@ -31,7 +34,7 @@ collision_block_id = "32125"
# Verbose
debug = True
```
-Replace `<Your OpenAI API>` with your OpenAI API key, and `<name>` with your name.
+Replace `<name>` with your name.
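+
+After this change, `gpt_structure.py` reads these values through `from utils import *`. The sketch below is purely illustrative (it mirrors this PR's code and is not an extra setup step):
+```
+# Illustrative only: how gpt_structure.py consumes the utils.py values.
+from gpt4all import GPT4All
+
+from utils import *  # provides gpt4all_model, max_tokens, temperature
+
+model = GPT4All(gpt4all_model)  # downloads the model file on first use by default
+print(model.generate("Hello!", max_tokens=max_tokens))
+```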

### Step 2. Install requirements.txt
Install everything listed in the `requirements.txt` file (I strongly recommend first setting up a virtualenv as usual). A note on Python version: we tested our environment on Python 3.9.12.
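For example, a typical setup looks like this (standard `venv` and `pip` commands; adjust the activation line for your shell):
```
python -m venv venv
source venv/bin/activate  # on Windows: venv\Scripts\activate
pip install -r requirements.txt
```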
@@ -112,12 +115,12 @@ For a more involved customization, you will need to author your own base simulat

## <img src="https://joonsungpark.s3.amazonaws.com:443/static/assets/characters/profile/Eddy_Lin.png" alt="Generative Eddy"> Authors and Citation

-**Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, Meredith Ringel Morris, Percy Liang, Michael S. Bernstein
+**Authors:** Cassini Saturn, Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, Meredith Ringel Morris, Percy Liang, Michael S. Bernstein

Please cite our paper if you use the code or data in this repository.
```
@inproceedings{Park2023GenerativeAgents,
-author = {Park, Joon Sung and O'Brien, Joseph C. and Cai, Carrie J. and Morris, Meredith Ringel and Liang, Percy and Bernstein, Michael S.},
+author = {Cassini, Saturn and Park, Joon Sung and O'Brien, Joseph C. and Cai, Carrie J. and Morris, Meredith Ringel and Liang, Percy and Bernstein, Michael S.},
title = {Generative Agents: Interactive Simulacra of Human Behavior},
year = {2023},
publisher = {Association for Computing Machinery},
2 changes: 1 addition & 1 deletion requirements.txt
@@ -28,7 +28,7 @@ matplotlib==3.7.2
multidict==6.0.4
nltk==3.6.5
numpy==1.25.2
-openai==0.27.0
+gpt4all==1.0.5
outcome==1.2.0
packaging==23.0
pandas==2.0.3
131 changes: 51 additions & 80 deletions reverie/backend_server/persona/prompt_template/gpt_structure.py
@@ -1,81 +1,70 @@
"""
-Author: Joon Sung Park ([email protected])
+Author:
+Cassini Saturn ([email protected])
+Joon Sung Park ([email protected])

File: gpt_structure.py
-Description: Wrapper functions for calling OpenAI APIs.
+Description: Wrapper functions for calling GPT4All APIs.
"""
import json
import random
-import openai
import time

from utils import *

-openai.api_key = openai_api_key
+from gpt4all import GPT4All, Embed4All

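+# Load the local model named in utils.py. With default settings the gpt4all
+# bindings download the model file on first use (behavior as of gpt4all 1.0.5).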
+model = GPT4All(gpt4all_model)

def temp_sleep(seconds=0.1):
  time.sleep(seconds)

def ChatGPT_single_request(prompt):
  temp_sleep()

-  completion = openai.ChatCompletion.create(
-    model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": prompt}]
-  )
-  return completion["choices"][0]["message"]["content"]

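+  # max_tokens comes from utils.py via the star import above.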
+  output = model.generate(prompt, max_tokens=max_tokens)
+  return output

# ============================================================================
-# #####################[SECTION 1: CHATGPT-3 STRUCTURE] ######################
+# #####################[SECTION 1: GPT4All STRUCTURE] ########################
# ============================================================================

-def GPT4_request(prompt):
+def GPT4All_request(prompt):
"""
-  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
-  server and returns the response.
+  Given a prompt, make a request to the GPT4All model and return the response.
  ARGS:
    prompt: a str prompt
-    gpt_parameter: a python dictionary with the keys indicating the names of
-                   the parameter and the values indicating the parameter
-                   values.
  RETURNS:
-    a str of GPT-3's response.
+    a str of GPT4All's response.
"""
  temp_sleep()

  try:
-    completion = openai.ChatCompletion.create(
-      model="gpt-4",
-      messages=[{"role": "user", "content": prompt}]
-    )
-    return completion["choices"][0]["message"]["content"]
+    output = model.generate(prompt, max_tokens=max_tokens)
+    return output
  except:
    print ("ChatGPT ERROR")
    return "ChatGPT ERROR"


def ChatGPT_request(prompt):
"""
-  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
-  server and returns the response.
+  Given a prompt, make a request to the GPT4All model and return the response.

  ARGS:
    prompt: a str prompt
-    gpt_parameter: a python dictionary with the keys indicating the names of
-                   the parameter and the values indicating the parameter
-                   values.
  RETURNS:
-    a str of GPT-3's response.
+    a str of GPT4All's response.
"""
  # temp_sleep()
  try:
-    completion = openai.ChatCompletion.create(
-      model="gpt-3.5-turbo",
-      messages=[{"role": "user", "content": prompt}]
-    )
-    return completion["choices"][0]["message"]["content"]
+    output = model.generate(prompt, max_tokens=max_tokens)
+    return output
  except:
    print ("ChatGPT ERROR")
    return "ChatGPT ERROR"
@@ -89,19 +78,19 @@ def GPT4_safe_generate_response(prompt,
                                func_validate=None,
                                func_clean_up=None,
                                verbose=False):
-  prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
+  prompt = 'GPT4All Prompt:\n"""\n' + prompt + '\n"""\n'
  prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
  prompt += "Example output json:\n"
  prompt += '{"output": "' + str(example_output) + '"}'

  if verbose:
-    print ("CHAT GPT PROMPT")
+    print ("GPT4All PROMPT")
    print (prompt)

  for i in range(repeat):

    try:
-      curr_gpt_response = GPT4_request(prompt).strip()
+      curr_gpt_response = GPT4All_request(prompt).strip()
      end_index = curr_gpt_response.rfind('}') + 1
      curr_gpt_response = curr_gpt_response[:end_index]
      curr_gpt_response = json.loads(curr_gpt_response)["output"]
@@ -128,14 +117,14 @@ def ChatGPT_safe_generate_response(prompt,
                                   func_validate=None,
                                   func_clean_up=None,
                                   verbose=False):
-  # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n'
+  # prompt = 'GPT4All Prompt:\n"""\n' + prompt + '\n"""\n'
  prompt = '"""\n' + prompt + '\n"""\n'
  prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
  prompt += "Example output json:\n"
  prompt += '{"output": "' + str(example_output) + '"}'

  if verbose:
-    print ("CHAT GPT PROMPT")
+    print ("GPT4All PROMPT")
    print (prompt)

  for i in range(repeat):
@@ -171,7 +160,7 @@ def ChatGPT_safe_generate_response_OLD(prompt,
                                       func_clean_up=None,
                                       verbose=False):
  if verbose:
-    print ("CHAT GPT PROMPT")
+    print ("GPT4All PROMPT")
    print (prompt)

  for i in range(repeat):
@@ -191,34 +180,34 @@ def ChatGPT_safe_generate_response_OLD(prompt,


# ============================================================================
-# ###################[SECTION 2: ORIGINAL GPT-3 STRUCTURE] ###################
+# ###################[SECTION 2: ORIGINAL GPT4All STRUCTURE] #################
# ============================================================================

def GPT_request(prompt, gpt_parameter):
"""
-  Given a prompt and a dictionary of GPT parameters, make a request to OpenAI
-  server and returns the response.
+  Given a prompt and a dictionary of GPT parameters, make a request to the
+  local GPT4All model and return the response.
  ARGS:
    prompt: a str prompt
    gpt_parameter: a python dictionary with the keys indicating the names of
                   the parameter and the values indicating the parameter
                   values.
  RETURNS:
-    a str of GPT-3's response.
+    a str of GPT4All's response.
"""
  temp_sleep()
  try:
-    response = openai.Completion.create(
-                model=gpt_parameter["engine"],
-                prompt=prompt,
-                temperature=gpt_parameter["temperature"],
-                max_tokens=gpt_parameter["max_tokens"],
-                top_p=gpt_parameter["top_p"],
-                frequency_penalty=gpt_parameter["frequency_penalty"],
-                presence_penalty=gpt_parameter["presence_penalty"],
-                stream=gpt_parameter["stream"],
-                stop=gpt_parameter["stop"],)
-    return response.choices[0].text
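+    # NOTE: gpt4all's generate() accepts max_tokens, temp, and top_p; the
+    # engine, stream, stop, and penalty keys in gpt_parameter have no direct
+    # GPT4All equivalent and are dropped here (based on the gpt4all 1.0.5 API).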
+    output = model.generate(
+      prompt,
+      max_tokens=gpt_parameter["max_tokens"],
+      temp=gpt_parameter["temperature"],
+      top_p=gpt_parameter["top_p"],
+    )
+    return output
  except:
    print ("TOKEN LIMIT EXCEEDED")
    return "TOKEN LIMIT EXCEEDED"
@@ -236,7 +225,7 @@ def generate_prompt(curr_input, prompt_lib_file):
                 INPUT, THIS CAN BE A LIST.)
    prompt_lib_file: the path to the prompt file.
  RETURNS:
-    a str prompt that will be sent to OpenAI's GPT server.
+    a str prompt that will be sent to the GPT4All model.
  """
  if type(curr_input) == type("string"):
    curr_input = [curr_input]
@@ -273,17 +262,19 @@ def safe_generate_response(prompt,
  return fail_safe_response


-def get_embedding(text, model="text-embedding-ada-002"):
-  text = text.replace("\n", " ")
-  if not text:
-    text = "this is blank"
-  return openai.Embedding.create(
-          input=[text], model=model)['data'][0]['embedding']
+def get_embedding(text):
+  text = text.replace("\n", " ")
+  if not text:
+    text = "this is blank"
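+  # Note: Embed4All loads a local embedding model when constructed, so creating
+  # it per call re-loads that model each time; reusing one module-level instance
+  # would avoid the overhead (based on gpt4all 1.0.5 behavior).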
+  embedder = Embed4All()
+  embedding = embedder.embed(text)
+  return embedding



if __name__ == '__main__':
gpt_parameter = {"engine": "text-davinci-003", "max_tokens": 50,
"temperature": 0, "top_p": 1, "stream": False,
gpt_parameter = {"max_tokens": max_tokens,
"temperature": temperature, "top_p": 1, "stream": False,
"frequency_penalty": 0, "presence_penalty": 0,
"stop": ['"']}
curr_input = ["driving to a friend's house"]
@@ -309,23 +300,3 @@ def __func_clean_up(gpt_response):
                                  True)

  print (output)