Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[BUG CLIENT]: 'generator' object has no attribute 'iter_bytes' #167

Open
leejoeee opened this issue Dec 25, 2024 · 0 comments
Open

[BUG CLIENT]: 'generator' object has no attribute 'iter_bytes' #167

leejoeee opened this issue Dec 25, 2024 · 0 comments

Comments

@leejoeee
Copy link

Python -VV

AttributeError: 'generator' object has no attribute 'iter_bytes'

Pip Freeze

pip mistralai

Reproduction Steps

Run the script included under Additional Context below.

Expected Behavior

Expected the code to run to completion without raising an error.

Additional Context

# --- Environment / configuration bootstrap -----------------------------------
# NOTE: thread-count environment variables must be set BEFORE importing
# numpy/torch — their native thread pools are initialized at import time,
# so setting OMP_NUM_THREADS afterwards has no effect.
import os

os.environ['OMP_NUM_THREADS'] = '4'

import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import openai

# Credentials come from the environment; never commit API keys to source.
openai.api_key = os.environ['OPENAI_API_KEY']
openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")

from data.serialize import SerializerSettings
from models.utils import grid_iter
from models.promptcast import get_promptcast_predictions_data
from models.darts import get_arima_predictions_data
from models.llmtime import get_llmtime_predictions_data
from data.small_context import get_datasets
from models.validation_likelihood_tuning import get_autotuned_predictions_data

# SECURITY: a literal Mistral API key was previously committed here (even
# commented out, it is exposed). It has been removed and should be rotated;
# export MISTRAL_KEY in the shell environment instead.

def plot_preds(train, test, pred_dict, model_name, show_samples=False):
    """Plot the training series, held-out truth, and a model's forecast.

    Draws the median prediction over the test index, shades the 90%
    confidence band (5th-95th quantile of the sample trajectories), and
    optionally overlays up to 10 individual sample paths.

    Args:
        train: training portion of the series (plotted as-is).
        test: held-out truth; its index is reused for the predictions.
        pred_dict: dict with keys 'median' and 'samples', and optionally
            'NLL/D' (negative log-likelihood per dimension) to annotate.
        model_name: legend label for the median forecast line.
        show_samples: if True, also draw individual sample trajectories.
    """
    median = pd.Series(pred_dict['median'], index=test.index)
    plt.figure(figsize=(8, 6), dpi=100)
    plt.plot(train)
    plt.plot(test, label='Truth', color='black')
    plt.plot(median, label=model_name, color='purple')

    # Normalize samples to a numpy array once (the original fetched and
    # converted it twice) so quantiles and row indexing work uniformly.
    samples = pred_dict['samples']
    samples = samples.values if isinstance(samples, pd.DataFrame) else samples

    # Shade the 90% confidence interval of the sample trajectories.
    lower = np.quantile(samples, 0.05, axis=0)
    upper = np.quantile(samples, 0.95, axis=0)
    plt.fill_between(median.index, lower, upper, alpha=0.3, color='purple')

    if show_samples:
        for i in range(min(10, samples.shape[0])):
            plt.plot(median.index, samples[i],
                     color='purple', alpha=0.3, linewidth=1)

    plt.legend(loc='upper left')
    nll = pred_dict.get('NLL/D')
    if nll is not None:
        plt.text(0.03, 0.85, f'NLL/D: {nll:.2f}',
                 transform=plt.gca().transAxes,
                 bbox=dict(facecolor='white', alpha=0.5))
    plt.show()

# Debug probe: peak CUDA memory allocated so far (0 if no GPU has been used).
print(torch.cuda.max_memory_allocated())
print()

# Sampling/serialization hyperparameters per model family. `settings`
# controls how numeric series are serialized into the LLM prompt.
gpt4_hypers = dict(
    alpha=0.3,
    basic=True,
    temp=1.0,
    top_p=0.8,
    settings=SerializerSettings(base=10, prec=3, signed=True, time_sep=', ', bit_sep='', minus_sign='-')
)

mistral_api_hypers = dict(
    alpha=0.3,
    basic=True,
    temp=1.0,
    top_p=0.8,
    settings=SerializerSettings(base=10, prec=3, signed=True, time_sep=', ', bit_sep='', minus_sign='-')
)

gpt3_hypers = dict(
    temp=0.7,
    alpha=0.95,
    beta=0.3,
    basic=False,
    settings=SerializerSettings(base=10, prec=3, signed=True, half_bin_correction=True)
)

llma2_hypers = dict(
    temp=0.7,
    alpha=0.95,
    beta=0.3,
    basic=False,
    settings=SerializerSettings(base=10, prec=3, signed=True, half_bin_correction=True)
)

promptcast_hypers = dict(
    temp=0.7,
    settings=SerializerSettings(base=10, prec=0, signed=True,
                                time_sep=', ',
                                bit_sep='',
                                plus_sign='',
                                minus_sign='-',
                                half_bin_correction=False,
                                decimal_point='')
)

arima_hypers = dict(p=[12, 30], d=[1, 2], q=[0])

# Map each display name to its backend model id plus hyperparameters.
# BUG FIX: 'mistral-api-small'/'mistral-api-medium' previously pointed at
# the 'mistral-api-tiny' model id (copy-paste error), so all three entries
# silently queried the same model.
model_hypers = {
    'LLMTime GPT-3.5': {'model': 'gpt-3.5-turbo-instruct', **gpt3_hypers},
    'LLMTime GPT-4': {'model': 'gpt-4', **gpt4_hypers},
    'LLMTime GPT-3': {'model': 'text-davinci-003', **gpt3_hypers},
    'PromptCast GPT-3': {'model': 'text-davinci-003', **promptcast_hypers},
    'LLMA2': {'model': 'llama-7b', **llma2_hypers},
    'mistral': {'model': 'mistral', **llma2_hypers},
    'mistral-api-tiny': {'model': 'mistral-api-tiny', **mistral_api_hypers},
    'mistral-api-small': {'model': 'mistral-api-small', **mistral_api_hypers},
    'mistral-api-medium': {'model': 'mistral-api-medium', **mistral_api_hypers},
    'ARIMA': arima_hypers,
}

# Prediction function per model; uncomment entries to run more models.
model_predict_fns = {
    #'LLMA2': get_llmtime_predictions_data,
    #'mistral': get_llmtime_predictions_data,
    #'LLMTime GPT-4': get_llmtime_predictions_data,
    'mistral-api-tiny': get_llmtime_predictions_data,
}

model_names = list(model_predict_fns.keys())

datasets = get_datasets()
ds_name = 'AirPassengersDataset'

data = datasets[ds_name]
train, test = data  # or change to your own (train, test) series pair
out = {}  # model name -> autotuned prediction dict

# NOTE: the loop body below had lost its indentation in the issue paste;
# restored here. GPT-4 takes about a minute per run.
for model in model_names:
    model_hypers[model].update({'dataset_name': ds_name})  # needed by promptcast
    hypers = list(grid_iter(model_hypers[model]))
    num_samples = 10
    pred_dict = get_autotuned_predictions_data(
        train, test, hypers, num_samples,
        model_predict_fns[model], verbose=False, parallel=False)
    out[model] = pred_dict
    plot_preds(train, test, pred_dict, model, show_samples=True)

Suggested Solutions

The error "'generator' object has no attribute 'iter_bytes'" did not occur before, so it may have been introduced by a recent update of the mistralai client. Could you tell me which client version is unaffected by this problem, or which version I should pin to?

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant