diff --git a/evaluation/discoverybench/README.md b/evaluation/discoverybench/README.md
new file mode 100644
index 00000000000..a0d8994709d
--- /dev/null
+++ b/evaluation/discoverybench/README.md
@@ -0,0 +1,37 @@
+# DiscoveryBench with OpenHands
+
+[DiscoveryBench](https://github.com/allenai/discoverybench/) [(Paper)](https://arxiv.org/abs/2407.01725v1) contains 264 tasks collected across 6 diverse domains, such as biology, economics, and sociology. It incorporates discovery workflows from published papers to approximate the real-world challenges faced by researchers.
+
+
+## Setup Environment and LLM Configuration
+
+1. Please follow the instructions [here](https://github.com/openlocus/OpenHands/blob/discoverybench-openhands-integration/evaluation/README.md#setup) to set up the OpenHands development environment and LLMs locally.
+
+2. Execute the bash script to start the DiscoveryBench evaluation:
+
+```
+./evaluation/discoverybench/scripts/run_infer.sh [YOUR MODEL CONFIG]
+```
+Replace `[YOUR MODEL CONFIG]` with the name of any model you have set up in `config.toml`.
+
+
+## Run Inference on DiscoveryBench Instances
+
+When the `run_infer.sh` script is started, it automatically pulls the latest DiscoveryBench instances and sets up the agent environment. The OpenHands agent is invoked to process the task within this environment, producing a hypothesis. We then evaluate it against the “gold” hypothesis provided by DiscoveryBench. The evaluation result, along with the agent chat history, is logged to `output.jsonl` under `evaluation_outputs`.
+
+
+```
+./evaluation/discoverybench/scripts/run_infer.sh [MODEL_CONFIG] [GIT_COMMIT] [AGENT] [EVAL_LIMIT] [NUM_WORKERS]
+```
+
+- `MODEL_CONFIG`: Name of the model you want to evaluate with.
+- `GIT_COMMIT`: The git commit hash or release tag of OpenHands to evaluate, e.g., `HEAD` or a specific tag like `0.6.2`.
+- `AGENT`: Use `CodeActAgent`; it is currently the only supported agent.
+- `EVAL_LIMIT`: Number of samples to evaluate.
+- `NUM_WORKERS`: Number of workers to parallelize the evaluation process.
diff --git a/evaluation/discoverybench/eval_utils/README.md b/evaluation/discoverybench/eval_utils/README.md
new file mode 100644
index 00000000000..13c98ebaa8d
--- /dev/null
+++ b/evaluation/discoverybench/eval_utils/README.md
@@ -0,0 +1,7 @@
+## DiscoveryBench Evaluation Utils
+
+- **`eval_w_subhypo_gen.py`**: Implements the DiscoveryBench logic for evaluating agent-generated hypotheses.
+- **`lm_utils.py`**: Provides utility functions necessary for the evaluation process.
+- **`openai_helpers.py`**: Includes helper functions for OpenAI-related tasks.
+- **`openai_semantic_gen_prompts.py`**: Contains prompts used for semantic generation.
+- **`response_parser.py`**: Handles the parsing of agent-generated hypotheses.
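For reference, `run_infer.py` wires these utilities together. Below is a minimal sketch of that flow; the log file name and the example query, hypotheses, and dataset metadata are purely illustrative (not part of the benchmark), and running it requires `OPENAI_API_KEY` because the evaluator itself queries an OpenAI model.

```python
from evaluation.discoverybench.eval_utils.response_parser import (
    extract_gen_hypo_from_logs,
)
from evaluation.discoverybench.eval_utils.eval_w_subhypo_gen import (
    run_eval_gold_vs_gen_NL_hypo_workflow,
)

# Parse the agent's workflow summary and final hypothesis out of its chat history.
# (The file name is hypothetical; run_infer.py builds this text from the event stream.)
with open('agent_history.log') as f:
    gen_hypo, gen_workflow, parse_error = extract_gen_hypo_from_logs(f.read())

# Score the generated hypothesis against the gold hypothesis (example data only).
eval_rec = run_eval_gold_vs_gen_NL_hypo_workflow(
    query='How did the sandhill crane population change from 1995 to 2009?',
    gold_hypo='From 1995 to 2009, the population surged by roughly 10x.',
    gold_workflow='',
    gen_hypo=gen_hypo,
    gen_workflow=gen_workflow,
    dataset_meta={
        'datasets': [
            {'description': 'Yearly sandhill crane counts', 'columns': {'raw': []}}
        ]
    },
    llm_used='gpt-4-1106-preview',
    dataset_type='real',
)
print(eval_rec['final_score'])
```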
diff --git a/evaluation/discoverybench/eval_utils/__init__.py b/evaluation/discoverybench/eval_utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/evaluation/discoverybench/eval_utils/eval_w_subhypo_gen.py b/evaluation/discoverybench/eval_utils/eval_w_subhypo_gen.py new file mode 100644 index 00000000000..a80df8279cf --- /dev/null +++ b/evaluation/discoverybench/eval_utils/eval_w_subhypo_gen.py @@ -0,0 +1,538 @@ +import json +import logging + +from openai import OpenAI + +from .lm_utils import run_chatgpt_query_multi_turn +from .openai_helpers import get_response + +logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO, +) +logger = logging.getLogger(__name__) + + +def get_score_from_answer(type, answer): + if type == 'context': + answer = answer.replace('Answer:', '').strip() + if answer.startswith('A)'): + return 1.0 + elif answer.startswith('B)'): + return 0.0 + return -1.0 + + elif type == 'var': + try: + var_json = json.loads(answer) + # print(f"var_json:{var_json}") + p = 0.0 + r = 0.0 + f1 = 0.0 + if var_json['sizeB']: + p = var_json['intersection'] / var_json['sizeB'] + if var_json['sizeA']: + r = var_json['intersection'] / var_json['sizeA'] + if p > 0.0 and r > 0.0: + f1 = (2 * p * r) / (p + r) + else: + f1 = 0.0 + eval_rec = { + 'p': p, + 'r': r, + 'f1': f1, + 'sizeA': var_json['sizeA'], + 'sizeB': var_json['sizeB'], + 'intersection': var_json['intersection'], + 'explanation': var_json['explanation'], + } + print(f'var_eval: {eval_rec}') + return eval_rec + except Exception: # COMMENT: added Exception + return {'p': -1.0, 'r': -1.0, 'f1': -1.0} + elif type == 'rel': + print(answer) + rel_json = json.loads(answer) + answer_str = rel_json['answer'].strip() + if answer_str.startswith('A') or 'very similar' in answer_str: + return 1.0 + elif ( + answer_str.startswith('B') or 'similar but general than HypoA' in answer_str + ): + return 0.5 + elif answer_str.startswith('C') or 'different' in answer_str: + return 0.0 + return -1.0 + return -1.0 + + +def ask_dimension_question( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + dimension, + dataset_type, + use_column_metadata=True, +): + dimension_question = '' + answer = '' + score = 0.0 + if dimension == 'var': + score = {'p': -1.0, 'r': -1.0, 'f1': -1.0} + num_tokens = 256 + num_retries = 1 + json_response = False + + messages = [ + { + 'role': 'system', + 'content': 'You are an AI assistant that helps evaluate a data-driven hypothesis. You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + ] + if dimension == 'context': + dimension_question = """\ + Question: Is HypoB defined in the same context as HypoA? + (Context refers to assumptions/stratification under which the hypotheses are defined.) + Options: A) same B) different + What is your answer?""" + elif dimension == 'var': + dimension_question = """\ + Question: For both HypoA and HypoB, what are the different variables found in the hypotheses? \ + Return your answer as a JSON object in the following format: + ```json + {{ + "sizeA": num of variables used in HypoA + "sizeB": num of variables used in HypoB + "intersection": num of variables common in HypoA and HypoB. 
Use *fuzzy matching* to determine intersection, accounting for paraphrases or slightly different surface forms + "explanation": a short text explanation about the variables + }}``` + Answer:""" + num_tokens = 512 + num_retries = 1 + json_response = True + elif dimension == 'rel': + dimension_question = """\ + Question: Does HypoB exhibit the same relation as HypoA? + Compare using following example hierarchy of relationships (based on specificity): \ + "there exists a relationship" > "positive relationship" > "positive AND (linear OR quadratic)" > "positive AND linear". + Options: A) very similar B) similar but general than HypoA C) different + Return your answer as a JSON object in the following format: + ```json + {{ + "answer": one of the options from A) very similar B) similar but general than HypoA C) different + "explanation": a short text explanation about the relationship comparison + }}``` + Answer:""" + num_tokens = 512 + num_retries = 1 + json_response = True + + datasets_json = prepare_dataset_metadata_json( + dataset_meta, dataset_type=dataset_type, use_column_metadata=use_column_metadata + ) + + dimension_question_str = f"""\ + You are going to compare two natural-language hypotheses HypoA and HypoB accompanied with optional workflows: WorkflowA for HypoA and WorkflowB for HypoB. \ + Both the hypotheses answer the natural language query "QUERY" over the dataset(s) described by dataset description(s) and column description(s) below. \ + Compare HypoA and HypoB in terms of three aspects: Contexts, Variables, and Relations. \ + E.g., for the hypothesis "From 1995 to 2009, the number of sandhill cranes around the tundra (Indigilka River) surged by an astounding ~10X": + * Contexts refer to stratification of the data under which the given hypothesis is True. E.g., "For all women", "From 1995 to 2009". + * Variables refer to the set of variables (either dependent or independent) that are mentioned in the hypothesis. E.g., number of sandhill cranes, location. + * Relations refer to the form of relation between the variables. E.g., "surged by ~10x". + + Answer following questions for a given pair of hypotheses, HypoA and HypoB, along with an explanation grounded on the QUERY and the DATASET(S). 
+ + Here is the metadata for the task: + ```json + {{ + "datasets": {datasets_json}, + "query": {query}, + "HypoA": {gold_hypo}, + "WorkflowA": {gold_workflow}, + "HypoB": {gen_hypo}, + "WorkflowB": {gen_workflow} + }} + ``` + + {dimension_question}""" + + messages.append({'role': 'user', 'content': dimension_question_str}) + for retry in range(num_retries): + response = run_chatgpt_query_multi_turn( + messages=messages, + model_name=llm_used, + max_tokens=num_tokens, + temperature=0, # 0 for greedy best decoding + json_response=json_response, + ) + if response is not None: # COMMENT: changed from != to is not + break + + if response is not None: # COMMENT: changed from != to is not + answer = response.choices[0].message.content.strip() + score = get_score_from_answer(type=dimension, answer=answer) + + return dimension_question, answer, score + + +def prepare_dataset_metadata_json(dataset_meta, dataset_type, use_column_metadata=True): + if dataset_meta is None: # COMMENT: changed from == to is None + return [ + { + 'dataset_description': '', + 'columns': [], + } + ] + datasets_json = [] + if dataset_type == 'real': + for d in dataset_meta['datasets']: + datasets_json.append( + { + 'dataset_description': d['description'], + 'columns': [ + {'name': col['name'], 'description': col['description']} + for col in d['columns']['raw'] + ] + if use_column_metadata + else [], + } + ) + else: + for d in dataset_meta['datasets']: + datasets_json.append( + { + 'dataset_description': d['description'], + 'columns': [ + {'name': col['name'], 'description': col['description']} + for col in d['columns'] + ] + if use_column_metadata + else [], + } + ) + return datasets_json + + +def get_sub_hypotheses( + query, + hypo, + workflow, + dataset_meta, + llm_used, + dataset_type, + use_column_metadata=True, +): + client = OpenAI() + extraction_prompt = """\ + Given a set of dataset columns, a ground-truth hypothesis, and the analysis workflow used, your task is to extract three dimensions that define the hypothesis: Context, Variables, and Relations. \ + Here are the definitions for these dimensions: + - Contexts: Boundary conditions that limit the scope of a hypothesis. E.g., “for men over \ + the age of 30”, “in Asia and Europe”. If the context applies to the full dataset, then extract the context from the dataset_descrption. + - Variables: Known concepts that interact in a meaningful way under a given context to \ + produce the hypothesis. E.g., gender, age, income, or "None" if there is no interacting variable. + - Relations: Interactions between a given set of variables under a given context to produce \ + the hypothesis. E.g., “quadratic relationship”, “inversely proportional”, piecewise conditionals, \ + or "None" if there is no interacting relationship. + Make sure to only use the information present in the hypothesis and the workflow. Do not add any new information. \ + For each dimension, be specific, and do not omit any important details. + + Here is the metadata for the task: + ```json + { + "datasets": %s, + "hypothesis": "%s", + "workflow": "%s" + } + ``` + + Return your answer as a JSON object in the following format: + ```json + { + "sub_hypo": [ + { + "text": the hypothesis in natural language, + "context": a short text description of the context of the hypothesis, + "variables": a list of columns involved in the hypothesis, + "relations": a short text description of the relationship between the variables of the hypothesis + }, + ... 
+ ] + }``` + """ + datasets_json = prepare_dataset_metadata_json( + dataset_meta, dataset_type, use_column_metadata=use_column_metadata + ) + _prompt = extraction_prompt % (datasets_json, hypo, workflow) + sub_hypo_json = get_response(client, _prompt, model=llm_used, max_retry=1) + + if sub_hypo_json is not None: # COMMENT: changed from != to is not + # print(f"full hypothesis: {hypo}") + print(f'sub_hypo_json: {sub_hypo_json}') + else: + sub_hypo_json = { + 'sub_hypo': [], + } + + sub_hypo_json['full_hypo'] = hypo + + return sub_hypo_json + + +def match_context_with_gpt( + gold_hyp, gold_context, pred_hyp, pred_context, model='gpt-3.5-turbo' +): + prompt = f"""\ + Given a gold hypothesis, a gold context, a predicted hypothesis, and a predicted context, your task is \ + to determine if the predicted context semantically matches the ground-truth context. \ + Here is the definition for Context: Boundary conditions that limit the scope of a sub-hypothesis. E.g., “for men over the age of 30”, “in Asia and Europe”. If the context applies to the full dataset, then the context is derived from the dataset_descrption. \ + Here is the definition for Context: Boundary conditions that limit the scope of a sub-hypothesis. E.g., “for men over the age of 30”, “in Asia and Europe”. If the context applies to the full dataset, then the context is derived from the dataset_descrption. \ + If the predicted context matches the gold context, return true, otherwise return false. + If both gold and predicted hypotheses are defined over the context of the full dataset, then also return true. + If both gold and predicted hypotheses are defined over the context of the full dataset, then also return true. + + Here is the metadata for the task: + ```json + {{ + "gold_hypothesis": "{gold_hyp}", + "gold_context": "{gold_context}", + "predicted_hypothesis": "{pred_hyp}", + "predicted_context": "{pred_context}" + }} + ``` + + Return your answer as a JSON object in the following format: + ```json + {{ + "match": true or false + }} + ```""" + + client = OpenAI() + output = get_response(client, prompt, model=model) + return output.get('match', False) + + +def is_matching_context(gold_hyp, gold_context, pred_hyp, pred_context, llm_used): + if gold_context == pred_context: + return True + if 'None' in [gold_context, pred_context]: + return False + return match_context_with_gpt( + gold_hyp, gold_context, pred_hyp, pred_context, model=llm_used + ) + + +def run_eval_gold_vs_gen_NL_subhypo( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + context_score, + dataset_type, + use_column_metadata=True, +): + # GPT-4 based evaluation to evaluate generated hypothesis in terms of context, variables, relation + + eval_rec = { + 'query': query, + 'HypoA': gold_hypo, + 'WorkflowA': gold_workflow, + 'HypoB': gen_hypo, + 'WorkflowB': gen_workflow, + } + + for dimension in ['var', 'rel']: + question, answer, score = ask_dimension_question( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + dimension=dimension, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + + eval_rec[dimension] = {'question': question, 'answer': answer, 'score': score} + + eval_rec['context'] = context_score + eval_rec['accuracy_score'] = ( + 1.0 + * eval_rec['context']['score'] + * eval_rec['var']['score']['f1'] + * eval_rec['rel']['score'] + ) + + return eval_rec + + +def run_eval_gold_vs_gen_NL_hypo_workflow( + query, + gold_hypo, + gold_workflow, + 
gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + dataset_type, + use_column_metadata=True, +): + # Input: Dataset Metadata, Query, Gold {Hg, Wg}, Predicted {Hp, Wp} + # Output: eval_rec json includes final_score + + # Procedure: + # Dataset Metadata, Query, Gold {Hg, Wg}, Pred {Hg, Wg} + # Gold: [Hg1, Hg2] (compute on the fly) Hg1 is a NL form of subhypothesis + # Predicted: [Hp1, Hp2] (compute on the fly) + + # Compute Intersection: [(Hg_i, Hp_j), …] # tuples of (gold,pred) that matched with context (do this w/o explicit extraction) + # # filter so that a gold context and a predicted context are only attached to one tuple + # Compute recall_context (programmatically) + + # r_v_list = [] + # For (Hg_i, Hp_j) in the intersection: + # With Hg_i, Hp_j in NL, ask GPT4 → #variables and #intersection and a paragraph explanation and programmatically calculate f1_v + # Hg_i, Hp_j in NL, ask GPT4 → matching score (0, 0.5 or 1) : A) very similar B) similar but general than HypoA C) different + explanation + # r_v_list ← f1_v * score_r + # accuracy_score = mean(r_v_list) + # score = [ recall_context * mean over predicted context(context_score * var_score *rel_score )] + + # recall_context = 1.0 # COMMENT: never used + eval_rec = { + 'query': query, + 'HypoA': gold_hypo, + 'WorkflowA': gold_workflow, + 'HypoB': gen_hypo, + 'WorkflowB': gen_workflow, + } + + gold_sub_hypo_json = get_sub_hypotheses( + query=query, + hypo=gold_hypo, + workflow=gold_workflow, + dataset_meta=dataset_meta, + llm_used=llm_used, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + if len(gold_sub_hypo_json['sub_hypo']) == 0: + gold_sub_hypo_json['sub_hypo'] = [ + { + 'text': gold_hypo, + 'context': 'None', + 'variables': [], + 'relations': '', + 'explanation': 'unable to segment', + } + ] + print(f'gold_sub_hypo_json: {gold_sub_hypo_json}') + + gen_sub_hypo_json = get_sub_hypotheses( + query=query, + hypo=gen_hypo, + workflow=gen_workflow, + dataset_meta=dataset_meta, + llm_used=llm_used, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + if len(gen_sub_hypo_json['sub_hypo']) == 0: + gen_sub_hypo_json['sub_hypo'] = [ + { + 'text': gen_hypo, + 'context': 'None', + 'variables': [], + 'relations': '', + 'explanation': 'unable to segment', + } + ] + print(f'gen_sub_hypo_json: {gen_sub_hypo_json}') + + eval_rec['gold_sub_hypo'] = gold_sub_hypo_json + eval_rec['gen_sub_hypo'] = gen_sub_hypo_json + + gold_subh_covered = [] + gen_subh_to_gold_subh = dict() + gen_gold_subh_to_context = dict() + + for p_id, gen_subh in enumerate(gen_sub_hypo_json['sub_hypo']): + gen_subh_to_gold_subh[p_id] = -1 + + for g_id, gold_subh in enumerate(gold_sub_hypo_json['sub_hypo']): + if g_id in gold_subh_covered: + continue + + # match context + context_bool = is_matching_context( + gold_subh['text'], + gold_subh.get('context', ''), + gen_subh['text'], + gen_subh.get('context', ''), + llm_used, + ) + if context_bool: + context_score = 1.0 + else: + context_score = 0.0 + + if context_score == 1.0: # match only when context_score = 1.0 + gen_subh_to_gold_subh[p_id] = g_id + gold_subh_covered.append(g_id) + gen_gold_subh_to_context[f'P{p_id}||G{g_id}'] = { + 'question': f"""Comapring: GoldH: {gold_subh["text"]}, GoldC: {gold_subh['context']}\nGenH: {gen_subh['text']}, GenC: {gen_subh['context']}""", + 'answer': context_bool, + 'score': context_score, + } + break + + print(f'gen_subh_to_gold_subh: {gen_subh_to_gold_subh}') + eval_rec['gen_subh_to_gold_subh'] = gen_subh_to_gold_subh + 
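    # Each matched (gold, generated) sub-hypothesis pair below is scored as
    # context_score * variable_F1 * relation_score; the final score multiplies the
    # mean accuracy over all generated sub-hypotheses (unmatched ones contribute 0)
    # by recall over the gold sub-hypotheses.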
eval_rec['gold_subh_covered'] = gold_subh_covered + matched_gold_gen_subh_evals = dict() + sum_accuracy_score = 0.0 + for p_id, g_id in gen_subh_to_gold_subh.items(): + if g_id >= 0: + key = f'P{p_id}||G{g_id}' + context_score = gen_gold_subh_to_context[key] + subh_eval_rec = run_eval_gold_vs_gen_NL_subhypo( + query, + gold_hypo, + gold_workflow, + gen_hypo, + gen_workflow, + dataset_meta, + llm_used, + context_score, + dataset_type=dataset_type, + use_column_metadata=use_column_metadata, + ) + sum_accuracy_score += subh_eval_rec['accuracy_score'] + matched_gold_gen_subh_evals[key] = subh_eval_rec + + eval_rec['matched_gold_gen_subh_evals'] = matched_gold_gen_subh_evals + eval_rec['recall_context'] = ( + len(gold_subh_covered) / len(gold_sub_hypo_json['sub_hypo']) + if len(gold_sub_hypo_json['sub_hypo']) + else 0.0 + ) + mean_accuracy_score = ( + sum_accuracy_score / len(gen_subh_to_gold_subh) + if len(gen_subh_to_gold_subh) + else 0.0 + ) + eval_rec['mean_accuracy_score'] = mean_accuracy_score + final_score = eval_rec['recall_context'] * mean_accuracy_score + eval_rec['final_score'] = final_score + print(f'eval_rec: {json.dumps(eval_rec, indent=2)}') + + return eval_rec diff --git a/evaluation/discoverybench/eval_utils/lm_utils.py b/evaluation/discoverybench/eval_utils/lm_utils.py new file mode 100644 index 00000000000..10486ee8229 --- /dev/null +++ b/evaluation/discoverybench/eval_utils/lm_utils.py @@ -0,0 +1,64 @@ +import os +import sys +import time + +from openai import OpenAI +from tenacity import ( + retry, + stop_after_attempt, # type: ignore + wait_random_exponential, # type: ignore +) + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + + +Model = Literal['gpt-4', 'gpt-3.5-turbo', 'text-davinci-003'] + +OpenAI.api_key = os.getenv('OPENAI_API_KEY') +OPENAI_GEN_HYP = { + 'temperature': 0, + 'max_tokens': 250, + 'top_p': 1.0, + 'frequency_penalty': 0, + 'presence_penalty': 0, +} + + +@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6)) +def run_chatgpt_query_multi_turn( + messages, + model_name='gpt-4-turbo', # pass "gpt4" for more recent model output + max_tokens=256, + temperature=0.0, + json_response=False, +): + response = None + num_retries = 3 + retry = 0 + while retry < num_retries: + retry += 1 + try: + client = OpenAI() + + if json_response: + response = client.chat.completions.create( + model=model_name, + response_format={'type': 'json_object'}, + messages=messages, + **OPENAI_GEN_HYP, + ) + else: + response = client.chat.completions.create( + model=model_name, messages=messages, **OPENAI_GEN_HYP + ) + break + + except Exception as e: + print(e) + print('GPT error. Retrying in 2 seconds...') + time.sleep(2) + + return response diff --git a/evaluation/discoverybench/eval_utils/openai_helpers.py b/evaluation/discoverybench/eval_utils/openai_helpers.py new file mode 100644 index 00000000000..95ab23cf9c2 --- /dev/null +++ b/evaluation/discoverybench/eval_utils/openai_helpers.py @@ -0,0 +1,190 @@ +import json + + +def OPENAI_TOPIC_GEN_MESSAGES(n=10): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given `n`, come up with a list of `n` distinct topics and their descriptions. The topics can be absolutely anything. Be as creative as possible. Return your answer as a JSON object. 
\n\nFor example, for `n`=3, a valid answer might be:\n```json\n{{"topics": [\n {{"id": 1, "topic": "cooking", "description": "Related to recipes, ingredients, chefs, etc."}},\n {{"id": 2, "topic": "sports", "description": "Related to players, stadiums, trophies, etc."}},\n {{"id": 3, "topic": "antiquing", "description": "Related to unique items, history, etc."}}\n]}}```\n\nNow, give me a list for `n`={n}. Remember, pick diverse topics from everything possible. No consecutive topics should be broadly similar. Directly respond with the answer JSON object.', + }, + ] + + +OPENAI_GEN_HYP = { + 'temperature': 1.0, + 'max_tokens': 4096, + 'top_p': 1.0, + 'frequency_penalty': 0, + 'presence_penalty': 0, +} + + +def OPENAI_SEMANTICS_GEN_MESSAGES(dependent, relationship, domain, domain_desc): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given the true relationship in a dataset and a given domain, your task is to come up with an interpretation of some real-world concepts that the relationship could be modeling from the provided domain. It\'s okay to be wrong, but suggest something reasonable. Try as much as possible to make sure that the TARGET is actually derivable from the other variables. Give your answer as a JSON object. Here\'s an example:\n\nRelationship for x2 = "(96.4 * x1 ** 3) + (88.72 * x5 ** 2) + (81.96 * x6 ** -2) + (28.13 * x3) + (97.0) + (0 * x4)"\nDomain="Sales"\nDomain description="Related to product distribution, revenues, marketing, etc."\n\nBased on this, the following real-world concepts might be applicable:\n```json\n{{\n "dependent": "x2",\n "relationship": "(96.4 * x1 ** 3) + (88.72 * x5 ** 2) + (81.96 * x6 ** -2) + (28.13 * x3) + (97.0) + (0 * x4)",\n "domain": "Sales",\n "trends": {{\n "x1": "Positive, cubic factor",\n "x2": "TARGET",\n "x3": "Positive, linear factor",\n "x4": "No relation",\n "x5": "Positive quadratic factor",\n "x6": "Positive, inverse quadratic factor"\n }},\n "interpretation": {{\n "x2": {{"description": "Volume of product sales by area", "name": "sales_area", "is_target": true}},\n "x1": {{"description": "Population by area", "name": "pop_area"}},\n "x3": {{"description": "Advertising spending", "name": "ad_spend"}},\n "x4": {{"description": "Gender ratio of marketing team", "name": "gdr_ratio_mkt_team"}},\n "x5": {{"description": "Intensity of marketing campaign", "name": "mkt_intensity"}}\n }},\n "x6": {{"description": "Distance to distribution center", "name": "dist_to_distr_ctr"}}\n}}```\n\nHere\'s a new test question:\nRelationship for {dependent} = "{relationship}"\nDomain = "{domain}"\nDomain description="{domain_desc}"\n\nRespond only with the answer JSON. Make sure that you do not forget to include the TARGET variable in the interpretation object.', + }, + ] + + +def OPENAI_SEMANTICS_GEN_W_MAP_MESSAGES( + dependent, relationship, domain, domain_desc, mapping +): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given a partial mapping from variables to real-world concepts and a true relationship in a dataset, your task is to come up with an interpretation of real-world concepts for the variables without any assigned mapping (those starting with x). Suggest something reasonable. 
The dependent variable must be derivable only from the other variables in the dependent relationship. Give your answer as a JSON object. Here\'s an example:\n\nExample partial mapping and relationship:\n```json\n{{\n "domain": "Sales",\n "domain_description": "Related to product distribution, revenues, marketing, etc.",\n "variable_mapping": {{\n "x1": {{"description": "Population by area", "name": "pop_area"}},\n "x2": {{"description": "Volume of product sales by area", "name": "sales_area"}},\n "x4": {{"description": "Gender ratio of marketing team", "name": "gdr_ratio_mkt_team"}},\n "x6": {{"description": "Distance to distribution center", "name": "dist_to_distr_ctr"}}\n }},\n "dependent_variable": "sales_area",\n "dependent_relationship": "(96.4 * pop_area ** 3) + (88.72 * x5 ** 2) + (81.96 * dist_to_distr_ctr ** -2) + (28.13 * x3) + (97.0)"\n}}```\nBased on this, an example answer would be:\n```json\n{{\n "dependent_variable": "sales_area",\n "missing_mapping": ["x3", "x5"],\n "trends": {{\n "x3": "Positive, linear factor",\n "x5": "Positive quadratic factor"\n }},\n "interpretation": {{\n "x3": {{"description": "Advertising spending", "name": "ad_spend"}},\n "x5": {{"description": "Intensity of marketing campaign", "name": "mkt_intensity"}}\n }}\n}}```\n\nHere\'s a new test question:\n```json\n{{\n "domain": "{domain}",\n "domain_description": "{domain_desc}",\n "variable_mapping": {json.dumps(mapping, indent=2)},\n "dependent_variable": "{dependent}",\n "dependent_relationship": "{relationship}"\n}}```\nRespond only with the answer JSON.', + }, + ] + + +def OPENAI_SEMANTICS_GEN_SUMMARY_MESSAGES(dataset): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given the following descriptions of the columns of a dataset, your task is to come up with a natural language overview of the dataset, which should include (1) what the dataset is about, (2) how the data was collected, (3) when the data was collected, and (3) for what purpose the data was collected. Be specific and creative.\n\nExample dataset:\n```json\n{{ \n "dataset": {{ \n "x6": {{"description": "Ancient artifact significance score", "name": "artifact_significance_score", "is_target": true}},\n "x1": {{"description": "Distance to ancient city center", "name": "dist_to_ancient_city_ctr"}},\n "x2": {{"description": "Quantity of discovered relics", "name": "relic_discovery_qty"}},\n "x3": {{"description": "Years since last archaeological expedition", "name": "years_since_exp"}},\n "x4": {{"description": "Number of artifacts in excavation site", "name": "artifact_qty"}},\n "x5": {{"description": "Soil fertility coefficient", "name": "soil_fertility_coef"}},\n "x7": {{"description": "Distance to ancient burial grounds", "name": "dist_to_burial_grounds"}},\n "x8": {{"description": "Population estimate of ancient civilization", "name": "ancient_civilization_pop_estimate"}},\n "x9": {{"description": "Temperature variation in excavation region", "name": "temp_variation"}}\n }}\n}}```\nExample description:\nThis dataset is about archaeological explorations and findings linked to ancient civilizations. The data was collected in the form of field metrics during various archaeological expeditions during the late mid-20th century. 
The purpose of the data collection is to evaluate the significance of ancient artifacts discovered during excavations.\n\nHere is a new test dataset.\n{json.dumps(dataset, indent=2)}\nProvide only the description.', + }, + ] + + +def OPENAI_GEN_HYPO_MESSAGES(dataset): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + { + 'role': 'user', + 'content': f'Given a dataset with its descriptions and the true functional relationship between its variables, your task is to generate 3 levels of hypotheses for the stated relationship in plain English. The three levels are "broad", "medium" and "narrow". Make sure that the hypotheses sound natural. *Only include concepts for variables that are present in the provided functional relationship.* Give your answer as a JSON.\n\nFor example, an example dataset might be the following:\n```json\n{{\n "domain": "cybersecurity",\n "summary": "This dataset is about measuring cybersecurity threats in a system. The data was collected by monitoring various cybersecurity metrics in a network environment. The purpose of the data collection is to assess and predict potential cybersecurity risks and vulnerabilities.",\n "variables": [\n {{\n "description": "Level of cybersecurity threat",\n "name": "cybersecurity_threat",\n "is_target": true\n }},\n {{\n "description": "Number of failed login attempts",\n "name": "failed_login_attempts"\n }},\n {{\n "description": "Amount of encrypted data",\n "name": "encrypted_data"\n }},\n {{\n "description": "Frequency of software updates",\n "name": "software_updates"\n }},\n {{\n "description": "Number of antivirus software installed",\n "name": "antivirus_software"\n }},\n {{\n "description": "Quality of firewall protection",\n "name": "firewall_quality"\n }}\n ],\n "relationship": {{\n "dependent": "cybersecurity_threat",\n "relation": "-53.5*encrypted_data**2 - 53.85*failed_login_attempts**2 + 67.75*firewall_quality - 92.16 - 36.68/software_updates**3"\n }}\n}}```\nGiven this dataset, the following is a valid answer:\n```json\n{{\n "broad": {{\n "instruction": "Be vague. Only indicate which concepts might be related but not how they are related",\n "hypothesis": "Threat to cybersecurity is influenced by several factors including the amount of encrypted data, the number of failed login attempts, the quality of the firewall, as well as how often the software is updated."\n }},\n "medium": {{\n "instruction": "Be slightly more specific. For each factor, indicate carefully whether it positively or negatively affects the relationship, but do not indicate what the exponent is.",\n "hypothesis": "Cybersecurity threat tends to decrease with the amount of data encryption, the number of failed login attempts, as well as the frequency of software updates to some extent, while improvement in the firewall quality has a positive effect."\n }},\n "narrow": {{\n "instruction": "Be specific. Communicate the concepts, whether there is a positive or negative effect (be careful), and the meaning of the exponent",\n "hypothesis": "The threat to cybersecurity interacts in a complex manner with various factors. As the amount of encrypted data increases, there is a quadratic decrease in threat. Similarly for the number of failed login attempts, there is a negative quadratic relationship. The quality of the firewall protection on the other hand demonstrates a positive and linear relationship. 
Finally, the frequency of software updates has an inverse cubic relationship to the threat."\n }},\n}}\n```\n\nBased on this, provide an answer for the following test dataset:\n```json\n{dataset}```\nRespond only with a JSON.', + }, + ] + + +def create_prompt(usr_msg): + return [ + { + 'role': 'system', + 'content': 'You are a helpful assistant who is not talkative. You only respond with the exact answer to a query without additional conversation.', + }, + {'role': 'user', 'content': usr_msg}, + ] + + +def get_response(client, prompt, max_retry=5, model='gpt-3.5-turbo', verbose=False): + n_try = 0 + while n_try < max_retry: + response = client.chat.completions.create( + model=model, messages=create_prompt(prompt), **OPENAI_GEN_HYP + ) + + # COMMENT: changed from + # response.choices[0].message.content.strip().strip('```json').strip('```') + content = response.choices[0].message.content + cleaned_content = content.split('```json')[1].split('```')[0].strip() + output = cleaned_content + try: + response_json = json.loads(output) + return response_json + except ValueError: + if verbose: + print(f'Bad JSON output:\n\n{output}') + n_try += 1 + if n_try < max_retry: + if verbose: + print('Retrying...') + else: + if verbose: + print('Retry limit reached') + return None + + +def get_code_fix( + client, code, error, max_retry=5, model='gpt-3.5-turbo', verbose=False +): + prompt = f"""\ +Given the following code snippet and error message, provide a single-line fix for the error. \ +Note that the code is going to be executed using python `eval`. \ +The code should be executable and should not produce the error message. Be as specific as possible. + +Here's the code and the error: +{{ + "code": "{code}", + "error": "{error}" +}} + +Return only a JSON object with the fixed code in the following format: +```json +{{ + "fixed_code": "..." +}}""" + response = get_response( + client, prompt, max_retry=max_retry, model=model, verbose=verbose + ) + return response + + +def get_new_hypothesis( + client, target, old, expr, cols, model='gpt-3.5-turbo', verbose=False +): + prompt = f"""\ +Given a target column from a dataset, a pandas expression to derive the column from existing columns, a list of \ +existing columns, and a previously written hypothesis text, carefully check if the hypothesis text is consistent with \ +the pandas expression or not. If it is consistent, simply return the hypothesis as it is. If it is not consistent, \ +provide a new natural language hypothesis that is consistent with the pandas expression using only the provided \ +information. Be specific. + +Here's the information: +```json +{{ + "target_column": "{target}", + "pandas_expression": "{expr}", + "existing_columns": {json.dumps(cols, indent=4)} + "old_hypothesis": "{old}", +}}``` + +Give your answer as a new JSON with the following format: +```json +{{ + "hypothesis": "..." +}}""" + response = get_response(client, prompt, model=model, verbose=verbose) + return response + + +def replace_variable(client, expr, old, new, model='gpt-3.5-turbo', verbose=False): + prompt = f"""\ +Given a pandas "expression", replace mentions of the "old" column with its "new" value such that the resultant \ +expression is equivalent to the original expression. + +Here's the information: +```json +{{ + "expression": "{expr}", + "old": "{old}", + "new": "{new}" +}}``` + +Give your answer as a new JSON with the following format: +```json +{{ + "new_expression": "..." 
+}}""" + response = get_response(client, prompt, model=model, verbose=verbose) + return response diff --git a/evaluation/discoverybench/eval_utils/openai_semantic_gen_prompts.py b/evaluation/discoverybench/eval_utils/openai_semantic_gen_prompts.py new file mode 100644 index 00000000000..a0b5438e4c8 --- /dev/null +++ b/evaluation/discoverybench/eval_utils/openai_semantic_gen_prompts.py @@ -0,0 +1,151 @@ +common_hypothesis_features = [ + '1-2 sentences', + 'surprising finding', + 'includes numeric concepts', + 'includes categorical concepts', + 'includes binary concepts', +] +hypothesis_features = [ + ['requires within-cluster analysis'], + ['requires across-cluster analysis'], + ['corresponds to a polynomial relationship of some columns'], + ['corresponds to a ratio between some columns'], + ['requires temporal analysis'], + ['relationship is based on descriptive statistics of some columns'], + ['requires concepts based on percentage or percentiles'], + ['relationship is only applicable to one cluster in the data and not the others'], +] + +column_features = [ + [ + 'must have one target column', + 'must have quantifiable columns', + 'must have a few categorical columns', + 'make sure the categorical column values do not contain special characters', + 'include a few distractor columns', + ] +] + +common_pandas_features = [ + 'must be executable using python `eval` to create the target column in variable `df` (pandas dataframe)', + "for e.g., df['A']**2 + 3*df['B'] + 9, np.where(df['A'] > 3, 'Yes', 'No'), etc.", + 'variables in pandas_expression must be from the existing columns listed above', + 'variables in pandas_expression must NOT contain the target column itself', +] +pandas_features = [ + ['expression is a quadratic polynomial'], + ['expression is a cubic polynomial'], + ['expression is a ratio of existing columns'], + ['expression is derived through logical combination of existing columns'], + # workflow +] +pandas_features = [common_pandas_features + p for p in pandas_features] + +common_derived_features = [ + '1-2 sentences', + 'includes numeric concepts', + 'includes categorical concepts', + 'includes binary concepts', +] +derived_features = [common_derived_features + h for h in hypothesis_features] +hypothesis_features = [common_hypothesis_features + h for h in hypothesis_features] + +PROMPT_HYP = """\ +Given a dataset topic and description, generate an interesting hypothesis based on \ +the provided instructions. Be creative and come up with an unusual finding. + +```json +{ + "topic": "%s", + "description": "%s", + "hypothesis_features": %s, + "hypothesis": "..." +}``` + +Give your answer as a new JSON with the following format: +```json +{ + "hypothesis": "..." +} +```""" + +PROMPT_COL = """\ +Given a dataset topic, its description, and a true hypothesis that can be determined from it, \ +generate a list of valid columns based on the provided instructions. + +```json +{ + "topic": "%s", + "description": "%s", + "hypothesis": "%s", + "column_instructions": %s, + "columns": [ + { + "col_name": "...", # should be an "_"-separated string + "description": "...", + "data_type": "...", # should be executable using python's `eval` function. 
E.g., str, float, int, bool + "data_range": {...}, # should be either {"min": ..., "max": ...} or {"values": [...]} + "is_distractor": true/false, # boolean indicating whether this is a distractor that could cause confusion during data analysis + "is_target": true/false # boolean indicating whether this is the target variable for the hypothesis; at least one column should be the target + }, + ... + ], + "pandas_instructions": %s, + "pandas_equation_for_hypothesis": { + "target_col": "...", + "target_col_type": "...", + "target_col_range": {...}, + "independent_cols_in_pandas_expression": [], # list of column names that will be used to derive the target column + "pandas_expression": "..." # expression to derive df[target_col] using df[ind_col1], df[ind_col2], etc. + } +}``` + +Give your answer as a new JSON with the "columns" and "pandas_equation_for_hypothesis" keys filled using the following format: +```json +{ + "columns": [...], + "pandas_equation_for_hypothesis": {...} +} +```""" + +PROMPT_DER = """\ +Given a dataset topic, description, a true hypothesis that can be determined from the data, \ +and a target column from the dataset, generate a hypothesis for the target column using new independent columns not present in the existing columns. + +```json +{ + "topic": "%s", + "description": "%s", + "hypothesis": "%s", + "existing_columns": %s, + "target_column": "%s", + "new_to_target_instructions": %s, + "new_to_target_hypothesis": "...", # describe a relationship between new columns that explains the target column + "new_columns_for_target": [ # do not repeat any of the existing columns in the dataset + { + "col_name": "...", # should be an "_"-separated string + "description": "...", + "data_type": "...", # should be executable using python's `eval` function. E.g., str, float, int, bool + "data_range": {...}, # should be either {"min": ..., "max": ...} or {"values": [...]} + }, + ... + ], + "pandas_instructions": %s, + "pandas_equation_for_new_to_target_hypothesis": { + "target_col": "...", + "target_col_type": "...", + "target_col_range": {...}, + "independent_cols_in_pandas_expression": [], # list of column names from new_columns_for_target that will be used to derive target_col + "pandas_expression": "..." # expression to derive df[target_col] using df[ind_col1], df[ind_col2], etc. 
+ } +}``` + +Give your answer as a new JSON with the "new_to_target_hypothesis", "new_columns_for_target", and \ +"pandas_equation_for_new_to_target_hypothesis" keys filled using the following format: +```json +{ + "new_to_target_hypothesis": "...", + "new_columns_for_target": [...], + "pandas_equation_for_new_to_target_hypothesis": {...} +} +```""" diff --git a/evaluation/discoverybench/eval_utils/response_parser.py b/evaluation/discoverybench/eval_utils/response_parser.py new file mode 100644 index 00000000000..b5de82b5df9 --- /dev/null +++ b/evaluation/discoverybench/eval_utils/response_parser.py @@ -0,0 +1,52 @@ +workflow_summary_markers = [ + 'WORKFLOW SUMMARY', + 'WORKFLOW_SUMMARY', + 'WORKFLOW-SUMMARY', + 'Workflow Summary', +] + +final_answer_markers = [ + 'FINAL ANSWER', + 'FINAL_ANSWER', + 'FINAL-ANSWER', + 'Final Answer', + 'Scientific Hypothesis', + 'Hypothesis', +] + +next_agent_markers = [ + 'NEXT AGENT', + 'NEXT-AGENT', + 'NEXT_AGENT', + 'FEEDBACK', +] + + +def extract_between(content, start_markers, end_markers=None): + for marker in start_markers: + if marker in content: + result = content.split(marker, 1)[1] + if end_markers: + for end_marker in end_markers: + if end_marker in result: + result = result.split(end_marker, 1)[0] + return result + return '' + + +def extract_gen_hypo_from_logs(content: str): + error = '' + + gen_workflow = extract_between( + content, workflow_summary_markers, final_answer_markers + ) + + if not gen_workflow: + error += 'No Workflow Summary found in the line. | ' + + gen_hypothesis = extract_between(content, final_answer_markers, next_agent_markers) + + if not gen_hypothesis: + error += 'No Final Answer in the line.' + + return gen_hypothesis, gen_workflow, error diff --git a/evaluation/discoverybench/run_infer.py b/evaluation/discoverybench/run_infer.py new file mode 100644 index 00000000000..77d72d04775 --- /dev/null +++ b/evaluation/discoverybench/run_infer.py @@ -0,0 +1,491 @@ +import asyncio +import json +import os + +import git +import pandas as pd + +from evaluation.discoverybench.eval_utils.eval_w_subhypo_gen import ( + run_eval_gold_vs_gen_NL_hypo_workflow, +) +from evaluation.discoverybench.eval_utils.response_parser import ( + extract_gen_hypo_from_logs, +) +from evaluation.utils.shared import ( + EvalMetadata, + EvalOutput, + codeact_user_response, + make_metadata, + prepare_dataset, + reset_logger_for_multiprocessing, + run_evaluation, +) +from openhands.controller.state.state import State +from openhands.core.config import ( + AgentConfig, + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) +from openhands.core.logger import openhands_logger as logger +from openhands.core.main import create_runtime, run_controller +from openhands.events.action import AgentFinishAction, CmdRunAction, MessageAction +from openhands.events.observation import CmdOutputObservation +from openhands.runtime.base import Runtime +from openhands.utils.async_utils import call_async_from_sync + +EVALUATION_LLM = 'gpt-4-1106-preview' + +DATA_FILES = {} + +LIBRARIES = [ + 'pandas', + 'numpy', + 'scipy', + 'matplotlib', + 'seaborn', + 'scikit-learn', + 'statsmodels', +] + +AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { + 'CodeActAgent': codeact_user_response, +} + +AGENT_CLS_TO_INST_SUFFIX = { + 'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: