# examples/optimization/aflow/aflow_humanevalplus.py
import os

import nest_asyncio
import numpy as np
from dotenv import load_dotenv

from evoagentx.benchmark import AFlowHumanEvalPLUS
from evoagentx.models import OpenAILLMConfig, OpenAILLM
from evoagentx.optimizers import AFlowOptimizer

# nest_asyncio allows the optimizer's asyncio event loop to run inside
# environments that already have a running loop (e.g. Jupyter notebooks).
nest_asyncio.apply()
load_dotenv()
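# The OpenAI key is read from the environment rather than hard-coded; put
# OPENAI_API_KEY=... in a local .env file or export it in your shell.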
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
EXPERIMENTAL_CONFIG = {
    "humaneval": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "mbpp": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "hotpotqa": {
        "question_type": "qa",
        "operators": ["Custom", "AnswerGenerate", "QAScEnsemble"]
    },
    "gsm8k": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    },
    "math": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    }
}
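
# Each entry maps a benchmark name to its question type and the pool of AFlow
# operators the optimizer may wire into candidate workflow graphs. Only the
# "humaneval" entry is used below; it is reused for HumanEval+ since the two
# benchmarks share the same code-generation format.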

class HumanEvalPLUSSplits(AFlowHumanEvalPLUS):

    def _load_data(self):
        # load the original test data
        super()._load_data()
        # split the data into dev (20%) and test (80%) with a fixed seed
        np.random.seed(42)
        num_dev_samples = int(len(self._test_data) * 0.2)
        random_indices = np.random.permutation(len(self._test_data))
        self._dev_data = [self._test_data[i] for i in random_indices[:num_dev_samples]]
        self._test_cases = [self._test_data[i] for i in random_indices[num_dev_samples:]]
        self._test_data = self._test_cases.copy()
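
# With the fixed seed the split is deterministic across runs. Assuming the
# upstream HumanEval+ benchmark ships the standard 164 problems, this yields
# 32 dev problems and 132 held-out test problems.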

def main():
# os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] = "gpt-4o-mini"
# os.environ["AZURE_OPENAI_ENDPOINT"] = "https://75244-mfztkr7x-eastus2.cognitiveservices.azure.com/"
# os.environ["AZURE_OPENAI_KEY"] = "8PNMdsUYGdMPsCfl0baO0hjtnGE2m40zJTrUGC3vKnHdpjnkOgeQJQQJ99BIACHYHv6XJ3w3AAAAACOG7VZI"
# os.environ["AZURE_OPENAI_API_VERSION"] = "2024-12-01-preview"
    llm_config = OpenAILLMConfig(model="gpt-4o-mini-2024-07-18", openai_key=OPENAI_API_KEY, top_p=0.85, temperature=0.2, frequency_penalty=0.0, presence_penalty=0.0)
    # Alternative: route through Azure via LiteLLM (requires the env vars above
    # and `from evoagentx.models import LiteLLMConfig, LiteLLM`):
    # llm_config = LiteLLMConfig(model="azure/" + os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
    #                            azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    #                            azure_key=os.getenv("AZURE_OPENAI_KEY"),
    #                            api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-12-01-preview"),
    #                            top_p=0.85, temperature=0.2, frequency_penalty=0.0, presence_penalty=0.0)
    # executor_llm = LiteLLM(config=llm_config)
    # optimizer_llm = LiteLLM(config=llm_config)
    executor_llm = OpenAILLM(config=llm_config)
    optimizer_llm = OpenAILLM(config=llm_config)
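    # Both roles share one config here: the executor LLM runs candidate
    # workflows on benchmark problems, while the optimizer LLM proposes graph
    # modifications between rounds. A stronger optimizer model is a common
    # AFlow setup but not required.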
    # load benchmark
    humaneval_old = HumanEvalPLUSSplits()
    humaneval = AFlowHumanEvalPLUS()
    humaneval._train_data = humaneval_old._dev_data.copy()
    humaneval._dev_data = humaneval_old._dev_data.copy()
    humaneval._test_data = humaneval_old._test_data.copy()
    humaneval._test_cases = humaneval_old._test_cases.copy()
    humaneval.error_list = {}
    print(humaneval._test_cases[0])
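    # The deterministic split from HumanEvalPLUSSplits is transplanted onto a
    # fresh benchmark instance: the 20% dev slice serves as both train and
    # validation data for the optimizer, and the remaining 80% is held out
    # for the final test pass.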
    # create optimizer
    optimizer = AFlowOptimizer(
        graph_path="examples/aflow/code_generation",
        optimized_path="examples/aflow/humanevalplus_update/optimized",
        optimizer_llm=optimizer_llm,
        executor_llm=executor_llm,
        validation_rounds=5,
        eval_rounds=2,
        max_rounds=20,
        **EXPERIMENTAL_CONFIG["humaneval"]
    )
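    # Rough meaning of the knobs: validation_rounds re-evaluates each
    # candidate graph on the dev split, eval_rounds repeats the final test
    # evaluation to smooth sampling noise, and max_rounds caps the number of
    # optimization iterations.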
    # run optimization
    optimizer.optimize(humaneval)

    # run test
    optimizer.test(humaneval, [0, 1, 2, 3, 4])  # use `test_rounds: List[int]` to specify the rounds to test

if __name__ == "__main__":
    main()