"""Optimize a code-generation workflow on HumanEval+ with EvoAgentX's AFlowOptimizer.

Uses a seeded 20/80 dev/test split of the AFlowHumanEvalPLUS benchmark.
"""

import os

import numpy as np
import nest_asyncio
from dotenv import load_dotenv

from evoagentx.optimizers import AFlowOptimizer
from evoagentx.models import OpenAILLMConfig, OpenAILLM
from evoagentx.benchmark import AFlowHumanEvalPLUS

# Allow nested event loops so AFlow's async calls also work inside notebooks.
nest_asyncio.apply()

# Read the OpenAI API key from the environment (e.g. a .env file); never
# hard-code real keys in source.
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
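
# Per-benchmark AFlow settings: the expected answer format and the operator
# pool the optimizer may wire into candidate workflows.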
EXPERIMENTAL_CONFIG = {
    "humaneval": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "mbpp": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "hotpotqa": {
        "question_type": "qa",
        "operators": ["Custom", "AnswerGenerate", "QAScEnsemble"]
    },
    "gsm8k": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    },
    "math": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    }
}


class HumanEvalPLUSSplits(AFlowHumanEvalPLUS):
    """AFlowHumanEvalPLUS with a reproducible 20% dev / 80% test split of the test set."""

    def _load_data(self):
        super()._load_data()
        # Fixed seed so the dev/test split is identical across runs.
        np.random.seed(42)
        num_dev_samples = int(len(self._test_data) * 0.2)
        random_indices = np.random.permutation(len(self._test_data))
        self._dev_data = [self._test_data[i] for i in random_indices[:num_dev_samples]]
        self._test_cases = [self._test_data[i] for i in random_indices[num_dev_samples:]]
        self._test_data = self._test_cases.copy()


def main():
    # One shared generation config for both the executor and optimizer models.
    llm_config = OpenAILLMConfig(
        model="gpt-4o-mini-2024-07-18",
        openai_key=OPENAI_API_KEY,
        top_p=0.85,
        temperature=0.2,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )

    # The executor LLM runs candidate workflows; the optimizer LLM proposes revisions.
    executor_llm = OpenAILLM(config=llm_config)
    optimizer_llm = OpenAILLM(config=llm_config)
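
    # Build the seeded split once, then copy it onto a fresh benchmark instance:
    # the 20% dev slice serves as both train and dev data, and the remaining 80%
    # is the held-out test set.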
    humaneval_old = HumanEvalPLUSSplits()
    humaneval = AFlowHumanEvalPLUS()

    humaneval._train_data = humaneval_old._dev_data.copy()
    humaneval._dev_data = humaneval_old._dev_data.copy()
    humaneval._test_data = humaneval_old._test_data.copy()
    humaneval._test_cases = humaneval_old._test_cases.copy()

    humaneval.error_list = {}

    # Sanity-check the split by printing one held-out test example.
    print(humaneval._test_cases[0])
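
    # Configure AFlow: start from the seed workflow graph, write each improved
    # round under optimized_path, and search over the HumanEval operator set.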
    optimizer = AFlowOptimizer(
        graph_path="examples/aflow/code_generation",
        optimized_path="examples/aflow/humanevalplus_update/optimized",
        optimizer_llm=optimizer_llm,
        executor_llm=executor_llm,
        validation_rounds=5,
        eval_rounds=2,
        max_rounds=20,
        **EXPERIMENTAL_CONFIG["humaneval"]
    )
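
    # Run the optimization search, then evaluate on the held-out test split
    # (the list argument is assumed to select which saved rounds to test).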
    optimizer.optimize(humaneval)
    optimizer.test(humaneval, [0, 1, 2, 3, 4])


if __name__ == "__main__":
    main()