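"""Run AFlow workflow optimization on the LiveCodeBench benchmark with EvoAgentX.

The script defines a fixed random dev/test split of LiveCodeBench, then runs
an AFlowOptimizer (gpt-4o-mini for both the optimizer and executor roles)
over workflow graphs for code generation.
"""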
import os

import nest_asyncio
from dotenv import load_dotenv

from evoagentx.benchmark import LiveCodeBench, AFlowLiveCodeBench
from evoagentx.optimizers import AFlowOptimizer
from evoagentx.models import LiteLLMConfig, LiteLLM, OpenAILLMConfig, OpenAILLM

nest_asyncio.apply()

# Read the OpenAI API key from the environment (e.g. a .env file)
# rather than hard-coding it in the source.
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

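# Per-benchmark settings for the AFlow optimizer: the question type and the
# AFlow operators the optimizer may use in the workflow graph. The relevant
# entry is unpacked into AFlowOptimizer in main().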
EXPERIMENTAL_CONFIG = {
    "humaneval": {
        "question_type": "code", 
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"] 
    }, 
    "livecodebench": {
        "question_type": "code", 
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"] 
    }, 
    "mbpp": {
        "question_type": "code", 
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"] 
    },
    "hotpotqa": {
        "question_type": "qa", 
        "operators": ["Custom", "AnswerGenerate", "QAScEnsemble"]
    },
    "gsm8k": {
        "question_type": "math", 
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    },
    "math": {
        "question_type": "math", 
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    }
}


class LiveCodeBenchSplits(AFlowLiveCodeBench):
    """LiveCodeBench with a fixed random dev/test split for AFlow."""

    def _load_data(self):
        # Load the original LiveCodeBench test data.
        test_data = LiveCodeBench().get_test_data()

        # Randomly select 50 samples for dev and 150 samples for test;
        # a fixed seed keeps the split reproducible across runs.
        import numpy as np
        np.random.seed(42)
        permutation = np.random.permutation(len(test_data))

        super()._load_data()
        self._dev_data = [test_data[idx] for idx in permutation[:50]]
        self._test_data = [test_data[idx] for idx in permutation[50:200]]


def main():
    # LLM that executes the candidate workflows.
    openai_config = OpenAILLMConfig(
        model="gpt-4o-mini",
        openai_key=OPENAI_API_KEY
    )
    executor_llm = OpenAILLM(config=openai_config)

    # LLM that proposes workflow modifications. Here it is the same model,
    # accessed through LiteLLM; swap in another model by changing this config.
    litellm_config = LiteLLMConfig(
        model="gpt-4o-mini",
        openai_key=OPENAI_API_KEY
    )
    optimizer_llm = LiteLLM(config=litellm_config)

    # Load the benchmark with the dev/test split defined above.
    benchmark = LiveCodeBenchSplits()

    # Create the AFlow optimizer.
    optimizer = AFlowOptimizer(
        graph_path="examples/aflow/code_generation",
        optimized_path="examples/aflow/livecodebench/optimized",
        optimizer_llm=optimizer_llm,
        executor_llm=executor_llm,
        validation_rounds=1,
        eval_rounds=1,
        max_rounds=10,
        **EXPERIMENTAL_CONFIG["livecodebench"]
    )

    # Run the optimization.
    optimizer.optimize(benchmark)

    # Run the test; use `test_rounds: List[int]` to specify the rounds to test.
    optimizer.test(benchmark)


if __name__ == "__main__":
    main()
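
# Example invocation (the script path is illustrative; adjust to where this
# file actually lives), with OPENAI_API_KEY set in the environment or .env:
#     python examples/aflow/livecodebench_optimizer.py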