import yaml
import difflib
import regex
import random
import inspect
import numpy as np
from pydantic import Field 
from copy import deepcopy
import xml.etree.ElementTree as ET
from typing import Literal, Union, Optional, List
from evoagentx.models import OpenAILLMConfig, OpenAILLM
from evoagentx.evaluators import Evaluator 

from .optimizer import Optimizer
from ..core.logging import logger
from ..models.base_model import BaseLLM 
from ..benchmark.benchmark import Benchmark
from ..workflow.action_graph import ActionGraph
from ..core.callbacks import suppress_logger_info
from ..workflow.workflow_graph import SequentialWorkFlowGraph,WorkFlowGraph
from ..prompts.workflow.sew_optimizer import mutation_prompts, thinking_styles

VALID_SCHEMES = ["python", "yaml", "code", "core", "bpmn"]


def find_closest_name(inputname, name_reference):
    """
    Return the step dict in `name_reference` whose `task_name` is closest to `inputname`
    (fuzzy match via difflib). Falls back to the first known step if no match is found.
    """
    name_reference_correct = [step["task_name"] for step in name_reference]
    correct_name = difflib.get_close_matches(inputname, name_reference_correct, n=1, cutoff=0.1)
    if not correct_name:
        # No sufficiently similar task name; fall back to the first known step.
        return name_reference[0]
    correct_step = name_reference[name_reference_correct.index(correct_name[0])]

    return correct_step
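# Illustrative use (hypothetical step dicts, not taken from a real workflow):
#   known = [{"task_name": "code generation", "input_names": ["problem"], "output_names": ["code"]}]
#   find_closest_name("generate_code", known)   # -> the "code generation" step dict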
    

class STRUCTUREWorkFlowScheme:
    """
    Textual workflow representation ("scheme") helper for the STRUCTURE/SEW optimizer:
    converts a WorkFlowGraph into one of the supported schemes and parses it back into a graph.
    """
    def __init__(self, graph: WorkFlowGraph, **kwargs):
        self.graph = graph # the workflow graph to be transformed
        self.kwargs = kwargs

    def convert_to_scheme(self, scheme: str) -> str:
        """
        Transform the WorkflowGraph to the desired scheme.
        """
        if scheme not in VALID_SCHEMES:
            raise ValueError(f"Invalid scheme: {scheme}. The scheme should be one of {VALID_SCHEMES}.") 
        if scheme == "python":
            repr = self.get_workflow_python_repr()
        elif scheme == "yaml":
            repr = self.get_workflow_yaml_repr()
        elif scheme == "code":
            repr = self.get_workflow_code_repr()
        elif scheme == "core":
            repr = self.get_workflow_core_repr()
        elif scheme == "bpmn":
            repr = self.get_workflow_bpmn_repr()
        return repr

    def parse_from_scheme(self, scheme: str, repr: str) -> WorkFlowGraph:
        """
        Parse the SequentialWorkFlowGraph from the given scheme and representation.
        """
        if scheme not in VALID_SCHEMES:
            raise ValueError(f"Invalid scheme: {scheme}. The scheme should be one of {VALID_SCHEMES}.")
        if scheme == "python":
            graph = self.parse_workflow_python_repr(repr)
        elif scheme == "yaml":
            graph = self.parse_workflow_yaml_repr(repr)
        elif scheme == "code":
            graph = self.parse_workflow_code_repr(repr)
        elif scheme == "core":
            graph = self.parse_workflow_core_repr(repr)
        elif scheme == "bpmn":
            graph = self.parse_workflow_bpmn_repr(repr)
        return graph
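    # Illustrative round trip (hypothetical variable names): given a WorkFlowGraph `wf_graph`,
    #   scheme_helper = STRUCTUREWorkFlowScheme(graph=wf_graph)
    #   text = scheme_helper.convert_to_scheme("python")
    #   new_graph = scheme_helper.parse_from_scheme("python", llm_output_with_python_block)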

    def _get_workflow_repr_info(self) -> List[dict]:
        """
        Get the information for the workflow representation.
        """
        info = []
        for node in self.graph.nodes:
            task_name = node.name
            input_names = [param.name for param in node.inputs] 
            output_names = [param.name for param in node.outputs]
            task_info = {
                "task_name": task_name,
                "input_names": input_names,
                "output_names": output_names
            }
            info.append(task_info)
        return info
    
    def _convert_to_func_name(self, name: str) -> str:
        """
        Convert the task name to the function name.
        """
        name = name.lower().strip()
        name = name.replace(' ', '_').replace('-', '_')
        name = ''.join(c for c in name if c.isalnum() or c == '_')
        # Replace multiple consecutive underscores with a single underscore
        name = regex.sub(r'_+', "_", name)
        # Remove leading/trailing underscores
        name = name.strip('_')
        return name
    
    def _convert_to_title(self, name: str) -> str:
        func_name = self._convert_to_func_name(name)
        words = func_name.split('_')
        return ' '.join(word.capitalize() for word in words)
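    # Example behaviour of the name helpers (illustrative inputs only):
    #   _convert_to_func_name("Code Generation!") -> "code_generation"
    #   _convert_to_title("code  generation")     -> "Code Generation"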
    
    def get_workflow_python_repr(self) -> str: 
        repr_info = self._get_workflow_repr_info()
        if not repr_info:
            return ""
        
        python_workflow_info = [] 
        for task_info in repr_info:
            name = self._convert_to_func_name(task_info['task_name'])
            input_names = [f'{input_name}' for input_name in task_info['input_names']]
            output_names = [f'{output_name}' for output_name in task_info['output_names']]
            python_workflow_info.append(
                "{{'name': '{name}', 'args': {args}, 'outputs': {outputs}}}".format(
                    name=name,
                    args=input_names,
                    outputs=output_names
                )
            )
        python_workflow_repr = "steps = [\n" + ",\n".join(python_workflow_info) + "\n]"
        # print(python_workflow_repr)
        return python_workflow_repr
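    # Example output of get_workflow_python_repr (illustrative task/parameter names):
    #   steps = [
    #   {'name': 'generate_code', 'args': ['problem'], 'outputs': ['code']},
    #   {'name': 'review_code', 'args': ['code'], 'outputs': ['review']}
    #   ]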
    
    def get_workflow_yaml_repr(self) -> str:
        repr_info = self._get_workflow_repr_info() 
        if not repr_info:
            return ""
        
        yaml_workflow_info = []
        for task_info in repr_info:
            name = self._convert_to_func_name(task_info['task_name'])
            input_names = "\n".join([f'    - {input_name}' for input_name in task_info['input_names']])
            output_names = "\n".join([f'    - {output_name}' for output_name in task_info['output_names']])
            yaml_workflow_info.append(
                "- name: {name}\n  args:\n{input_names}\n  outputs:\n{output_names}".format(
                    name=name,
                    input_names=input_names,
                    output_names=output_names
                )
            )
        yaml_workflow_repr = "\n\n".join(yaml_workflow_info)
        return yaml_workflow_repr
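    # Example output of get_workflow_yaml_repr (illustrative names):
    #   - name: generate_code
    #     args:
    #       - problem
    #     outputs:
    #       - code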

    def get_workflow_code_repr(self) -> str:
        repr_info = self._get_workflow_repr_info()
        if not repr_info:
            return ""
            
        workflow_lines = []
        for task_info in repr_info:
            # Convert task name to snake_case
            name = self._convert_to_func_name(task_info['task_name'])
            
            # Format inputs and outputs
            inputs = ", ".join(task_info['input_names'])
            outputs = ", ".join(task_info['output_names'])
            
            # Create the line in format: task_name(inputs) -> outputs
            line = f"{name}({inputs}) -> {outputs}"
            workflow_lines.append(line)
            
        # Join all lines with newlines
        workflow_repr = "\n".join(workflow_lines)
        
        return workflow_repr
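    # Example output of get_workflow_code_repr (illustrative names):
    #   generate_code(problem) -> code
    #   review_code(code) -> review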

    def get_workflow_bpmn_repr(self) -> str:

        repr_info = self._get_workflow_repr_info()
        if not repr_info:
            return ""
            
        # Start the BPMN XML
        bpmn_lines = [
            '<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">',
            '<process id="software_dev_workflow" isExecutable="true">',
            '    <startEvent id="start" />'
        ]
        
        # Add tasks
        for i, task_info in enumerate(repr_info):
            task_name = self._convert_to_func_name(task_info['task_name'])
            task_title = self._convert_to_title(task_info['task_name'])
            bpmn_lines.append(f'    <task id="{task_name}" name="{task_title}" />')
            
        bpmn_lines.append('    <endEvent id="end" />')
        bpmn_lines.append('')
        bpmn_lines.append('    <!-- Workflow connections -->')
        
        # Add sequence flows
        # First flow from start to first task
        if repr_info:
            first_task_id = self._convert_to_func_name(repr_info[0]['task_name'])
            bpmn_lines.append(f'    <sequenceFlow id="flow1" sourceRef="start" targetRef="{first_task_id}" />')
            
        # Flows between tasks
        for i in range(len(repr_info) - 1):
            source_id = self._convert_to_func_name(repr_info[i]['task_name'])
            target_id = self._convert_to_func_name(repr_info[i + 1]['task_name'])
            flow_num = i + 2
            bpmn_lines.append(f'    <sequenceFlow id="flow{flow_num}" sourceRef="{source_id}" targetRef="{target_id}" />')
            
        # Last flow from last task to end
        if repr_info:
            last_task_id = self._convert_to_func_name(repr_info[-1]['task_name'])
            flow_num = len(repr_info) + 1
            bpmn_lines.append(f'    <sequenceFlow id="flow{flow_num}" sourceRef="{last_task_id}" targetRef="end" />')
            
        # Close tags
        bpmn_lines.append('</process>')
        bpmn_lines.append('</definitions>')
        
        return '\n'.join(bpmn_lines)
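    # Example output of get_workflow_bpmn_repr for a single illustrative task:
    #   <definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">
    #   <process id="software_dev_workflow" isExecutable="true">
    #       <startEvent id="start" />
    #       <task id="generate_code" name="Generate Code" />
    #       <endEvent id="end" />
    #
    #       <!-- Workflow connections -->
    #       <sequenceFlow id="flow1" sourceRef="start" targetRef="generate_code" />
    #       <sequenceFlow id="flow2" sourceRef="generate_code" targetRef="end" />
    #   </process>
    #   </definitions>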
    
    def get_workflow_core_repr(self) -> str:

        repr_info = self._get_workflow_repr_info()
        if not repr_info:
            return ""
            
        workflow_lines = []
        for i, task_info in enumerate(repr_info, 1):
            # Convert task name to title case
            task_name = self._convert_to_title(task_info['task_name'])
            # Create the line with the specified format
            next_step = i + 1
            line = f"Step {i}::: Process ::: {task_name}:::next::Step {next_step}"
            workflow_lines.append(line)
            
        # Add the terminal step
        last_step = len(repr_info) + 1
        workflow_lines.append(f"Step {last_step}::: Terminal ::: End of Workflow:::")
        
        return "\n".join(workflow_lines)

    def _find_task_index(self, step: dict, graph_repr_info: List[dict]) -> int:
        """
        Find the index of the task in the original workflow graph. If the task is not found, return -1. 

        Args:
            step (dict): The step of the workflow.
            graph_repr_info (List[dict]): The information of the original workflow graph.
        
        Returns:
            int: The index of the task.
        """
        def _is_task_name_match(task_name: str, another_name: str) -> bool:
            return self._convert_to_func_name(task_name) == self._convert_to_func_name(another_name)

        def _is_task_inputs_match(task_inputs: List[str], another_inputs: List[str]) -> bool:
            return len(set(task_inputs) & set(another_inputs)) == len(task_inputs)
        
        def _is_task_outputs_match(task_outputs: List[str], another_outputs: List[str]) -> bool:
            return len(set(task_outputs) & set(another_outputs)) == len(task_outputs)
        
        for i, task in enumerate(graph_repr_info):
            # if _is_task_name_match(task["task_name"], step["name"]) and _is_task_inputs_match(task["input_names"], step["args"]) and _is_task_outputs_match(task["output_names"], step["outputs"]):
            #     return i
            if _is_task_name_match(task["task_name"], step["name"]) and _is_task_outputs_match(task["output_names"], step["outputs"]):
                return i
        return -1

    def create_workflow_graph_from_steps(
        self, 
        steps: List[dict]
    ) -> WorkFlowGraph:
        
        """
        Create a new workflow graph from the steps.
        Since both the inputs and outputs are provided, new tasks will be created in the new workflow graph. 
        It is used for the `python`, `yaml`, and `code` representations.

        Args:
            steps (List[dict]): The steps of the workflow. The steps are in the format of:
                [
                    {
                        "name": str,
                        "args": List[str],
                        "outputs": List[str]
                    }
                ]
        
        Returns:
            SequentialWorkFlowGraph: The new workflow graph.
        """
        original_workflow_config = self.graph.get_graph_info()
        repr_info = self._get_workflow_repr_info()
        new_tasks = []
        # Known steps from the original graph, used for fuzzy matching of unknown task names.
        get_known_list = list(repr_info)
        for step in steps:
            task_index = self._find_task_index(step=step, graph_repr_info=repr_info)
            if task_index == -1:
                # The task is not in the original graph: map it to the closest known task
                # and copy that task's configuration under the new name.
                task_name = step["name"]
                most_known_step = find_closest_name(task_name, get_known_list)
                most_known_step['name'] = most_known_step['task_name']
                most_known_step['args'] = most_known_step['input_names']
                most_known_step['outputs'] = most_known_step['output_names']
                task_index_new = self._find_task_index(step=most_known_step, graph_repr_info=repr_info)
                item_new = deepcopy(original_workflow_config["tasks"][task_index_new])

                # Append one random suffix so the copied task gets a unique (but consistent) name.
                suffix = str(np.random.randint(0, 10000))
                item_new["name"] = task_name + suffix
                item_new["task_name"] = task_name + suffix

                new_tasks.append(item_new)
                
#                 description = f"Task to {task_name.lower()}. " 
#                 if step["args"]: 
#                     description += f"Takes {', '.join(step['args'])} as input. " 
#                 if step["outputs"]: 
#                     description += f"Produces {', '.join(step['outputs'])} as output."
                
#                 new_task = {
#                     "name": task_name, 
#                     "description": description,
#                     "inputs": [
#                         {
#                             "name": input_name, 
#                             "type": "str", 
#                             "description": f"Input parameter {input_name} for {task_name}"
#                         } for input_name in step["args"]
#                     ], 
#                     "outputs": [
#                         {
#                             "name": output_name, 
#                             "type": "str", 
#                             "description": f"Output parameter {output_name} from {task_name}"
#                         } for output_name in step["outputs"]
#                     ], 
#                     "prompt": "to be updated",
# #                     "llm_config": original_workflow_config["tasks"][0]["llm_config"], 
#                     "parse_mode": "str"     
#                 }
#                 new_tasks.append(new_task)
            else:
                # copy the task from the original workflow graph
                if original_workflow_config["tasks"][task_index] not in new_tasks:
                    new_tasks.append(deepcopy(original_workflow_config["tasks"][task_index]))
        # create new workflow configuration 
        new_workflow_config = {
            "goal": original_workflow_config["goal"],
            "tasks": new_tasks
        }

        # create new workflow graph
        new_graph = SequentialWorkFlowGraph.from_dict(new_workflow_config)
        return new_graph

    def create_workflow_graph_from_task_names(
        self,
        task_names: Optional[List[str]] = None,
        task_titles: Optional[List[str]] = None
    ) -> SequentialWorkFlowGraph:
        """
        Create a new workflow graph from the task names or titles. 
        Since only the task names or titles are provided, the tasks in the new workflow graph will be copied from the original workflow graph. 
        It is used for the `bpmn` and `core` representations. 

        Args:
            task_names (Optional[List[str]]): The names of the tasks.
            task_titles (Optional[List[str]]): The titles of the tasks.
        
        Returns:
            SequentialWorkFlowGraph: The new workflow graph.
        """
        if task_names:
            original_workflow_config = self.graph.get_graph_info()
            tasks = task_names
            original_tasks = {self._convert_to_func_name(task["name"]): task for task in original_workflow_config["tasks"]} 
        elif task_titles:
            original_workflow_config = self.graph.get_graph_info()
            tasks = task_titles 
            original_tasks = {self._convert_to_title(task["name"]): task for task in original_workflow_config["tasks"]}
        else:
            raise ValueError("No task names or titles provided.")

        new_tasks = []
        for task in tasks:
            if task not in original_tasks:
                raise ValueError(f"Task {task} not found in the original workflow.")
            new_tasks.append(deepcopy(original_tasks[task]))
        
        # create new workflow configuration 
        new_workflow_config = {
            "goal": original_workflow_config["goal"],
            "tasks": new_tasks
        }

        # create new workflow graph
        new_graph = WorkFlowGraph.from_dict(new_workflow_config)
        return new_graph

    def parse_workflow_python_repr(self, repr: str) -> WorkFlowGraph:
        """
        Parse the workflow from the python representation. The input format is:
        steps = [
            {"name": task_name, "args": [input1, input2, ...], "outputs": [output1, output2, ...]},
            {"name": another_task_name, "args": [input1, input2, ...], "outputs": [output1, output2, ...]},
            ...
        ]
        """
        # extract the ```python ``` code block
        code_block = regex.search(r'```python\s*(.*?)\s*```', repr, regex.DOTALL)
        if not code_block:
            raise ValueError("No Python code block found in the representation")
        code_block = code_block.group(1).strip()
        # Evaluate the literal list of step dicts (the leading "steps = " is stripped first).
        steps = eval(code_block.replace("steps = ", "").strip())
        new_graph = self.create_workflow_graph_from_steps(steps=steps)
        return new_graph

    
    def parse_workflow_yaml_repr(self, repr: str) -> WorkFlowGraph:
        """
        Parse the workflow from the yaml representation. The input format is:
        - name: task_name
          args:
            - input1
            - input2
          outputs:
            - output1
        """
        try:
            # extract ```yaml ``` 
            match = regex.search(r'```yaml\s*(.*?)\s*```', repr, regex.DOTALL) 
            if not match:
                raise ValueError("No YAML code block found in the representation")
            yaml_block = match.group(1).strip()
            steps = yaml.safe_load(yaml_block)
            # relevant_lines = []  
            # in_step = False  
            # for line in yaml_block.splitlines(): 
            #     stripped_line = line.strip() 
            #     if stripped_line.startswith('- name:'):
            #         in_step = True 
            #         relevant_lines.append(line) 
            #     elif in_step and (
            #         stripped_line.startswith('args:') or 
            #         stripped_line.startswith('outputs:') or 
            #         stripped_line.startswith('- ')
            #     ):
            #         relevant_lines.append(line)
            #     elif not stripped_line: 
            #         in_step = False  
            # yaml_step = "\n".join(relevant_lines)
            # steps = yaml.safe_load(yaml_step)
            new_graph = self.create_workflow_graph_from_steps(steps=steps)
            return new_graph
        except Exception as e:
            logger.warning(f"Failed to parse workflow string: {e}. Return the original workflow.")

        return self.graph
    
    def parse_workflow_code_repr(self, repr: str) -> WorkFlowGraph:
        """
        Parse the workflow from the code representation. 
        The input format is:
        task_name(input1, input2, ...) -> output1, output2, ...
        another_task_name(input1, input2, ...) -> output1, output2, ...
        ...
        """
        try:
            # extract ```code ``` 
            match = regex.search(r'```code\s*(.*?)\s*```', repr, regex.DOTALL)
            if not match:
                raise ValueError("No code block found in the representation")
            code_block = match.group(1).strip()
            lines = [line.strip() for line in code_block.split("\n") if line.strip() and "->" in line]
            steps = []
            for line in lines:
                # Remove any leading numbers and dots (e.g., "1. ")
                line = regex.sub(r'^\d+\.\s*', '', line)
                func_part, output_part = line.split('->')
                func_part = func_part.strip()
                name = func_part[:func_part.index('(')]
                args_str = func_part[func_part.index('(') + 1:func_part.rindex(')')]
                args = [arg.strip() for arg in args_str.split(',') if arg.strip()]
                outputs = [out.strip() for out in output_part.split(',') if out.strip()]
                step = {"name": name, "args": args, "outputs": outputs}
                steps.append(step)
            if not steps:
                raise ValueError("No steps found in the workflow.")
            new_graph = self.create_workflow_graph_from_steps(steps=steps)
            return new_graph
        except Exception as e:
            logger.warning(f"Failed to parse workflow string: {e}. Return the original workflow.")

        return self.graph
    
    def parse_workflow_bpmn_repr(self, repr: str) -> WorkFlowGraph:
        """
        Parse the workflow from the BPMN XML representation.
        
        The input format is BPMN XML with:
        - task elements defining the tasks
        - sequenceFlow elements defining the order of tasks
        
        Will extract ordered task names from the sequence flows and create a workflow.
        """
        try:
            # extract ```bpmn ``` 
            match = regex.search(r'```bpmn\s*(.*?)\s*```', repr, regex.DOTALL) 
            if not match:
                raise ValueError("No BPMN code block found in the representation")
            bpmn_block = match.group(1).strip()
            # Parse XML string
            root = ET.fromstring(bpmn_block)
            
            # Define namespace for BPMN XML
            ns = {'bpmn': 'http://www.omg.org/spec/BPMN/20100524/MODEL'}
            
            # Get process element
            process = root.find('bpmn:process', ns) or root.find('process')
            
            if process is None:
                raise ValueError("No process element found in BPMN XML")
                
            # Create a dictionary of all tasks
            tasks = {}
            # for task in process.findall('.//task', ns) or process.findall('.//task'):
            for task in process.findall("bpmn:task", ns): 
                tasks[task.get('id')] = task.get('name')
            
            # Get sequence flows and order them
            flows = {}
            ordered_tasks = []
            current_ref = 'start'
            
            # Create dictionary of source -> target
            # for flow in process.findall('.//sequenceFlow', ns) or process.findall('.//sequenceFlow'):
            for flow in process.findall("bpmn:sequenceFlow", ns): 
                flows[flow.get('sourceRef')] = flow.get('targetRef')
            
            # Follow the sequence flows to get ordered tasks
            while current_ref in flows:
                next_ref = flows[current_ref]
                if next_ref in tasks:  # Only add if it's a task (not end event)
                    ordered_tasks.append(tasks[next_ref])
                current_ref = next_ref
            
            # Create new workflow graph using the ordered task names
            new_graph = self.create_workflow_graph_from_task_names(task_titles=ordered_tasks)
            return new_graph
            
        except Exception as e:
            logger.warning(f"Failed to parse BPMN workflow string: {e}. Return the original workflow.")
        
        return self.graph
        
    def parse_workflow_core_repr(self, repr: str) -> WorkFlowGraph:
        """
        Parse the workflow from the Core representation.
        
        The input format is:
        Step 1::: Process ::: Task Name:::next::Step 2
        Step 2::: Process ::: Another Task:::next::Step 3
        ...
        Step N::: Terminal ::: End of Workflow:::
        
        Will extract task names from Process steps and create a workflow.
        """
        try:
            # extract ```core ```
            match = regex.search(r'```core\s*(.*?)\s*```', repr, regex.DOTALL) 
            if not match:
                raise ValueError("No core code block found in the representation")
            core_block = match.group(1).strip()
            # Split into lines and remove empty lines
            lines = [line.strip() for line in core_block.split('\n') if line.strip()]
            
            # Initialize flows and tasks dictionaries
            flows = {}  # step -> next_step
            tasks = {}  # step -> task_title
            
            # First pass: build flows and tasks mappings
            for line in lines:
                parts = line.split(':::')
                current_step = parts[0].strip()
                step_type = parts[1].strip()
                
                if step_type == 'Process':
                    # Extract task title and next step 
                    task_title = parts[2].strip()
                    tasks[current_step] = task_title 
                    if len(parts) > 3 and "next" in parts[3]: 
                        next_step = parts[3].split("::")[-1].strip()
                        flows[current_step] = next_step
                elif step_type == 'Terminal':
                    flows[current_step] = None
            
            # Second pass: follow flows to build ordered task list
            ordered_tasks = []
            current_step = 'Step 1'
            
            while current_step in flows:
                if current_step in tasks:  # Only add if it's a Process step
                    ordered_tasks.append(tasks[current_step])
                current_step = flows[current_step]
            # Create new workflow graph using the ordered task titles
            new_graph = self.create_workflow_graph_from_task_names(task_titles=ordered_tasks)
            return new_graph
            
        except Exception as e:
            logger.warning(f"Failed to parse Core workflow string: {e}. Return the original workflow.")
        
        return self.graph


class SimplePromptBreeder:

    def __init__(self, llm: BaseLLM, evaluator: Optional[Evaluator] = None, **kwargs):
        self.llm = llm
        self.evaluator = evaluator
        self.history_log = []
        self.kwargs = kwargs

    def generate_mutation_prompt(self, task_description: str, **kwargs) -> str:
        """
        Generate the mutation prompt for optimization.
        """
        thinking_style = random.choice(thinking_styles)  # sampled for the (currently disabled) thinking-style variant below
#         hyper_mutation_prompt = thinking_style + "\n\nProblem Description: " + task_description + ".\n" + "Output: "
        hyper_mutation_prompt = "Please generate an improved prompt based on the following information. " + "\n\nProblem Description: " + task_description + ".\n" + "Output: "
        # print(">>>>>>>>>> Hyper mutation prompt: <<<<<<<<<<<\n", hyper_mutation_prompt)
        try:
            mutation_prompt = self.llm.generate(
                prompt=hyper_mutation_prompt,
                system_message="You are a helpful assistant. Do not generate harmful content. ",
            ).content
        except Exception:
            # Retry once on a transient LLM failure.
            mutation_prompt = self.llm.generate(
                prompt=hyper_mutation_prompt,
                system_message="You are a helpful assistant. Do not generate harmful content. ",
            ).content
        return mutation_prompt
    
    def get_mutation_prompt(self, task_description: str, order: Literal["zero-order", "first-order"], **kwargs) -> str:
        """
        Get the mutation prompt for optimization.
        """
        if order == "zero-order":
            mutation_prompt = self.generate_mutation_prompt(task_description=task_description)
        elif order == "first-order":
            mutation_prompt = random.choice(mutation_prompts)
        else:
            raise ValueError(f"Invalid order: {order}. The order should be either 'zero-order' or 'first-order'.")
        return mutation_prompt

    def generate_prompt(self, task_description: str, prompt: str, order: Literal["zero-order", "first-order"], **kwargs) -> str:
        """
        Generate the prompt for optimization. 
        
        Args:
            task_description (str): The description of the task, normally the goal of the workflow. 
            prompt (str): The prompt to optimize.
            order (Literal["zero-order", "first-order"]): The order of the mutation prompt.
        
        Returns:
            str: The optimized prompt.
        """
        mutation_prompt = self.get_mutation_prompt(task_description=task_description, order=order)
        prompt = mutation_prompt + "\n\nINSTRUCTION:\n\n" + prompt
        # print(">>>>>>>>>> Prompt: <<<<<<<<<<<\n", prompt)
        new_prompt = self.llm.generate(
            prompt=prompt, 
            system_message="You are a helpful assistant",
        ).content
        return new_prompt
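    # Illustrative call (hypothetical arguments): sample a mutation prompt and let the LLM
    # rewrite an instruction with it, e.g.
    #   breeder.generate_prompt(task_description="generate python solutions for coding problems",
    #                           prompt="Write a Python function that ...",
    #                           order="first-order")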

    def critic_and_update_prompt(self, task_description: str, prompt: str, order: Literal["zero-order", "first-order"], scorer=None, calltime=1, **kwargs) -> str:
        """
        Critique the current workflow using the latest evaluation records and ask the LLM
        to rewrite the workflow representation so that the detected issues are fixed.

        Args:
            task_description (str): The description of the task, normally the goal of the workflow.
            prompt (str): The workflow representation (plus task description) to optimize.
            order (Literal["zero-order", "first-order"]): The order of the mutation prompt.
            scorer: The current performance score; if provided, it is included in the rewrite instruction.
            calltime (int): Number of critic calls whose findings are summarized before rewriting.

        Returns:
            str: The LLM output containing the refined workflow representation.
        """
#         print(self.evaluator._evaluation_records)
        # Build a textual summary of the latest evaluation records
        # (question, prediction, reference solution, unit tests and score per example).
        problem_list = ""
        for item in self.evaluator._evaluation_records.keys():
            problem_s = "Questions: " + self.evaluator._evaluation_records[item]['trajectory'][0].content['question'] + '\n'
            prediction_s = "Predictions: " + self.evaluator._evaluation_records[item]['prediction'] + '\n'
            solution_s = "Solutions: " + self.evaluator._evaluation_records[item]['label']['canonical_solution'] + '\n'
#             if self.evaluator.dataname != "humanevalplus":
#                 if 'test' in list(self.evaluator._evaluation_records[item]['label'].keys()):
#                     test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['test'][0:1000]
#                 elif 'tests' in list(self.evaluator._evaluation_records[item]['label'].keys()):
#                     test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['tests'][0:1000]
#             else:
#                 test_s = "Example solution: " + self.evaluator._evaluation_records[item]['label']["canonical_solution"]
            test_s = ""
            if 'test' in self.evaluator._evaluation_records[item]['label']:
                test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['test'][0:10000] + '\n'
            elif 'tests' in self.evaluator._evaluation_records[item]['label']:
                test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['tests'][0:10000] + '\n'
            metric_s = "Score: " + str(self.evaluator._evaluation_records[item]['metrics']['pass@1']) + "\n"
            if self.evaluator._evaluation_records[item]['metrics']['pass@1'] == 0:
                if "An error occurred:" == self.evaluator.error_list[item]:
                    metric_s += "Error reason: Computation result is incorrect."
                else:
                    erroreason = self.evaluator.error_list[item].replace("An error occurred:", "")
                    metric_s += f"Error reason: {erroreason}"
            else:
                metric_s += "The solution is correct."

            joint_s = problem_s + prediction_s + solution_s + test_s + metric_s

            problem_list += joint_s
#         print(problem_list)
        if calltime == 1:
            critic_prompt = "You will evaluate a coding solution as a workflow for code generation. You should analyze failures using execution history as well as workflow trajectory. Do not attempt to solve it yourself, do not give a solution, only identify errors and problems in the structure of this AI agent workflow and incorrect information in prompts. Only return the problems in one paragraph. Be super concise. The workflow is:"
            question_prompt = "The questions, solutions, unit tests, and evaluated metrics based on this workflow are: " + problem_list
#             question_prompt = ""
            critic_out = self.llm.generate(
                prompt=prompt + question_prompt,
                system_message=critic_prompt,
            ).content
            print(critic_out)
        else:
            # Run the critic several times and let the LLM summarize the detected issues.
            critic_prompt_outlist = '''Please summarize the following problems in one paragraph. Be super concise.\n'''
            for item in range(calltime):
                critic_prompt = "You will evaluate a coding solution as a workflow for code generation. You should analyze failures using execution history of tested problems as well as workflow trajectory. Do not attempt to solve it yourself, do not give a solution, only identify errors and problems in the structure of this agent workflow. Be super concise. The workflow is:"
                question_prompt = "The questions, solutions, and evaluated metrics based on this workflow are: " + problem_list
                critic_out = self.llm.generate(
                    prompt=prompt + question_prompt,
                    system_message=critic_prompt,
                ).content
                critic_prompt_outlist = critic_prompt_outlist + f"Detected Issue {item+1}: " + critic_out + "\n"
            critic_out = self.llm.generate(
                prompt=critic_prompt_outlist,
                system_message="You are an expert in summarizing information and data.",
            ).content
            print(critic_out)
#         mutation_prompt = self.get_mutation_prompt(task_description=task_description, order=order)
        if scorer is None:
            prompt = "The detected issue is: " + critic_out + "\nYou should always improve the workflow by correcting the issue without changing the inputs and outputs of nodes in the workflow. You can remove redundant agents. You should keep the graph executable.\n" + "\n\nThe original workflow is:\n\n" + prompt
        else:
            prompt = "The detected issue is: " + critic_out + f"\nYou also need to ensure the new workflow can increase the model performance score: {scorer}." + "\n\nThe original workflow is:\n\n" + prompt + "\nYou should always improve the workflow by correcting the issue without changing the inputs and outputs of nodes in the workflow. You can remove redundant agents. You should keep the graph executable.\nYour OUTPUT:"
#         print(">>>>>>>>>> Prompt: <<<<<<<<<<<\n", prompt)
        new_prompt = self.llm.generate(
            prompt=prompt, 
            system_message="You are a Graph Optimization Agent. Your goal is to iteratively improve graph performance through systematic optimization. You need to modify the workflow and improve the structure by solving the issue. Only change the order of agents or recall the KNOWN agents in the workflow. DO NOT change the names and inputs of agents. You should keep the graph executable.",
        ).content
        print(new_prompt)
        return new_prompt


def update_dev_set(dataset):
    """
    Randomly re-sample the dev split from the full dev pool, keeping its size unchanged.
    """
    permutation = np.random.permutation(len(dataset._dev_data_full))
    # randomly select the same number of samples as the current dev split (consistent with other models)
    dev_data_task_ids = [dataset._dev_data_full[idx]["task_id"] for idx in permutation[:len(dataset._dev_data)]]
    full_data = dataset._dev_data_full
    dev_data = [example for example in full_data if example["task_id"] in dev_data_task_ids]

    return dev_data
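# Note: the optimizer re-samples the dev split between evaluation rounds, e.g.
#   dataset._dev_data = update_dev_set(dataset)
# so successive evaluations are scored on a fresh random subset of the full dev pool.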
    
class STRUCTUREOptimizer(Optimizer):

    graph: Union[WorkFlowGraph, ActionGraph] = Field(description="The workflow to optimize.")
    repr_scheme: str = Field(default="python", description="The scheme to represent the workflow.")
    optimize_mode: Literal["all", "structure", "prompt"] = Field(default="all", description="The mode to optimize the workflow.")
    order: Literal["zero-order", "first-order"] = Field(default="zero-order", description="Whether to use zero-order (using hyper-mutation prompt) or first-order (using mutation prompt) optimization.")

    calltime: int = Field(default=1, description="Number of critic (textgrad-style) calls used per optimization step.")
    num_workers: int = Field(default=1, description="Number of parallel workers used for evaluation.")

    def init_module(self, **kwargs):
        self._snapshot: List[dict] = []
        self._prompt_breeder = SimplePromptBreeder(llm=self.llm, evaluator = self.evaluator) # generate prompt for optimization
        self._convergence_check_counter = 0 
        self._best_score = float("-inf")
        
        self._prompt_dict = {}
        if isinstance(self.graph, ActionGraph):
            if self.optimize_mode != "prompt":
                raise ValueError(
                    f"{type(self).__name__} only support prompt optimization when `graph` is an `ActionGraph`. "
                    f"The `optimize_mode` should be set to `prompt`, but got {self.optimize_mode}."
                )

    def optimize(self, dataset: Benchmark, **kwargs):

        if isinstance(self.graph, WorkFlowGraph):
            logger.info(f"Optimizing the {type(self.graph).__name__} workflow with {self.repr_scheme} representation.")
        elif isinstance(self.graph, ActionGraph):
            logger.info(f"Optimizing the {type(self.graph).__name__} graph ...")
        graph: Union[WorkFlowGraph, ActionGraph] = self.graph 
        logger.info("Run initial evaluation on the original workflow ...")
        with suppress_logger_info():
            metrics = self.evaluate(dataset, eval_mode="dev", graph=graph)
        self._prompt_breeder = SimplePromptBreeder(llm=self.llm, evaluator = self.evaluator) # generate prompt for optimization
        logger.info(f"Initial metrics: {metrics}")
        self.log_snapshot(graph=graph, metrics=metrics)
        
        set_scorer = None
        if kwargs["provided_scorer"] == True:
            set_scorer = metrics

        for i in range(self.max_steps):
#             try:
#                 # perform a step of optimization
#                 graph = self.step(set_scorer=set_scorer)
# #                 print(graph)
#                 # evaluate the workflow
#                 if (i + 1) % self.eval_every_n_steps == 0:
#                     logger.info(f"Evaluate the workflow at step {i+1} ...")
#                     with suppress_logger_info():
#                         metrics = self.evaluate(dataset, eval_mode="dev")
#                     logger.info(f"Step {i+1} metrics: {metrics}")
#                     self.log_snapshot(graph=graph, metrics=metrics)
#             except Exception as e:
#                 logger.warning(f"Error in step {i}: {e}. Skip this step.")
#                 continue
#             if self.convergence_check():
#                 logger.info(f"Convergence check passed at step {i+1}. Stop the optimization.")
#                 break
            # perform a step of optimization
            graph = self.step(set_scorer=set_scorer, step=i)
#             print(graph)
            # evaluate the workflow
            if (i + 1) % self.eval_every_n_steps == 0:
                logger.info(f"Evaluate the workflow at step {i+1} ...")
                with suppress_logger_info():
                    metrics = self.evaluate(dataset, eval_mode="dev")
                logger.info(f"Step {i+1} metrics: {metrics}")
                self.log_snapshot(graph=graph, metrics=metrics)
                print("randomly update dataset")
                self.dataset._dev_data = update_dev_set(self.dataset)
        
        if i == self.max_steps - 1:
            logger.info(f"Reach the maximum number of steps {self.max_steps}. Stop the optimization.")
        
        # set self.graph to the best graph
        logger.info("Restore the best graph from the snapshot ...")
        self.restore_best_graph()
    
    def step(self, **kwargs) -> Union[WorkFlowGraph, ActionGraph]:
        """
        Take a step of optimization and return the optimized graph.
        """
        graph = self._select_graph_with_highest_score(return_metrics=False)
        if isinstance(graph, WorkFlowGraph):
            new_graph = self._workflow_graph_step(graph, kwargs["set_scorer"], kwargs["step"])
        elif isinstance(graph, ActionGraph):
            new_graph = self._action_graph_step(graph, kwargs["set_scorer"], kwargs["step"])
        else:
            raise ValueError(f"Invalid graph type: {type(graph)}. The graph should be an instance of `WorkFlowGraph` or `ActionGraph`.")
        return new_graph
    
    def evaluate(
        self, 
        dataset: Benchmark, 
        eval_mode: str = "test", 
        graph: Optional[Union[WorkFlowGraph, ActionGraph]] = None,
        indices: Optional[List[int]] = None,
        sample_k: Optional[int] = None,
        **kwargs
    ) -> dict:
        """
        Evaluate the workflow. If `graph` is provided, use the provided graph for evaluation. Otherwise, use the graph in the optimizer. 
        
        Args:
            dataset (Benchmark): The dataset to evaluate the workflow on.
            eval_mode (str): The evaluation mode. Choices: ["test", "dev", "train"].
            graph (Union[WorkFlowGraph, ActionGraph], optional): The graph to evaluate. If not provided, use the graph in the optimizer.
            indices (List[int], optional): The indices of the data to evaluate the workflow on.
            sample_k (int, optional): The number of data to evaluate the workflow on. If provided, a random sample of size `sample_k` will be used.
        
        Returns:
            dict: The metrics of the workflow evaluation.
        """
        self.dataset = dataset
        graph = graph if graph is not None else self.graph
        agent_manager = self.evaluator.agent_manager
        agent_manager.add_agents_from_workflow(graph, llm_config=self.llm.config)
#         print(agent_manager)
        # obtain Evaluator
        self.evaluator = Evaluator(llm=self.llm, agent_manager=agent_manager, collate_func=self.collate_func, num_workers=self.num_workers, verbose=True)
        self.evaluator.dataname = self.dataset.dataname
        metrics_list = []
        for i in range(self.eval_rounds):
            eval_info = [
                f"[{type(graph).__name__}]", 
                f"Evaluation round {i+1}/{self.eval_rounds}", 
                f"Mode: {eval_mode}"
            ]
            if indices is not None:
                eval_info.append(f"Indices: {len(indices)} samples")
            if sample_k is not None:
                eval_info.append(f"Sample size: {sample_k}")
            logger.info(" | ".join(eval_info))
#             if self.dataset.dataname == 'scicode':
#                 metrics = await self.evaluator.async_evaluate(
#                     graph=graph, 
#                     benchmark=dataset, 
#                     eval_mode=eval_mode, 
#                     indices=indices, 
#                     sample_k=sample_k,
#                     **kwargs
#                 )
#             else:
#                 metrics = self.evaluator.evaluate(
#                     graph=graph, 
#                     benchmark=dataset, 
#                     eval_mode=eval_mode, 
#                     indices=indices, 
#                     sample_k=sample_k,
#                     **kwargs
#                 )

            metrics = self.evaluator.evaluate(
                graph=graph, 
                benchmark=dataset, 
                eval_mode=eval_mode, 
                indices=indices, 
                sample_k=sample_k,
                **kwargs
            )
            metrics_list.append(metrics)
        avg_metrics = self.evaluator._calculate_average_score(metrics_list)
        self.dataset = dataset
        self.evaluator.error_list = deepcopy(self.dataset.error_list)
        self.dataset.error_list = {}
        
        return avg_metrics
    
    def log_snapshot(self, graph: Union[WorkFlowGraph, ActionGraph], metrics: dict):
        
        if isinstance(graph, WorkFlowGraph):
            graph_info = graph.get_graph_info()
        elif isinstance(graph, ActionGraph):
            # TODO check if the action graph is valid 
            graph_info = graph
        else:
            raise ValueError(f"Invalid graph type: {type(graph)}. The graph should be an instance of `SequentialWorkFlowGraph` or `ActionGraph`.")
        
        self._snapshot.append(
            {
                "index": len(self._snapshot),
                "graph": deepcopy(graph_info),
                "metrics": metrics,
            }
        )

    def _select_graph_with_highest_score(self, return_metrics: bool = False) -> Union[SequentialWorkFlowGraph, ActionGraph]:

        if len(self._snapshot) == 0:
            return self.graph
        snapshot_scores = [np.mean(list(snapshot["metrics"].values())) for snapshot in self._snapshot]
        best_index = np.argmax(snapshot_scores)

        if isinstance(self.graph, WorkFlowGraph):
            graph = WorkFlowGraph.from_dict(self._snapshot[best_index]["graph"])
        elif isinstance(self.graph, ActionGraph):
            # TODO check if the action graph is valid
            graph = self._snapshot[best_index]["graph"]
        else:
            raise ValueError(f"Invalid graph type: {type(self.graph)}. The graph should be an instance of `SequentialWorkFlowGraph` or `ActionGraph`.")
        
        if return_metrics:
            return graph, self._snapshot[best_index]["metrics"]
        return graph
    
    def restore_best_graph(self):

        best_graph, best_metrics = self._select_graph_with_highest_score(return_metrics=True)
        logger.info(f"Restore the best graph from snapshot with metrics {best_metrics} ...")
        self.graph = best_graph

    def _wfg_structure_optimization_step(self, graph: WorkFlowGraph, scorer, step) -> WorkFlowGraph:
        """
        optinize the structure of the workflow graph and return the optimized graph.
        Args:
            graph (SequentialWorkFlowGraph): The workflow graph to optimize.
        
        Returns:
            SequentialWorkFlowGraph: The optimized workflow graph.  
        """
        graph_scheme = STRUCTUREWorkFlowScheme(graph=graph)
        graph_repr = graph_scheme.convert_to_scheme(scheme=self.repr_scheme)
        if self.repr_scheme == "python":
            output_format = "\n\nALWAYS wrap the refined workflow in ```python\n``` format and DON'T include any other text within the code block!"
        elif self.repr_scheme == "yaml":
            output_format = "\n\nALWAYS wrap the refined workflow in ```yaml\n``` format and DON'T include any other text within the code block!"
        elif self.repr_scheme == "code":
            output_format = "\n\nALWAYS wrap the refined workflow in ```code\n``` format and DON'T include any other text within the code block!"
        elif self.repr_scheme == "core":
            output_format = "\n\nALWAYS wrap the refined workflow in ```core\n``` format and DON'T include any other text within the code block!"
        elif self.repr_scheme == "bpmn":
            output_format = "\n\nALWAYS wrap the refined workflow in ```bpmn\n``` format and DON'T include any other text within the code block!"
        else:
            raise ValueError(f"Invalid representation scheme: {self.repr_scheme}. The scheme should be one of {VALID_SCHEMES}.")
        prompt = "Task Description: " + graph.goal + "\n\nWorkflow Steps: " + graph_repr + output_format

        new_graph_repr = self._prompt_breeder.critic_and_update_prompt(task_description=graph.goal, prompt=prompt, order=self.order, scorer=scorer, calltime=self.calltime)
        new_graph = graph_scheme.parse_from_scheme(scheme=self.repr_scheme, repr=new_graph_repr)
        return new_graph
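
    # For reference (not executed): with `repr_scheme == "python"`, the LLM reply that
    # `parse_from_scheme` receives is expected to be a single fenced block of the form
    #
    #     ```python
    #     <refined workflow representation>
    #     ```
    #
    # with no other text inside the code block, as requested by `output_format` above.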
    
    def _wfg_prompt_optimization_step(self, graph: WorkFlowGraph, scorer=None) -> WorkFlowGraph:
        """
        Optimize the task prompts of the workflow graph and return the optimized graph.

        For every task in the graph, the evaluator's per-example records (question,
        prediction, reference solution, unit tests, and score) are summarized into a
        critique prompt, the LLM identifies issues in the current instruction, and the
        prompt breeder generates a refined instruction.

        Args:
            graph (WorkFlowGraph): The workflow graph whose task prompts are optimized.
            scorer: Unused in this step.

        Returns:
            SequentialWorkFlowGraph: The workflow graph with refined task prompts.
        """
        task_description = graph.goal
        graph_scheme = STRUCTUREWorkFlowScheme(graph=graph)
        graph_repr = graph_scheme.convert_to_scheme(scheme=self.repr_scheme)
        graph_info = graph.get_graph_info()

        problem_list = ""
        for item, record in self.evaluator._evaluation_records.items():
            problem_s = "Questions: " + record['trajectory'][0].content['question'] + '\n'
            prediction_s = "Predictions: " + record['prediction'] + '\n'
            solution_s = "Solutions: " + record['label']['canonical_solution'] + '\n'

            if 'test' in record['label']:
                test_s = "Unit tests: " + record['label']['test'][0:10000] + '\n'
            elif 'tests' in record['label']:
                test_s = "Unit tests: " + record['label']['tests'][0:10000] + '\n'
            else:
                test_s = ""
            metric_s = "Score: " + str(record['metrics']['pass@1']) + "\n"
            if record['metrics']['pass@1'] == 0:
                if self.evaluator.error_list[item] == "An error occurred: ":
                    metric_s += "Error reason: Computation result is incorrect."
                else:
                    error_reason = self.evaluator.error_list[item].replace("An error occurred: ", "")
                    metric_s += f"Error reason: {error_reason}"
            else:
                metric_s += "The solution is correct."

            joint_s = problem_s + prediction_s + solution_s + test_s + metric_s
            problem_list += joint_s
        logger.debug(problem_list)
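
        # Illustrative example (not executed; values are made up): each record appended to
        # `problem_list` above concatenates the evaluator's fields in this layout:
        #
        #     Questions: Write a function add(a, b) that returns a + b.
        #     Predictions: def add(a, b): return a - b
        #     Solutions: def add(a, b): return a + b
        #     Unit tests: assert add(1, 2) == 3
        #     Score: 0.0
        #     Error reason: Computation result is incorrect.
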
        for i, task in enumerate(graph_info["tasks"]):
            if task['name'] not in self._prompt_dict:
                self._prompt_dict[task['name']] = []

            original_prompt = task["prompt"]
            optimization_prompt = "Task Description: " + task_description + "\n\nWorkflow Steps:\n" + graph_repr + f"\n\nINSTRUCTION for the {i+1}-th task:\n\"\"\"\n" + original_prompt + "\n\"\"\""
            error_prompt = optimization_prompt + f"\n\nThe name of this agent is {task['name']}. " + "The questions, solutions, and evaluated metrics based on this workflow are: " + problem_list + "\nYou should detect the issues in the original prompt by considering these questions, predictions, tests, solutions, and scores!"

            critic_issues = self.llm.generate(error_prompt).content
            optimization_prompt += f"\n\nThe new prompt should fix the detected issues by adjusting the prompt content: {critic_issues}. You should not change the original role and task of the assigned agent."

            if self._prompt_dict[task['name']]:
                prev_prompt = "\n".join(self._prompt_dict[task['name']])
                optimization_prompt += f"\n\nThe previous prompts are: {prev_prompt}\nYou should also fix the problems in these prompts."

            optimization_prompt += f"\n\nGiven the above information, please refine the instruction for the {i+1}-th task.\n"
            optimization_prompt += r"Note that you must always use brackets (e.g. `{input_name}`, `{code}`, `{question}`) to wrap the inputs of the tasks in your refined instruction. You must ensure the prompt contains all inputs. You cannot change the names of functions.\n"
            optimization_prompt += "Your prompt should not change the function name and entry_point in the question. Only output the refined instruction and DON'T include any other text!"
            new_prompt = self._prompt_breeder.generate_prompt(task_description=task_description, prompt=optimization_prompt, order=self.order)
            graph_info["tasks"][i]["prompt"] = new_prompt
#             print("task name", task['name'])
#             print("detected issue", critic_issues)
#             print("renewed prompt", new_prompt)
            self._prompt_dict[task['name']].append(new_prompt)
        new_graph = SequentialWorkFlowGraph.from_dict(graph_info)
        return new_graph
        
    def _workflow_graph_step(self, graph: WorkFlowGraph, scorer, step) -> WorkFlowGraph:
        """
        Run one optimization step on a workflow graph, optimizing the structure and/or
        the prompts according to `self.optimize_mode`.
        """
        if self.optimize_mode in ("structure", "all"):
            # optimize the structure of the graph
            graph = self._wfg_structure_optimization_step(graph, scorer=scorer, step=step)
        if self.optimize_mode in ("prompt", "all"):
            # optimize the prompts of the graph
            graph = self._wfg_prompt_optimization_step(graph, scorer=scorer)
        
        return graph
    
    def _action_graph_prompt_optimization_step(self, graph: ActionGraph) -> ActionGraph:
        """
        Optimize the operator prompts of an action graph and return the optimized graph.
        """
        task_description = graph.description
        graph_info = graph.get_graph_info()
        graph_steps = inspect.getsource(getattr(graph, "execute"))
        for operator_name, operator_info in graph_info["operators"].items():
            original_prompt = operator_info["prompt"]
            optimization_prompt = "Task Description: " + task_description + "\n\nWorkflow Steps:\n" + graph_steps + f"\n\nINSTRUCTION for the `{operator_name}` operator:\n\"\"\"\n" + original_prompt + "\n\"\"\""
            optimization_prompt += "\n\nThe interface of the operator is as follows:\n" + operator_info["interface"]
            optimization_prompt += f"\n\nGiven the above information, please refine the instruction for the `{operator_name}` operator.\n"
            optimization_prompt += r"Note that you should always use bracket (e.g. `{input_name}`) to wrap the inputs of the operator in your refined instruction, "
            optimization_prompt += "and the input names should be EXACTLY the same as those defined in the interface. DON'T use bracket to wrap output names."
            optimization_prompt += "\nOnly output the refined instruction and DON'T include any other text!"
            new_prompt = self._prompt_breeder.generate_prompt(task_description=task_description, prompt=optimization_prompt, order=self.order)
            new_prompt = new_prompt.replace("\"", "").strip()
            graph_info["operators"][operator_name]["prompt"] = new_prompt
        new_graph = ActionGraph.from_dict(graph_info)
        return new_graph
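
    # Illustrative example (not executed; the placeholder name is made up): a refined operator
    # instruction returned by the prompt breeder is expected to reference the operator's inputs
    # with bracketed placeholders that exactly match the interface, e.g.
    # "Given the problem {problem}, generate ...", while output names are written without brackets.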

    def _action_graph_step(self, graph: ActionGraph) -> ActionGraph:
        """
        Run one optimization step on an action graph. Only prompt optimization is supported.
        """
        if self.optimize_mode == "prompt":
            graph = self._action_graph_prompt_optimization_step(graph)
        else:
            raise ValueError(f"{type(self).__name__} only supports prompt optimization when `self.graph` is an `ActionGraph` instance. "
                    f"The `optimize_mode` should be set to `prompt`, but got {self.optimize_mode}.")
        return graph

    def convergence_check(self, **kwargs) -> bool:
        """
        Check whether optimization has converged, i.e. the best average metric score has
        not improved for `self.convergence_threshold` consecutive snapshots.

        Returns:
            bool: True if early stopping should be triggered, otherwise False.
        """
        if not self._snapshot:
            logger.warning("No snapshots available for convergence check")
            return False
        
        # Get scores from snapshots
        scores = [np.mean(list(snapshot["metrics"].values())) for snapshot in self._snapshot]
        current_score = scores[-1]

        if current_score > self._best_score:
            self._best_score = current_score
            self._convergence_check_counter = 0
        else:
            self._convergence_check_counter += 1

        if self._convergence_check_counter >= self.convergence_threshold:
            logger.info(f"Early stopping triggered: No improvement for {self.convergence_threshold} iterations")
            # logger.info(f"Score history: {scores[-self.convergence_threshold:]}")
            return True
        return False
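
    # Worked example (hypothetical numbers): with `convergence_threshold = 3`, mean snapshot
    # scores of [0.50, 0.60, 0.60, 0.55, 0.58], and an initial best score below 0.50, the best
    # score settles at 0.60 after the second check; the next three checks see no improvement,
    # so the counter reaches 3 and `convergence_check` returns True, triggering early stopping.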

    def save(self, path: str, ignore: List[str] = []):
        """
        Save the (optimized) workflow graph to a file. 

        Args:
            path (str): The path to save the workflow graph.
            ignore (List[str]): The keys to ignore when saving the workflow graph.
        """
        self.graph.save_module(path, ignore=ignore)
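
    # Illustrative usage (the path and ignore keys below are hypothetical):
    #
    #     optimizer.save("output/optimized_workflow.json", ignore=["llm_config"])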