|
|
import yaml
import regex
import random
import inspect
import difflib
import ast
import numpy as np
import xml.etree.ElementTree as ET

from copy import deepcopy
from typing import Any, Dict, List, Literal, Optional, Union
from pydantic import Field
|
|
from evoagentx.models import OpenAILLMConfig, OpenAILLM |
|
|
from evoagentx.evaluators import Evaluator |
|
|
from evoagentx.prompts import StringTemplate |
|
|
|
|
|
from .optimizer import Optimizer |
|
|
from ..core.logging import logger |
|
|
from ..models.base_model import BaseLLM |
|
|
from ..benchmark.benchmark import Benchmark |
|
|
from ..workflow.action_graph import ActionGraph |
|
|
from ..core.callbacks import suppress_logger_info |
|
|
from ..workflow.workflow_graph import SequentialWorkFlowGraph,WorkFlowGraph |
|
|
from ..prompts.workflow.sew_optimizer import mutation_prompts, thinking_styles |
|
|
|
|
|
VALID_SCHEMES = ["python", "yaml", "code", "core", "bpmn"] |
|
|
|
|
|
|
|
|
|
|
def find_closest_name(input_name, name_reference):
    """
    Return the step in `name_reference` whose `task_name` is the closest fuzzy match to `input_name`.
    """
    name_reference_correct = [step["task_name"] for step in name_reference]
    matches = difflib.get_close_matches(input_name, name_reference_correct, n=1, cutoff=0.1)
    if not matches:
        raise ValueError(f"No close match found for '{input_name}' among {name_reference_correct}.")
    correct_step = name_reference[name_reference_correct.index(matches[0])]

    return correct_step
|
|
|
|
|
|
|
|
class STRUCTUREWorkFlowScheme: |
|
|
|
|
|
""" |
|
|
The scheme of the workflow for SEW optimizer. |
|
|
""" |
|
|
def __init__(self, graph: WorkFlowGraph, **kwargs): |
|
|
self.graph = graph |
|
|
self.kwargs = kwargs |
|
|
|
|
|
def convert_to_scheme(self, scheme: str) -> str: |
|
|
""" |
|
|
Transform the WorkflowGraph to the desired scheme. |
|
|
""" |
|
|
if scheme not in VALID_SCHEMES: |
|
|
raise ValueError(f"Invalid scheme: {scheme}. The scheme should be one of {VALID_SCHEMES}.") |
|
|
if scheme == "python": |
|
|
repr = self.get_workflow_python_repr() |
|
|
elif scheme == "yaml": |
|
|
repr = self.get_workflow_yaml_repr() |
|
|
elif scheme == "code": |
|
|
repr = self.get_workflow_code_repr() |
|
|
elif scheme == "core": |
|
|
repr = self.get_workflow_core_repr() |
|
|
elif scheme == "bpmn": |
|
|
repr = self.get_workflow_bpmn_repr() |
|
|
return repr |
|
|
|
|
|
def parse_from_scheme(self, scheme: str, repr: str) -> WorkFlowGraph: |
|
|
""" |
|
|
Parse the SequentialWorkFlowGraph from the given scheme and representation. |
|
|
""" |
|
|
if scheme not in VALID_SCHEMES: |
|
|
raise ValueError(f"Invalid scheme: {scheme}. The scheme should be one of {VALID_SCHEMES}.") |
|
|
if scheme == "python": |
|
|
graph = self.parse_workflow_python_repr(repr) |
|
|
elif scheme == "yaml": |
|
|
graph = self.parse_workflow_yaml_repr(repr) |
|
|
elif scheme == "code": |
|
|
graph = self.parse_workflow_code_repr(repr) |
|
|
elif scheme == "core": |
|
|
graph = self.parse_workflow_core_repr(repr) |
|
|
elif scheme == "bpmn": |
|
|
graph = self.parse_workflow_bpmn_repr(repr) |
|
|
return graph |
|
|
|
|
|
def _get_workflow_repr_info(self) -> List[dict]: |
|
|
""" |
|
|
Get the information for the workflow representation. |
|
|
""" |
|
|
info = [] |
|
|
for node in self.graph.nodes: |
|
|
task_name = node.name |
|
|
input_names = [param.name for param in node.inputs] |
|
|
output_names = [param.name for param in node.outputs] |
|
|
task_info = { |
|
|
"task_name": task_name, |
|
|
"input_names": input_names, |
|
|
"output_names": output_names |
|
|
} |
|
|
info.append(task_info) |
|
|
return info |
|
|
|
|
|
def _convert_to_func_name(self, name: str) -> str: |
|
|
""" |
|
|
Convert the task name to the function name. |
|
|
""" |
|
|
name = name.lower().strip() |
|
|
name = name.replace(' ', '_').replace('-', '_') |
|
|
name = ''.join(c for c in name if c.isalnum() or c == '_') |
|
|
|
|
|
name = regex.sub(r'_+', "_", name) |
|
|
|
|
|
name = name.strip('_') |
|
|
return name |
|
|
|
|
|
def _convert_to_title(self, name: str) -> str: |
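        """
        Convert a task name into a human-readable title, e.g. "data_cleaning" -> "Data Cleaning".
        """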
|
|
func_name = self._convert_to_func_name(name) |
|
|
words = func_name.split('_') |
|
|
return ' '.join(word.capitalize() for word in words) |
|
|
|
|
|
def get_workflow_python_repr(self) -> str: |
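        """
        Represent the workflow as a Python list of step dicts:
        steps = [{'name': ..., 'args': [...], 'outputs': [...]}, ...]
        """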
|
|
repr_info = self._get_workflow_repr_info() |
|
|
if not repr_info: |
|
|
return "" |
|
|
|
|
|
python_workflow_info = [] |
|
|
for task_info in repr_info: |
|
|
name = self._convert_to_func_name(task_info['task_name']) |
|
|
            input_names = task_info['input_names']
            output_names = task_info['output_names']
|
|
python_workflow_info.append( |
|
|
"{{'name': '{name}', 'args': {args}, 'outputs': {outputs}}}".format( |
|
|
name=name, |
|
|
args=input_names, |
|
|
outputs=output_names |
|
|
) |
|
|
) |
|
|
python_workflow_repr = "steps = [\n" + ",\n".join(python_workflow_info) + "\n]" |
|
|
|
|
|
return python_workflow_repr |
|
|
|
|
|
def get_workflow_yaml_repr(self) -> str: |
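        """
        Represent the workflow as a YAML list where each entry has `name`, `args`, and `outputs`.
        """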
|
|
repr_info = self._get_workflow_repr_info() |
|
|
if not repr_info: |
|
|
return "" |
|
|
|
|
|
yaml_workflow_info = [] |
|
|
for task_info in repr_info: |
|
|
name = self._convert_to_func_name(task_info['task_name']) |
|
|
input_names = "\n".join([f' - {input_name}' for input_name in task_info['input_names']]) |
|
|
output_names = "\n".join([f' - {output_name}' for output_name in task_info['output_names']]) |
|
|
yaml_workflow_info.append( |
|
|
"- name: {name}\n args:\n{input_names}\n outputs:\n{output_names}".format( |
|
|
name=name, |
|
|
input_names=input_names, |
|
|
output_names=output_names |
|
|
) |
|
|
) |
|
|
yaml_workflow_repr = "\n\n".join(yaml_workflow_info) |
|
|
return yaml_workflow_repr |
|
|
|
|
|
def get_workflow_code_repr(self) -> str: |
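        """
        Represent the workflow as pseudo-code lines of the form `task_name(input1, ...) -> output1, ...`.
        """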
|
|
repr_info = self._get_workflow_repr_info() |
|
|
if not repr_info: |
|
|
return "" |
|
|
|
|
|
workflow_lines = [] |
|
|
for task_info in repr_info: |
|
|
|
|
|
name = self._convert_to_func_name(task_info['task_name']) |
|
|
|
|
|
|
|
|
inputs = ", ".join(task_info['input_names']) |
|
|
outputs = ", ".join(task_info['output_names']) |
|
|
|
|
|
|
|
|
line = f"{name}({inputs}) -> {outputs}" |
|
|
workflow_lines.append(line) |
|
|
|
|
|
|
|
|
workflow_repr = "\n".join(workflow_lines) |
|
|
|
|
|
return workflow_repr |
|
|
|
|
|
def get_workflow_bpmn_repr(self) -> str: |
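        """
        Represent the workflow as a minimal BPMN XML document with a start event, one task per step,
        sequence flows connecting the tasks in order, and an end event.
        """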
|
|
|
|
|
repr_info = self._get_workflow_repr_info() |
|
|
if not repr_info: |
|
|
return "" |
|
|
|
|
|
|
|
|
bpmn_lines = [ |
|
|
'<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">', |
|
|
'<process id="software_dev_workflow" isExecutable="true">', |
|
|
' <startEvent id="start" />' |
|
|
] |
|
|
|
|
|
|
|
|
        for task_info in repr_info:
|
|
task_name = self._convert_to_func_name(task_info['task_name']) |
|
|
task_title = self._convert_to_title(task_info['task_name']) |
|
|
bpmn_lines.append(f' <task id="{task_name}" name="{task_title}" />') |
|
|
|
|
|
bpmn_lines.append(' <endEvent id="end" />') |
|
|
bpmn_lines.append('') |
|
|
bpmn_lines.append(' <!-- Workflow connections -->') |
|
|
|
|
|
|
|
|
|
|
|
if repr_info: |
|
|
first_task_id = self._convert_to_func_name(repr_info[0]['task_name']) |
|
|
bpmn_lines.append(f' <sequenceFlow id="flow1" sourceRef="start" targetRef="{first_task_id}" />') |
|
|
|
|
|
|
|
|
for i in range(len(repr_info) - 1): |
|
|
source_id = self._convert_to_func_name(repr_info[i]['task_name']) |
|
|
target_id = self._convert_to_func_name(repr_info[i + 1]['task_name']) |
|
|
flow_num = i + 2 |
|
|
bpmn_lines.append(f' <sequenceFlow id="flow{flow_num}" sourceRef="{source_id}" targetRef="{target_id}" />') |
|
|
|
|
|
|
|
|
if repr_info: |
|
|
last_task_id = self._convert_to_func_name(repr_info[-1]['task_name']) |
|
|
flow_num = len(repr_info) + 1 |
|
|
bpmn_lines.append(f' <sequenceFlow id="flow{flow_num}" sourceRef="{last_task_id}" targetRef="end" />') |
|
|
|
|
|
|
|
|
bpmn_lines.append('</process>') |
|
|
bpmn_lines.append('</definitions>') |
|
|
|
|
|
return '\n'.join(bpmn_lines) |
|
|
|
|
|
def get_workflow_core_repr(self) -> str: |
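        """
        Represent the workflow in the CORE scheme, one line per step:
        `Step i::: Process ::: Task Title:::next::Step i+1`, terminated by a Terminal step.
        """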
|
|
|
|
|
repr_info = self._get_workflow_repr_info() |
|
|
if not repr_info: |
|
|
return "" |
|
|
|
|
|
workflow_lines = [] |
|
|
for i, task_info in enumerate(repr_info, 1): |
|
|
|
|
|
task_name = self._convert_to_title(task_info['task_name']) |
|
|
|
|
|
next_step = i + 1 |
|
|
line = f"Step {i}::: Process ::: {task_name}:::next::Step {next_step}" |
|
|
workflow_lines.append(line) |
|
|
|
|
|
|
|
|
last_step = len(repr_info) + 1 |
|
|
workflow_lines.append(f"Step {last_step}::: Terminal ::: End of Workflow:::") |
|
|
|
|
|
return "\n".join(workflow_lines) |
|
|
|
|
|
def _find_task_index(self, step: dict, graph_repr_info: List[dict]) -> int: |
|
|
""" |
|
|
Find the index of the task in the original workflow graph. If the task is not found, return -1. |
|
|
|
|
|
Args: |
|
|
step (dict): The step of the workflow. |
|
|
graph_repr_info (List[dict]): The information of the original workflow graph. |
|
|
|
|
|
Returns: |
|
|
int: The index of the task. |
|
|
""" |
|
|
def _is_task_name_match(task_name: str, another_name: str) -> bool: |
|
|
return self._convert_to_func_name(task_name) == self._convert_to_func_name(another_name) |
|
|
|
|
|
def _is_task_inputs_match(task_inputs: List[str], another_inputs: List[str]) -> bool: |
|
|
return len(set(task_inputs) & set(another_inputs)) == len(task_inputs) |
|
|
|
|
|
def _is_task_outputs_match(task_outputs: List[str], another_outputs: List[str]) -> bool: |
|
|
return len(set(task_outputs) & set(another_outputs)) == len(task_outputs) |
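        # A step matches an existing task when their normalized names agree and all of the
        # task's outputs appear among the step's outputs (inputs are not compared here).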
|
|
|
|
|
for i, task in enumerate(graph_repr_info): |
|
|
|
|
|
|
|
|
if _is_task_name_match(task["task_name"], step["name"]) and _is_task_outputs_match(task["output_names"], step["outputs"]): |
|
|
return i |
|
|
return -1 |
|
|
|
|
|
def create_workflow_graph_from_steps( |
|
|
self, |
|
|
steps: List[dict] |
|
|
) -> WorkFlowGraph: |
|
|
|
|
|
""" |
|
|
Create a new workflow graph from the steps. |
|
|
        Since both the inputs and outputs are provided, new tasks will be created in the new workflow graph.
        It is used for the `python`, `yaml`, and `code` representations.
|
|
|
|
|
Args: |
|
|
steps (List[dict]): The steps of the workflow. The steps are in the format of: |
|
|
[ |
|
|
{ |
|
|
"name": str, |
|
|
"args": List[str], |
|
|
"outputs": List[str] |
|
|
} |
|
|
] |
|
|
|
|
|
Returns: |
|
|
SequentialWorkFlowGraph: The new workflow graph. |
|
|
""" |
|
|
original_workflow_config = self.graph.get_graph_info() |
|
|
repr_info = self._get_workflow_repr_info() |
|
|
new_tasks = [] |
|
|
|
|
for step in steps: |
|
|
task_index = self._find_task_index(step=step, graph_repr_info=repr_info) |
|
|
if task_index == -1: |
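                # The step does not match any task in the original workflow, so construct a brand-new
                # task definition from the step's name, inputs, and outputs. A random numeric suffix is
                # appended to the name to keep it unique within the new graph.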
|
|
task_name = step["name"] |
|
|
                task_name = task_name + str(np.random.randint(0, 10000))
|
|
description = f"Task to {task_name.lower()}. " |
|
|
if step["args"]: |
|
|
description += f"Takes {', '.join(step['args'])} as input. " |
|
|
if step["outputs"]: |
|
|
description += f"Produces {', '.join(step['outputs'])} as output." |
|
|
|
|
|
                toolname = step.get("tool_names")
|
|
|
|
|
new_task = { |
|
|
"name": task_name, |
|
|
"description": description, |
|
|
"inputs": [ |
|
|
{ |
|
|
"name": input_name, |
|
|
"type": "str", |
|
|
"description": f"Input parameter {input_name} for {task_name}", |
|
|
"required":False |
|
|
} for input_name in step["args"] |
|
|
], |
|
|
"outputs": [ |
|
|
{ |
|
|
"name": output_name, |
|
|
"type": "str", |
|
|
"description": f"Output parameter {output_name} from {task_name}" |
|
|
} for output_name in step["outputs"] |
|
|
], |
|
|
"prompt": "Your are a task solver.", |
|
|
|
|
|
"parse_mode": "xml", |
|
|
|
|
|
"tool_names": toolname |
|
|
} |
|
|
new_tasks.append(new_task) |
|
|
else: |
|
|
|
|
|
if original_workflow_config["tasks"][task_index] not in new_tasks: |
|
|
new_tasks.append(deepcopy(original_workflow_config["tasks"][task_index])) |
|
|
|
|
|
new_workflow_config = { |
|
|
"goal": original_workflow_config["goal"], |
|
|
"tasks": new_tasks |
|
|
} |
|
|
|
|
|
|
|
|
new_graph = SequentialWorkFlowGraph.from_dict(new_workflow_config) |
|
|
return new_graph |
|
|
|
|
|
def create_workflow_graph_from_task_names( |
|
|
self, |
|
|
task_names: Optional[List[str]] = None, |
|
|
task_titles: Optional[List[str]] = None |
|
|
) -> SequentialWorkFlowGraph: |
|
|
""" |
|
|
Create a new workflow graph from the task names or titles. |
|
|
Since only the task names or titles are provided, the tasks in the new workflow graph will be copied from the original workflow graph. |
|
|
It is used for the `bpmn` and `core` representations. |
|
|
|
|
|
Args: |
|
|
task_names (Optional[List[str]]): The names of the tasks. |
|
|
task_titles (Optional[List[str]]): The titles of the tasks. |
|
|
|
|
|
Returns: |
|
|
SequentialWorkFlowGraph: The new workflow graph. |
|
|
""" |
|
|
if task_names: |
|
|
original_workflow_config = self.graph.get_graph_info() |
|
|
tasks = task_names |
|
|
original_tasks = {self._convert_to_func_name(task["name"]): task for task in original_workflow_config["tasks"]} |
|
|
elif task_titles: |
|
|
original_workflow_config = self.graph.get_graph_info() |
|
|
tasks = task_titles |
|
|
original_tasks = {self._convert_to_title(task["name"]): task for task in original_workflow_config["tasks"]} |
|
|
else: |
|
|
raise ValueError("No task names or titles provided.") |
|
|
|
|
|
new_tasks = [] |
|
|
for task in tasks: |
|
|
if task not in original_tasks: |
|
|
raise ValueError(f"Task {task} not found in the original workflow.") |
|
|
new_tasks.append(deepcopy(original_tasks[task])) |
|
|
|
|
|
|
|
|
new_workflow_config = { |
|
|
"goal": original_workflow_config["goal"], |
|
|
"tasks": new_tasks |
|
|
} |
|
|
|
|
|
|
|
|
        new_graph = SequentialWorkFlowGraph.from_dict(new_workflow_config)
|
|
return new_graph |
|
|
|
|
|
def parse_workflow_python_repr(self, repr: str) -> WorkFlowGraph: |
|
|
""" |
|
|
Parse the workflow from the python representation. The input format is: |
|
|
steps = [ |
|
|
{"name": task_name, "args": [input1, input2, ...],"outputs": [output1, output2, ...]}, |
|
|
{"name": another_task_name, "args": [input1, input2, ...],"outputs": [output1, output2, ...]}, |
|
|
... |
|
|
] |
|
|
""" |
|
|
|
|
code_block = regex.search(r'```python\s*(.*?)\s*```', repr, regex.DOTALL) |
|
|
if not code_block: |
|
|
raise ValueError("No Python code block found in the representation") |
|
|
code_block = code_block.group(1).strip() |
|
|
|
|
        # `ast.literal_eval` only accepts Python literals, which is safer than `eval` for LLM output.
        steps = ast.literal_eval(code_block.replace("steps = ", "").strip())
|
|
|
|
|
new_graph = self.create_workflow_graph_from_steps(steps=steps) |
|
|
return new_graph |
|
|
|
|
|
|
|
|
def parse_workflow_yaml_repr(self, repr: str) -> WorkFlowGraph: |
|
|
""" |
|
|
Parse the workflow from the yaml representation. The input format is: |
|
|
- name: task_name |
|
|
args: |
|
|
- input1 |
|
|
- input2 |
|
|
outputs: |
|
|
- output1 |
|
|
""" |
|
|
try: |
|
|
|
|
|
match = regex.search(r'```yaml\s*(.*?)\s*```', repr, regex.DOTALL) |
|
|
if not match: |
|
|
raise ValueError("No YAML code block found in the representation") |
|
|
yaml_block = match.group(1).strip() |
|
|
steps = yaml.safe_load(yaml_block) |
|
|
|
|
new_graph = self.create_workflow_graph_from_steps(steps=steps) |
|
|
return new_graph |
|
|
except Exception as e: |
|
|
logger.warning(f"Failed to parse workflow string: {e}. Return the original workflow.") |
|
|
|
|
|
return self.graph |
|
|
|
|
|
def parse_workflow_code_repr(self, repr: str) -> WorkFlowGraph: |
|
|
""" |
|
|
Parse the workflow from the code representation. |
|
|
The input format is: |
|
|
task_name(input1, input2, ...) -> output1, output2, ... |
|
|
another_task_name(input1, input2, ...) -> output1, output2, ... |
|
|
... |
|
|
""" |
|
|
try: |
|
|
|
|
|
match = regex.search(r'```code\s*(.*?)\s*```', repr, regex.DOTALL) |
|
|
if not match: |
|
|
raise ValueError("No code block found in the representation") |
|
|
code_block = match.group(1).strip() |
|
|
lines = [line.strip() for line in code_block.split("\n") if line.strip() and "->" in line] |
|
|
steps = [] |
|
|
for line in lines: |
|
|
|
|
|
line = regex.sub(r'^\d+\.\s*', '', line) |
|
|
func_part, output_part = line.split('->') |
|
|
func_part = func_part.strip() |
|
|
name = func_part[:func_part.index('(')] |
|
|
args_str = func_part[func_part.index('(') + 1:func_part.rindex(')')] |
|
|
args = [arg.strip() for arg in args_str.split(',') if arg.strip()] |
|
|
outputs = [out.strip() for out in output_part.split(',') if out.strip()] |
|
|
step = {"name": name, "args": args, "outputs": outputs} |
|
|
steps.append(step) |
|
|
if not steps: |
|
|
raise ValueError("No steps found in the workflow.") |
|
|
new_graph = self.create_workflow_graph_from_steps(steps=steps) |
|
|
return new_graph |
|
|
except Exception as e: |
|
|
logger.warning(f"Failed to parse workflow string: {e}. Return the original workflow.") |
|
|
|
|
|
return self.graph |
|
|
|
|
|
def parse_workflow_bpmn_repr(self, repr: str) -> WorkFlowGraph: |
|
|
""" |
|
|
Parse the workflow from the BPMN XML representation. |
|
|
|
|
|
The input format is BPMN XML with: |
|
|
- task elements defining the tasks |
|
|
- sequenceFlow elements defining the order of tasks |
|
|
|
|
|
Will extract ordered task names from the sequence flows and create a workflow. |
|
|
""" |
|
|
try: |
|
|
|
|
|
match = regex.search(r'```bpmn\s*(.*?)\s*```', repr, regex.DOTALL) |
|
|
if not match: |
|
|
raise ValueError("No BPMN code block found in the representation") |
|
|
bpmn_block = match.group(1).strip() |
|
|
|
|
|
root = ET.fromstring(bpmn_block) |
|
|
|
|
|
|
|
|
ns = {'bpmn': 'http://www.omg.org/spec/BPMN/20100524/MODEL'} |
|
|
|
|
|
|
|
|
            process = root.find('bpmn:process', ns)
            if process is None:
                process = root.find('process')
|
|
|
|
|
if process is None: |
|
|
raise ValueError("No process element found in BPMN XML") |
|
|
|
|
|
|
|
|
tasks = {} |
|
|
|
|
|
for task in process.findall("bpmn:task", ns): |
|
|
tasks[task.get('id')] = task.get('name') |
|
|
|
|
|
|
|
|
flows = {} |
|
|
ordered_tasks = [] |
|
|
current_ref = 'start' |
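            # Build a sourceRef -> targetRef map from the sequence flows, then walk the chain
            # starting from the "start" event to recover the task order.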
|
|
|
|
|
|
|
|
|
|
|
for flow in process.findall("bpmn:sequenceFlow", ns): |
|
|
flows[flow.get('sourceRef')] = flow.get('targetRef') |
|
|
|
|
|
|
|
|
while current_ref in flows: |
|
|
next_ref = flows[current_ref] |
|
|
if next_ref in tasks: |
|
|
ordered_tasks.append(tasks[next_ref]) |
|
|
current_ref = next_ref |
|
|
|
|
|
|
|
|
new_graph = self.create_workflow_graph_from_task_names(task_titles=ordered_tasks) |
|
|
return new_graph |
|
|
|
|
|
except Exception as e: |
|
|
logger.warning(f"Failed to parse BPMN workflow string: {e}. Return the original workflow.") |
|
|
|
|
|
return self.graph |
|
|
|
|
|
def parse_workflow_core_repr(self, repr: str) -> WorkFlowGraph: |
|
|
""" |
|
|
Parse the workflow from the Core representation. |
|
|
|
|
|
The input format is: |
|
|
Step 1::: Process ::: Task Name:::next::Step 2 |
|
|
Step 2::: Process ::: Another Task:::next::Step 3 |
|
|
... |
|
|
Step N::: Terminal ::: End of Workflow::: |
|
|
|
|
|
Will extract task names from Process steps and create a workflow. |
|
|
""" |
|
|
try: |
|
|
|
|
|
match = regex.search(r'```core\s*(.*?)\s*```', repr, regex.DOTALL) |
|
|
if not match: |
|
|
raise ValueError("No core code block found in the representation") |
|
|
core_block = match.group(1).strip() |
|
|
|
|
|
lines = [line.strip() for line in core_block.split('\n') if line.strip()] |
|
|
|
|
|
|
|
|
flows = {} |
|
|
tasks = {} |
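            # `tasks` maps a step label (e.g. "Step 1") to its task title; `flows` maps a step
            # label to the label of the next step.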
|
|
|
|
|
|
|
|
for line in lines: |
|
|
parts = line.split(':::') |
|
|
current_step = parts[0].strip() |
|
|
step_type = parts[1].strip() |
|
|
|
|
|
if step_type == 'Process': |
|
|
|
|
|
task_title = parts[2].strip() |
|
|
tasks[current_step] = task_title |
|
|
if len(parts) > 3 and "next" in parts[3]: |
|
|
next_step = parts[3].split("::")[-1].strip() |
|
|
flows[current_step] = next_step |
|
|
elif step_type == 'Terminal': |
|
|
flows[current_step] = None |
|
|
|
|
|
|
|
|
ordered_tasks = [] |
|
|
current_step = 'Step 1' |
|
|
|
|
|
while current_step in flows: |
|
|
if current_step in tasks: |
|
|
ordered_tasks.append(tasks[current_step]) |
|
|
current_step = flows[current_step] |
|
|
|
|
|
new_graph = self.create_workflow_graph_from_task_names(task_titles=ordered_tasks) |
|
|
return new_graph |
|
|
|
|
|
except Exception as e: |
|
|
logger.warning(f"Failed to parse Core workflow string: {e}. Return the original workflow.") |
|
|
|
|
|
return self.graph |
|
|
|
|
|
|
|
|
class QASimplePromptBreeder: |
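    """
    A prompt-breeding helper that uses an LLM to mutate, critique, and rewrite workflow prompts,
    optionally grounding the critique in the evaluator's recorded questions, predictions, and scores.
    """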
|
|
|
|
|
    def __init__(self, llm: BaseLLM, evaluator: Optional[Evaluator] = None, **kwargs):
|
|
self.llm = llm |
|
|
self.evaluator = evaluator |
|
|
self.history_log = [] |
|
|
self.kwargs = kwargs |
|
|
|
|
|
def generate_mutation_prompt(self, task_description: str, **kwargs) -> str: |
|
|
""" |
|
|
Generate the mutation prompt for optimization. |
|
|
""" |
|
|
thinking_style = random.choice(thinking_styles) |
|
|
|
|
|
hyper_mutation_prompt = "Please generate a improved prompts based on the following information. " + "\n\nProblem Description: " + task_description + ".\n" + "Output: " |
|
|
|
|
|
        try:
            mutation_prompt = self.llm.generate(
                prompt=hyper_mutation_prompt,
                system_message="You are a helpful assistant. Do not generate harmful content. ",
            ).content
        except Exception:
            # Retry once if the first LLM call fails.
            mutation_prompt = self.llm.generate(
                prompt=hyper_mutation_prompt,
                system_message="You are a helpful assistant. Do not generate harmful content. ",
            ).content
|
|
return mutation_prompt |
|
|
|
|
|
def get_mutation_prompt(self, task_description: str, order: Literal["zero-order", "first-order"], **kwargs) -> str: |
|
|
""" |
|
|
Get the mutation prompt for optimization. |
|
|
""" |
|
|
if order == "zero-order": |
|
|
mutation_prompt = self.generate_mutation_prompt(task_description=task_description) |
|
|
elif order == "first-order": |
|
|
mutation_prompt = random.choice(mutation_prompts) |
|
|
else: |
|
|
raise ValueError(f"Invalid order: {order}. The order should be either 'zero-order' or 'first-order'.") |
|
|
return mutation_prompt |
|
|
|
|
|
def generate_prompt(self, task_description: str, prompt: str, order: Literal["zero-order", "first-order"], **kwargs) -> str: |
|
|
""" |
|
|
Generate the prompt for optimization. |
|
|
|
|
|
Args: |
|
|
task_description (str): The description of the task, normally the goal of the workflow. |
|
|
prompt (str): The prompt to optimize. |
|
|
order (Literal["zero-order", "first-order"]): The order of the mutation prompt. |
|
|
|
|
|
Returns: |
|
|
str: The optimized prompt. |
|
|
""" |
|
|
mutation_prompt = self.get_mutation_prompt(task_description=task_description, order=order) |
|
|
prompt = mutation_prompt + "\n\nINSTRUCTION:\n\n" + prompt |
|
|
|
|
|
new_prompt = self.llm.generate( |
|
|
prompt=prompt, |
|
|
system_message="You are a helpful assistant", |
|
|
).content |
|
|
return new_prompt |
|
|
|
|
|
def critic_and_update_prompt(self, task_description: str, prompt: str, order: Literal["zero-order", "first-order"], scorer=None, calltime=1, **kwargs) -> str: |
|
|
""" |
|
|
Generate the prompt for optimization. |
|
|
|
|
|
Args: |
|
|
task_description (str): The description of the task, normally the goal of the workflow. |
|
|
prompt (str): The prompt to optimize. |
|
|
order (Literal["zero-order", "first-order"]): The order of the mutation prompt. |
|
|
|
|
|
Returns: |
|
|
str: The optimized prompt. |
|
|
""" |
|
|
|
|
|
        problem_list = ""
|
|
for item in self.evaluator._evaluation_records.keys(): |
|
|
problem_s = "Questions: " + self.evaluator._evaluation_records[item]['trajectory'][0].content['question']+'\n' |
|
|
prediction_s = "Predictions: " + self.evaluator._evaluation_records[item]['prediction']+'\n' |
|
|
solution_s = "Solutions: " + self.evaluator._evaluation_records[item]['label']+'\n' |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
metric_s = "Score: " + str(self.evaluator._evaluation_records[item]['metrics']['acc']) + "\n" |
|
|
            if self.evaluator._evaluation_records[item]['metrics']['acc'] == 0:
|
|
metric_s += "Error reason: Computation result is incorrect." |
|
|
else: |
|
|
metric_s += "The solution is correct." |
|
|
|
|
|
joint_s = problem_s + prediction_s + solution_s + metric_s |
|
|
|
|
|
problem_list += joint_s |
|
|
|
|
|
        if calltime == 1:
|
|
critic_prompt = '''You are a workflow critic, not a problem solver. Given a question, its final answer, the execution history, and the workflow trajectory, your task is to evaluate the quality of the workflow used for question answering. Identify only problems, including: |
|
|
(1) Structural flaws in the workflow (e.g., incorrect step ordering, missing steps, invalid assumptions, flawed control flow, or absent validation), |
|
|
(2) Failures or inconsistencies exposed by the execution history or trajectory (e.g., errors, contradictions, premature termination, or unhandled cases), and |
|
|
(3) Incorrect, misleading, ambiguous, or underspecified content in prompts or intermediate steps. |
|
|
Do not solve the question, do not propose fixes or improvements, and do not restate or evaluate the correctness of the final answer itself. |
|
|
Output one single paragraph consisting of clearly enumerated or bullet-style problem statements only. Be extremely concise, critical, and factual. The workflow to critique is as follows: ''' |
|
|
question_prompt = "The questions, predictions, solutions, and evaluated metrics based on this workflow is: " + problem_list |
|
|
|
|
|
critic_out = self.llm.generate( |
|
|
prompt=prompt+question_prompt, |
|
|
system_message=critic_prompt, |
|
|
).content |
|
|
            logger.info(critic_out)
|
|
else: |
|
|
            critic_prompt_outlist = '''Please summarize the following problems in one paragraph. Be very concise and make distinct points.\n'''
|
|
for item in range(calltime): |
|
|
critic_prompt = "You are a workflow critic, not a solver. Given a question, its answer, the execution history, and the workflow trajectory, evaluate the workflow used for question answering. Identify only problems, including: (1) structural flaws in the workflow (e.g., incorrect step ordering, missing validation, flawed control flow), (2) failures revealed by the execution history or trajectory, and (3) incorrect, misleading, or ambiguous information in the prompts. Do not attempt to solve the question, do not suggest fixes, and do not restate the answer. Return one single paragraph listing the problems as clearly numbered or bullet-style points, and be extremely concise. The previous workflow is: " |
|
|
question_prompt = "The questions, solutions, and evaluated metrics based on this workflow is: " + problem_list |
|
|
critic_out = self.llm.generate( |
|
|
prompt=prompt+question_prompt, |
|
|
system_message=critic_prompt, |
|
|
).content |
|
|
critic_prompt_outlist = critic_prompt_outlist + f"Detected Issue {item+1}:" + critic_out +"\n" |
|
|
critic_out = self.llm.generate( |
|
|
prompt=critic_prompt_outlist, |
|
|
system_message="You are an expert in summarizing information and data.", |
|
|
).content |
|
|
            logger.info(critic_out)
|
|
|
|
|
        if scorer is None:
            prompt = "The detected issues are:\n" + critic_out + "\nYou should always improve the workflow by correcting the issues without changing the inputs and outputs of nodes in the workflow. You can remove redundant agents. You should keep the graph executable.\n" + "\n\nThe original workflow is:\n\n" + prompt
|
|
else: |
|
|
prompt = f'''Detected issues: |
|
|
{critic_out} |
|
|
|
|
|
Your task is to design an improved agent workflow that resolves the issues above while strictly preserving the original workflow’s external behavior. |
|
|
|
|
|
Performance objective: The revised workflow should increase the model performance score according to {scorer}. |
|
|
|
|
|
Original workflow: |
|
|
{prompt} |
|
|
|
|
|
Constraints: |
|
|
|
|
|
Do not change the inputs or outputs of any existing node. |
|
|
|
|
|
You may remove redundant or unnecessary agents, but must not introduce breaking dependencies. |
|
|
|
|
|
You may reorder, merge, or refactor internal steps only if the workflow remains fully executable. |
|
|
|
|
|
The resulting workflow must form a valid, connected, and runnable graph. |
|
|
|
|
|
Do not modify the task definition or evaluation metric. |
|
|
|
|
|
Output requirements: |
|
|
|
|
|
Output only the revised workflow definition. |
|
|
|
|
|
Do not include explanations, justifications, or commentary. |
|
|
|
|
|
Your output:''' |
|
|
|
|
|
new_prompt = self.llm.generate( |
|
|
prompt=prompt, |
|
|
system_message='''You are a Graph Optimization Agent. Your objective is to iteratively improve the performance of an agent workflow graph by correcting identified issues and optimizing its structure. |
|
|
|
|
|
You may only perform the following operations: Reorder existing agents. Remove redundant or unnecessary agents. Add new agents. |
|
|
|
|
|
Hard constraints (must not be violated): Do not change agent names, agent inputs, or agent outputs. Do not modify the task name. Do not change the input of the first node. Every agent in the resulting workflow must accept the **question** as input directly. The workflow must remain a valid, connected, and executable graph at all times. You should only use one prompt. |
|
|
|
|
|
Your output must be only the revised workflow definition, with no explanations or commentary.''', |
|
|
).content |
|
|
        logger.info(new_prompt)
|
|
return new_prompt |
|
|
|
def update_dev_set(dataset):
    """
    Randomly sample 50 examples from the benchmark's full dev split to refresh the working dev set.
    """
    permutation = np.random.permutation(len(dataset._dev_data_full))
    full_data = dataset._dev_data_full
    dev_data = [full_data[idx] for idx in permutation[:50]]
    return dev_data
|
|
|
|
|
class QASTRUCTUREOptimizer(Optimizer): |
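    """
    SEW-style optimizer for QA workflows. At each step it rewrites the workflow structure and/or the
    per-task prompts using LLM-generated critiques, evaluates the candidate on the dev split, and keeps
    snapshots so the best-scoring graph can be restored at the end.
    """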
|
|
|
|
|
graph: Union[WorkFlowGraph, ActionGraph] = Field(description="The workflow to optimize.") |
|
|
repr_scheme: str = Field(default="python", description="The scheme to represent the workflow.") |
|
|
optimize_mode: Literal["all", "structure", "prompt"] = Field(default="all", description="The mode to optimize the workflow.") |
|
|
order: Literal["zero-order", "first-order"] = Field(default="zero-order", description="Whether to use zero-order (using hyper-mutation prompt) or first-order (using mutation prompt) optimization.") |
|
|
|
|
|
    calltime: int = Field(default=1, description="Number of critique passes per optimization step.")
    num_workers: int = Field(default=1, description="Number of parallel workers used for evaluation.")
|
|
def init_module(self, **kwargs): |
|
|
self._snapshot: List[dict] = [] |
|
|
        self._prompt_breeder = QASimplePromptBreeder(llm=self.llm, evaluator=self.evaluator)
|
|
self._convergence_check_counter = 0 |
|
|
self._best_score = float("-inf") |
|
|
|
|
|
self._prompt_dict = {} |
|
|
if isinstance(self.graph, ActionGraph): |
|
|
if self.optimize_mode != "prompt": |
|
|
raise ValueError( |
|
|
f"{type(self).__name__} only support prompt optimization when `graph` is an `ActionGraph`. " |
|
|
f"The `optimize_mode` should be set to `prompt`, but got {self.optimize_mode}." |
|
|
) |
|
|
|
|
|
def optimize(self, dataset: Benchmark, **kwargs): |
|
|
|
|
|
if isinstance(self.graph, WorkFlowGraph): |
|
|
logger.info(f"Optimizing the {type(self.graph).__name__} workflow with {self.repr_scheme} representation.") |
|
|
elif isinstance(self.graph, ActionGraph): |
|
|
logger.info(f"Optimizing the {type(self.graph).__name__} graph ...") |
|
|
graph: Union[WorkFlowGraph, ActionGraph] = self.graph |
|
|
logger.info("Run initial evaluation on the original workflow ...") |
|
|
with suppress_logger_info(): |
|
|
metrics = self.evaluate(dataset, eval_mode="dev", graph=graph) |
|
|
        self._prompt_breeder = QASimplePromptBreeder(llm=self.llm, evaluator=self.evaluator)
|
|
logger.info(f"Initial metrics: {metrics}") |
|
|
self.log_snapshot(graph=graph, metrics=metrics) |
|
|
|
|
|
set_scorer = None |
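        # If the caller sets `provided_scorer`, pass the initial metrics to the structure rewrite so the
        # LLM is explicitly asked to improve on this score.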
|
|
if kwargs["provided_scorer"] == True: |
|
|
set_scorer = metrics |
|
|
|
|
|
for i in range(self.max_steps): |
|
|
|
graph = self.step(set_scorer=set_scorer, step=i) |
|
|
|
|
|
|
|
|
if (i + 1) % self.eval_every_n_steps == 0: |
|
|
logger.info(f"Evaluate the workflow at step {i+1} ...") |
|
|
with suppress_logger_info(): |
|
|
metrics = self.evaluate(dataset, eval_mode="dev") |
|
|
logger.info(f"Step {i+1} metrics: {metrics}") |
|
|
self.log_snapshot(graph=graph, metrics=metrics) |
|
|
print("randomly update dataset") |
|
|
self.dataset._dev_data = update_dev_set(self.dataset) |
|
|
self.dataset._train_data = update_dev_set(self.dataset) |
|
|
|
|
|
if i == self.max_steps - 1: |
|
|
logger.info(f"Reach the maximum number of steps {self.max_steps}. Stop the optimization.") |
|
|
|
|
|
|
|
|
logger.info("Restore the best graph from the snapshot ...") |
|
|
self.restore_best_graph() |
|
|
|
|
|
def step(self, **kwargs) -> Union[WorkFlowGraph, ActionGraph]: |
|
|
""" |
|
|
Take a step of optimization and return the optimized graph. |
|
|
""" |
|
|
graph = self._select_graph_with_highest_score(return_metrics=False) |
|
|
if isinstance(graph, WorkFlowGraph): |
|
|
new_graph = self._workflow_graph_step(graph, kwargs["set_scorer"], kwargs["step"]) |
|
|
elif isinstance(graph, ActionGraph): |
|
|
            new_graph = self._action_graph_step(graph)
|
|
else: |
|
|
raise ValueError(f"Invalid graph type: {type(graph)}. The graph should be an instance of `WorkFlowGraph` or `ActionGraph`.") |
|
|
return new_graph |
|
|
|
|
|
def evaluate( |
|
|
self, |
|
|
dataset: Benchmark, |
|
|
eval_mode: str = "test", |
|
|
graph: Optional[Union[WorkFlowGraph, ActionGraph]] = None, |
|
|
indices: Optional[List[int]] = None, |
|
|
sample_k: Optional[int] = None, |
|
|
**kwargs |
|
|
) -> dict: |
|
|
""" |
|
|
Evaluate the workflow. If `graph` is provided, use the provided graph for evaluation. Otherwise, use the graph in the optimizer. |
|
|
|
|
|
Args: |
|
|
dataset (Benchmark): The dataset to evaluate the workflow on. |
|
|
eval_mode (str): The evaluation mode. Choices: ["test", "dev", "train"]. |
|
|
graph (Union[WorkFlowGraph, ActionGraph], optional): The graph to evaluate. If not provided, use the graph in the optimizer. |
|
|
indices (List[int], optional): The indices of the data to evaluate the workflow on. |
|
|
sample_k (int, optional): The number of data to evaluate the workflow on. If provided, a random sample of size `sample_k` will be used. |
|
|
|
|
|
Returns: |
|
|
dict: The metrics of the workflow evaluation. |
|
|
""" |
|
|
self.dataset = dataset |
|
|
graph = graph if graph is not None else self.graph |
|
|
agent_manager = self.evaluator.agent_manager |
|
|
agent_manager.add_agents_from_workflow(graph, llm_config=self.llm.config) |
|
|
|
|
|
|
|
|
self.evaluator = Evaluator(llm=self.llm, agent_manager=agent_manager, collate_func=self.collate_func, num_workers=self.num_workers, verbose=True) |
|
|
self.evaluator.dataname = self.dataset.dataname |
|
|
metrics_list = [] |
|
|
for i in range(self.eval_rounds): |
|
|
eval_info = [ |
|
|
f"[{type(graph).__name__}]", |
|
|
f"Evaluation round {i+1}/{self.eval_rounds}", |
|
|
f"Mode: {eval_mode}" |
|
|
] |
|
|
if indices is not None: |
|
|
eval_info.append(f"Indices: {len(indices)} samples") |
|
|
if sample_k is not None: |
|
|
eval_info.append(f"Sample size: {sample_k}") |
|
|
logger.info(" | ".join(eval_info)) |
|
|
|
|
metrics = self.evaluator.evaluate( |
|
|
graph=graph, |
|
|
benchmark=dataset, |
|
|
eval_mode=eval_mode, |
|
|
indices=indices, |
|
|
sample_k=sample_k, |
|
|
**kwargs |
|
|
) |
|
|
metrics_list.append(metrics) |
|
|
avg_metrics = self.evaluator._calculate_average_score(metrics_list) |
|
|
self.dataset = dataset |
|
|
self.evaluator.error_list = deepcopy(self.dataset.error_list) |
|
|
self.dataset.error_list = {} |
|
|
|
|
|
return avg_metrics |
|
|
|
|
|
|
|
|
    def group_eval(
        self,
        dataset: Benchmark,
        eval_mode: str = "test",
        graph: Optional[Union[WorkFlowGraph, ActionGraph]] = None,
        indices: Optional[List[int]] = None,
        sample_k: Optional[int] = None,
        iteritem: int = 5
    ):
        """
        Evaluate every test example `iteritem` times with the given graph and collect the per-example results.
        """
        self.evaluator._evaluation_records.clear()
        result_all = []
        for data in dataset._test_data:
            result_list = []
            for _ in range(iteritem):
                results = self._evaluate_graph(graph=graph, data=data, benchmark=dataset)
                result_list.append(results)
            result_all.append(result_list)
        return result_all
|
|
|
|
|
|
|
|
def log_snapshot(self, graph: Union[WorkFlowGraph, ActionGraph], metrics: dict): |
|
|
|
|
|
if isinstance(graph, WorkFlowGraph): |
|
|
graph_info = graph.get_graph_info() |
|
|
elif isinstance(graph, ActionGraph): |
|
|
|
|
|
graph_info = graph |
|
|
else: |
|
|
raise ValueError(f"Invalid graph type: {type(graph)}. The graph should be an instance of `SequentialWorkFlowGraph` or `ActionGraph`.") |
|
|
|
|
|
self._snapshot.append( |
|
|
{ |
|
|
"index": len(self._snapshot), |
|
|
"graph": deepcopy(graph_info), |
|
|
"metrics": metrics, |
|
|
} |
|
|
) |
|
|
|
|
|
def _select_graph_with_highest_score(self, return_metrics: bool = False) -> Union[SequentialWorkFlowGraph, ActionGraph]: |
|
|
|
|
|
if len(self._snapshot) == 0: |
|
|
return self.graph |
|
|
snapshot_scores = [np.mean(list(snapshot["metrics"].values())) for snapshot in self._snapshot] |
|
|
best_index = np.argmax(snapshot_scores) |
|
|
|
|
|
if isinstance(self.graph, WorkFlowGraph): |
|
|
graph = WorkFlowGraph.from_dict(self._snapshot[best_index]["graph"]) |
|
|
elif isinstance(self.graph, ActionGraph): |
|
|
|
|
|
graph = self._snapshot[best_index]["graph"] |
|
|
else: |
|
|
raise ValueError(f"Invalid graph type: {type(self.graph)}. The graph should be an instance of `SequentialWorkFlowGraph` or `ActionGraph`.") |
|
|
|
|
|
if return_metrics: |
|
|
return graph, self._snapshot[best_index]["metrics"] |
|
|
return graph |
|
|
|
|
|
def restore_best_graph(self): |
|
|
|
|
|
best_graph, best_metrics = self._select_graph_with_highest_score(return_metrics=True) |
|
|
logger.info(f"Restore the best graph from snapshot with metrics {best_metrics} ...") |
|
|
self.graph = best_graph |
|
|
|
|
|
def _wfg_structure_optimization_step(self, graph: WorkFlowGraph, scorer, step) -> WorkFlowGraph: |
|
|
""" |
|
|
        Optimize the structure of the workflow graph and return the optimized graph.
|
|
Args: |
|
|
graph (SequentialWorkFlowGraph): The workflow graph to optimize. |
|
|
|
|
|
Returns: |
|
|
SequentialWorkFlowGraph: The optimized workflow graph. |
|
|
""" |
|
|
graph_scheme = STRUCTUREWorkFlowScheme(graph=graph) |
|
|
graph_repr = graph_scheme.convert_to_scheme(scheme=self.repr_scheme) |
|
|
if self.repr_scheme == "python": |
|
|
output_format = "\n\nALWAYS wrap the refined workflow in ```python\n``` format and DON'T include any other text within the code block!" |
|
|
elif self.repr_scheme == "yaml": |
|
|
output_format = "\n\nALWAYS wrap the refined workflow in ```yaml\n``` format and DON'T include any other text within the code block!" |
|
|
elif self.repr_scheme == "code": |
|
|
output_format = "\n\nALWAYS wrap the refined workflow in ```code\n``` format and DON'T include any other text within the code block!" |
|
|
elif self.repr_scheme == "core": |
|
|
output_format = "\n\nALWAYS wrap the refined workflow in ```core\n``` format and DON'T include any other text within the code block!" |
|
|
elif self.repr_scheme == "bpmn": |
|
|
output_format = "\n\nALWAYS wrap the refined workflow in ```bpmn\n``` format and DON'T include any other text within the code block!" |
|
|
else: |
|
|
raise ValueError(f"Invalid representation scheme: {self.repr_scheme}. The scheme should be one of {VALID_SCHEMES}.") |
|
|
prompt = "Task Description: " + graph.goal + "\n\nWorkflow Steps: " + graph_repr + output_format |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
new_graph_repr = self._prompt_breeder.critic_and_update_prompt(task_description=graph.goal, prompt=prompt, order=self.order, scorer=scorer, calltime=self.calltime) |
|
|
new_graph = graph_scheme.parse_from_scheme(scheme=self.repr_scheme, repr=new_graph_repr) |
|
|
return new_graph |
|
|
|
|
|
def _wfg_prompt_optimization_step(self, graph: WorkFlowGraph, scorer=None) -> WorkFlowGraph: |
|
|
|
|
|
task_description = graph.goal |
|
|
graph_scheme = STRUCTUREWorkFlowScheme(graph=graph) |
|
|
graph_repr = graph_scheme.convert_to_scheme(scheme=self.repr_scheme) |
|
|
graph_info = graph.get_graph_info() |
|
|
|
|
|
|
|
|
        problem_list = ""
|
|
for item in self.evaluator._evaluation_records.keys(): |
|
|
problem_s = "Questions: " + self.evaluator._evaluation_records[item]['trajectory'][0].content['question']+'\n' |
|
|
prediction_s = "Predictions: " + self.evaluator._evaluation_records[item]['prediction']+'\n' |
|
|
solution_s = "Solutions: " + self.evaluator._evaluation_records[item]['label']+'\n' |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
metric_s = "Score: " + str(self.evaluator._evaluation_records[item]['metrics']['acc']) + "\n" |
|
|
            if self.evaluator._evaluation_records[item]['metrics']['acc'] == 0:
|
|
metric_s += "Error reason: Computation result is incorrect." |
|
|
else: |
|
|
metric_s += "The solution is correct." |
|
|
|
|
|
joint_s = problem_s + prediction_s + solution_s + metric_s |
|
|
|
|
|
problem_list += joint_s |
|
|
        logger.debug(problem_list)
|
|
for i, task in enumerate(graph_info["tasks"]): |
|
|
if task['name'] not in list(self._prompt_dict.keys()): |
|
|
self._prompt_dict[task['name']] = [] |
|
|
|
|
|
            logger.debug(task)
|
|
            try:
                task['prompt'] = task["prompt_template"]["instruction"]
            except (KeyError, TypeError):
                # Fall back to the task's existing `prompt` field when no prompt template is available.
                pass
|
|
original_prompt = task["prompt"] |
|
|
optimization_prompt = "Task Description: " + task_description + "\n\nWorkflow Steps:\n" + graph_repr + f"\n\nINSTRUCTION for the {i+1}-th task:\n\"\"\"\n" + original_prompt + "\n\"\"\"" |
|
|
error_prompt = f'''{optimization_prompt} |
|
|
|
|
|
Agent name: {task['name']} |
|
|
|
|
|
Observed evidence: |
|
|
The following questions, model predictions, ground-truth solutions, tests, and evaluation metrics were produced using this agent and its prompt: |
|
|
{problem_list} |
|
|
|
|
|
Your task is to critically evaluate the original agent prompt, using the evidence above. Identify only problems, including: |
|
|
(1) Structural or logical flaws in how the prompt guides reasoning or execution (e.g., incorrect step ordering, missing validation instructions, flawed control flow, or implicit assumptions), |
|
|
(2) Failures or inconsistencies revealed by the execution history or trajectory (e.g., systematic errors, contradictions, brittle behavior, or poor generalization), and |
|
|
(3) Incorrect, misleading, ambiguous, or underspecified language in the prompt that could degrade performance or reliability. |
|
|
|
|
|
Do not solve any task, do not propose fixes or improvements, and do not restate or judge the correctness of answers. |
|
|
|
|
|
Output requirements: |
|
|
|
|
|
Return one single paragraph only. |
|
|
|
|
|
List problems as clearly enumerated or bullet-style points. |
|
|
|
|
|
Be extremely concise, precise, and critical.''' |
|
|
critic_issues = self.llm.generate(error_prompt).content |
|
|
optimization_prompt += f"The new prompts should consider fixing the issues by adjusting the prompt content: {critic_issues}. You should not change the original role and task of the assigned agent." |
|
|
            if self._prompt_dict[task['name']]:
|
|
prev_prompt = "\n".join(self._prompt_dict[task['name']]) |
|
|
optimization_prompt += f"The previous prompts are: {prev_prompt}\nYou should also fix the problems in these prompts." |
|
|
|
|
|
optimization_prompt += f"\n\nGiven the above information, please refine the instruction for the {i+1}-th task.\n" |
|
|
optimization_prompt += r"Note that you must always use bracket (e.g. `{input_name}`, `{code}`, `{question}`) to wrap the inputs of the tasks in your refined instruction. You must ensure the prompts contain all inputs. You cannot change the name of functions.\n" |
|
|
|
|
|
optimization_prompt += "Your prompt should not change the function name and entry_point in the question. Only output the refined instruction and DON'T include any other text!" |
|
|
new_prompt = self._prompt_breeder.generate_prompt(task_description=task_description, prompt=optimization_prompt, order=self.order) |
|
|
graph_info["tasks"][i]["prompt"] = new_prompt |
|
|
|
|
|
|
|
|
|
|
|
self._prompt_dict[task['name']].append(new_prompt) |
|
|
new_graph = SequentialWorkFlowGraph.from_dict(graph_info) |
|
|
return new_graph |
|
|
|
|
def _workflow_graph_step(self, graph: WorkFlowGraph, scorer, step) -> WorkFlowGraph: |
|
|
|
|
|
if self.optimize_mode == "structure" or self.optimize_mode == "all": |
|
|
|
|
|
graph = self._wfg_structure_optimization_step(graph, scorer=scorer, step=step) |
|
|
if self.optimize_mode == "prompt" or self.optimize_mode == "all": |
|
|
|
|
|
graph = self._wfg_prompt_optimization_step(graph, scorer=scorer) |
|
|
|
|
|
return graph |
|
|
|
|
|
def _action_graph_prompt_optimization_step(self, graph: ActionGraph) -> ActionGraph: |
|
|
|
|
|
task_description = graph.description |
|
|
graph_info = graph.get_graph_info() |
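        # The source code of the graph's `execute` method serves as a textual description of the workflow steps.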
|
|
graph_steps = inspect.getsource(getattr(graph, "execute")) |
|
|
for operator_name, operator_info in graph_info["operators"].items(): |
|
|
original_prompt = operator_info["prompt"] |
|
|
optimization_prompt = "Task Description: " + task_description + "\n\nWorkflow Steps:\n" + graph_steps + f"\n\nINSTRUCTION for the `{operator_name}` operator:\n\"\"\"\n" + original_prompt + "\n\"\"\"" |
|
|
optimization_prompt += "\n\nThe interface of the operator is as follows:\n" + operator_info["interface"] |
|
|
optimization_prompt += f"\n\nGiven the above information, please refine the instruction for the `{operator_name}` operator.\n" |
|
|
optimization_prompt += r"Note that you should always use bracket (e.g. `{input_name}`) to wrap the inputs of the operator in your refined instruction, " |
|
|
optimization_prompt += "and the input names should be EXACTLY the same as those defined in the interface. DON'T use bracket to wrap output names." |
|
|
optimization_prompt += "\nOnly output the refined instruction and DON'T include any other text!" |
|
|
new_prompt = self._prompt_breeder.generate_prompt(task_description=task_description, prompt=optimization_prompt, order=self.order) |
|
|
new_prompt = new_prompt.replace("\"", "").strip() |
|
|
graph_info["operators"][operator_name]["prompt"] = new_prompt |
|
|
new_graph = ActionGraph.from_dict(graph_info) |
|
|
return new_graph |
|
|
|
|
|
def _action_graph_step(self, graph: ActionGraph) -> ActionGraph: |
|
|
|
|
|
if self.optimize_mode == "prompt": |
|
|
graph = self._action_graph_prompt_optimization_step(graph) |
|
|
else: |
|
|
raise ValueError(f"{type(self).__name__} only support prompt optimization when `self.graph` is an `ActionGraph` instance. " |
|
|
f"The `optimize_mode` should be set to `prompt`, but got {self.optimize_mode}.") |
|
|
return graph |
|
|
|
|
|
def convergence_check(self, **kwargs) -> bool: |
|
|
|
|
|
if not self._snapshot: |
|
|
logger.warning("No snapshots available for convergence check") |
|
|
return False |
|
|
|
|
|
|
|
|
scores = [np.mean(list(snapshot["metrics"].values())) for snapshot in self._snapshot] |
|
|
current_score = scores[-1] |
|
|
|
|
|
if current_score > self._best_score: |
|
|
self._best_score = current_score |
|
|
self._convergence_check_counter = 0 |
|
|
else: |
|
|
self._convergence_check_counter += 1 |
|
|
|
|
|
if self._convergence_check_counter >= self.convergence_threshold: |
|
|
logger.info(f"Early stopping triggered: No improvement for {self.convergence_threshold} iterations") |
|
|
|
|
|
return True |
|
|
return False |
|
|
|
|
|
def save(self, path: str, ignore: List[str] = []): |
|
|
""" |
|
|
Save the (optimized) workflow graph to a file. |
|
|
|
|
|
Args: |
|
|
path (str): The path to save the workflow graph. |
|
|
ignore (List[str]): The keys to ignore when saving the workflow graph. |
|
|
""" |
|
|
self.graph.save_module(path, ignore=ignore) |
|
|
|