# evoagentx/optimizers/qastructure_optimizer.py
import yaml
import regex
import random
import difflib
import inspect
import numpy as np
from copy import deepcopy
from typing import Any, Dict, List, Literal, Optional, Union

import xml.etree.ElementTree as ET
from pydantic import Field

from evoagentx.models import OpenAILLMConfig, OpenAILLM
from evoagentx.evaluators import Evaluator
from evoagentx.prompts import StringTemplate
from .optimizer import Optimizer
from ..core.logging import logger
from ..models.base_model import BaseLLM
from ..benchmark.benchmark import Benchmark
from ..workflow.action_graph import ActionGraph
from ..core.callbacks import suppress_logger_info
from ..workflow.workflow_graph import SequentialWorkFlowGraph, WorkFlowGraph
from ..prompts.workflow.sew_optimizer import mutation_prompts, thinking_styles

VALID_SCHEMES = ["python", "yaml", "code", "core", "bpmn"]
def find_closest_name(input_name, name_reference):
    """Return the step in ``name_reference`` whose ``task_name`` is closest to ``input_name``."""
    reference_names = [step["task_name"] for step in name_reference]
    matches = difflib.get_close_matches(input_name, reference_names, n=1, cutoff=0.1)
    if not matches:
        raise ValueError(f"No close match found for task name '{input_name}'.")
    return name_reference[reference_names.index(matches[0])]
class STRUCTUREWorkFlowScheme:
    """
    Textual representation scheme of a workflow graph, used by the structure optimizer
    to convert a ``WorkFlowGraph`` to and from the supported schemes (python, yaml, code, core, bpmn).
    """
def __init__(self, graph: WorkFlowGraph, **kwargs):
self.graph = graph # the workflow graph to be transformed
self.kwargs = kwargs
def convert_to_scheme(self, scheme: str) -> str:
"""
Transform the WorkflowGraph to the desired scheme.
"""
if scheme not in VALID_SCHEMES:
raise ValueError(f"Invalid scheme: {scheme}. The scheme should be one of {VALID_SCHEMES}.")
if scheme == "python":
repr = self.get_workflow_python_repr()
elif scheme == "yaml":
repr = self.get_workflow_yaml_repr()
elif scheme == "code":
repr = self.get_workflow_code_repr()
elif scheme == "core":
repr = self.get_workflow_core_repr()
elif scheme == "bpmn":
repr = self.get_workflow_bpmn_repr()
return repr
def parse_from_scheme(self, scheme: str, repr: str) -> WorkFlowGraph:
"""
Parse the SequentialWorkFlowGraph from the given scheme and representation.
"""
if scheme not in VALID_SCHEMES:
raise ValueError(f"Invalid scheme: {scheme}. The scheme should be one of {VALID_SCHEMES}.")
if scheme == "python":
graph = self.parse_workflow_python_repr(repr)
elif scheme == "yaml":
graph = self.parse_workflow_yaml_repr(repr)
elif scheme == "code":
graph = self.parse_workflow_code_repr(repr)
elif scheme == "core":
graph = self.parse_workflow_core_repr(repr)
elif scheme == "bpmn":
graph = self.parse_workflow_bpmn_repr(repr)
return graph
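    # Illustrative round trip (a sketch, assuming an existing WorkFlowGraph instance `wf_graph`):
    #   scheme_handler = STRUCTUREWorkFlowScheme(graph=wf_graph)
    #   repr_str = scheme_handler.convert_to_scheme(scheme="python")
    #   # parse_from_scheme expects the representation wrapped in a matching code fence:
    #   new_graph = scheme_handler.parse_from_scheme(scheme="python", repr="```python\n" + repr_str + "\n```")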
def _get_workflow_repr_info(self) -> List[dict]:
"""
Get the information for the workflow representation.
"""
info = []
for node in self.graph.nodes:
task_name = node.name
input_names = [param.name for param in node.inputs]
output_names = [param.name for param in node.outputs]
task_info = {
"task_name": task_name,
"input_names": input_names,
"output_names": output_names
}
info.append(task_info)
return info
def _convert_to_func_name(self, name: str) -> str:
"""
Convert the task name to the function name.
"""
name = name.lower().strip()
name = name.replace(' ', '_').replace('-', '_')
name = ''.join(c for c in name if c.isalnum() or c == '_')
# Replace multiple consecutive underscores with a single underscore
name = regex.sub(r'_+', "_", name)
# Remove leading/trailing underscores
name = name.strip('_')
return name
def _convert_to_title(self, name: str) -> str:
func_name = self._convert_to_func_name(name)
words = func_name.split('_')
return ' '.join(word.capitalize() for word in words)
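    # Illustrative conversions:
    #   _convert_to_func_name("Data Extraction - Step 1") -> "data_extraction_step_1"
    #   _convert_to_title("data_extraction_step_1")        -> "Data Extraction Step 1"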
def get_workflow_python_repr(self) -> str:
repr_info = self._get_workflow_repr_info()
if not repr_info:
return ""
python_workflow_info = []
for task_info in repr_info:
name = self._convert_to_func_name(task_info['task_name'])
input_names = [f'{input_name}' for input_name in task_info['input_names']]
output_names = [f'{output_name}' for output_name in task_info['output_names']]
python_workflow_info.append(
"{{'name': '{name}', 'args': {args}, 'outputs': {outputs}}}".format(
name=name,
args=input_names,
outputs=output_names
)
)
python_workflow_repr = "steps = [\n" + ",\n".join(python_workflow_info) + "\n]"
# print(python_workflow_repr)
return python_workflow_repr
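    # Illustrative output for a single-task workflow:
    #   steps = [
    #   {'name': 'answer_question', 'args': ['question'], 'outputs': ['answer']}
    #   ]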
def get_workflow_yaml_repr(self) -> str:
repr_info = self._get_workflow_repr_info()
if not repr_info:
return ""
yaml_workflow_info = []
for task_info in repr_info:
name = self._convert_to_func_name(task_info['task_name'])
input_names = "\n".join([f' - {input_name}' for input_name in task_info['input_names']])
output_names = "\n".join([f' - {output_name}' for output_name in task_info['output_names']])
yaml_workflow_info.append(
"- name: {name}\n args:\n{input_names}\n outputs:\n{output_names}".format(
name=name,
input_names=input_names,
output_names=output_names
)
)
yaml_workflow_repr = "\n\n".join(yaml_workflow_info)
return yaml_workflow_repr
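    # Illustrative output for a single-task workflow (exact indentation follows the format strings above):
    #   - name: answer_question
    #     args:
    #       - question
    #     outputs:
    #       - answer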
def get_workflow_code_repr(self) -> str:
repr_info = self._get_workflow_repr_info()
if not repr_info:
return ""
workflow_lines = []
for task_info in repr_info:
# Convert task name to snake_case
name = self._convert_to_func_name(task_info['task_name'])
# Format inputs and outputs
inputs = ", ".join(task_info['input_names'])
outputs = ", ".join(task_info['output_names'])
# Create the line in format: task_name(inputs) -> outputs
line = f"{name}({inputs}) -> {outputs}"
workflow_lines.append(line)
# Join all lines with newlines
workflow_repr = "\n".join(workflow_lines)
return workflow_repr
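    # Illustrative output, one line per task:
    #   answer_question(question) -> answer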
def get_workflow_bpmn_repr(self) -> str:
repr_info = self._get_workflow_repr_info()
if not repr_info:
return ""
# Start the BPMN XML
bpmn_lines = [
'<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">',
'<process id="software_dev_workflow" isExecutable="true">',
' <startEvent id="start" />'
]
# Add tasks
for i, task_info in enumerate(repr_info):
task_name = self._convert_to_func_name(task_info['task_name'])
task_title = self._convert_to_title(task_info['task_name'])
bpmn_lines.append(f' <task id="{task_name}" name="{task_title}" />')
bpmn_lines.append(' <endEvent id="end" />')
bpmn_lines.append('')
bpmn_lines.append(' <!-- Workflow connections -->')
# Add sequence flows
# First flow from start to first task
if repr_info:
first_task_id = self._convert_to_func_name(repr_info[0]['task_name'])
bpmn_lines.append(f' <sequenceFlow id="flow1" sourceRef="start" targetRef="{first_task_id}" />')
# Flows between tasks
for i in range(len(repr_info) - 1):
source_id = self._convert_to_func_name(repr_info[i]['task_name'])
target_id = self._convert_to_func_name(repr_info[i + 1]['task_name'])
flow_num = i + 2
bpmn_lines.append(f' <sequenceFlow id="flow{flow_num}" sourceRef="{source_id}" targetRef="{target_id}" />')
# Last flow from last task to end
if repr_info:
last_task_id = self._convert_to_func_name(repr_info[-1]['task_name'])
flow_num = len(repr_info) + 1
bpmn_lines.append(f' <sequenceFlow id="flow{flow_num}" sourceRef="{last_task_id}" targetRef="end" />')
# Close tags
bpmn_lines.append('</process>')
bpmn_lines.append('</definitions>')
return '\n'.join(bpmn_lines)
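    # Illustrative output skeleton for a single-task workflow (ids and titles depend on the graph):
    #   <definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">
    #   <process id="software_dev_workflow" isExecutable="true">
    #     <startEvent id="start" />
    #     <task id="answer_question" name="Answer Question" />
    #     <endEvent id="end" />
    #     <sequenceFlow id="flow1" sourceRef="start" targetRef="answer_question" />
    #     <sequenceFlow id="flow2" sourceRef="answer_question" targetRef="end" />
    #   </process>
    #   </definitions>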
def get_workflow_core_repr(self) -> str:
repr_info = self._get_workflow_repr_info()
if not repr_info:
return ""
workflow_lines = []
for i, task_info in enumerate(repr_info, 1):
# Convert task name to title case
task_name = self._convert_to_title(task_info['task_name'])
# Create the line with the specified format
next_step = i + 1
line = f"Step {i}::: Process ::: {task_name}:::next::Step {next_step}"
workflow_lines.append(line)
# Add the terminal step
last_step = len(repr_info) + 1
workflow_lines.append(f"Step {last_step}::: Terminal ::: End of Workflow:::")
return "\n".join(workflow_lines)
def _find_task_index(self, step: dict, graph_repr_info: List[dict]) -> int:
"""
Find the index of the task in the original workflow graph. If the task is not found, return -1.
Args:
step (dict): The step of the workflow.
graph_repr_info (List[dict]): The information of the original workflow graph.
Returns:
int: The index of the task.
"""
def _is_task_name_match(task_name: str, another_name: str) -> bool:
return self._convert_to_func_name(task_name) == self._convert_to_func_name(another_name)
def _is_task_inputs_match(task_inputs: List[str], another_inputs: List[str]) -> bool:
return len(set(task_inputs) & set(another_inputs)) == len(task_inputs)
def _is_task_outputs_match(task_outputs: List[str], another_outputs: List[str]) -> bool:
return len(set(task_outputs) & set(another_outputs)) == len(task_outputs)
for i, task in enumerate(graph_repr_info):
# if _is_task_name_match(task["task_name"], step["name"]) and _is_task_inputs_match(task["input_names"], step["args"]) and _is_task_outputs_match(task["output_names"], step["outputs"]):
# return i
if _is_task_name_match(task["task_name"], step["name"]) and _is_task_outputs_match(task["output_names"], step["outputs"]):
return i
return -1
def create_workflow_graph_from_steps(
self,
steps: List[dict]
) -> WorkFlowGraph:
"""
Create a new workflow graph from the steps.
Since both the inputs and outputs are provided, new tasks will be created in the new workflow graph.
It is used for the `python` `yaml` and `code` representations.
Args:
steps (List[dict]): The steps of the workflow. The steps are in the format of:
[
{
"name": str,
"args": List[str],
"outputs": List[str]
}
]
Returns:
SequentialWorkFlowGraph: The new workflow graph.
"""
original_workflow_config = self.graph.get_graph_info()
repr_info = self._get_workflow_repr_info()
new_tasks = []
        # Kept for the (currently disabled) closest-name fallback in the commented block below
        get_known_list = list(repr_info)
for step in steps:
task_index = self._find_task_index(step=step, graph_repr_info=repr_info)
if task_index == -1:
# # create a new task
# task_name = step["name"]
# most_known_step = find_closest_name(task_name, get_known_list)
# most_known_step['name'] = most_known_step['task_name']
# most_known_step['args'] = most_known_step['input_names']
# most_known_step['outputs'] = most_known_step['output_names']
# task_index_new = self._find_task_index(step=most_known_step, graph_repr_info=repr_info)
# print(step)
# print(task_index)
# item_new = deepcopy(original_workflow_config["tasks"][task_index_new])
# item_new["name"] = task_name +str(np.random.randint(0,10000))
# item_new['task_name'] = task_name +str(np.random.randint(0,10000))
# new_tasks.append(item_new)
task_name = step["name"]
task_name = task_name +str(np.random.randint(0,10000))
description = f"Task to {task_name.lower()}. "
if step["args"]:
description += f"Takes {', '.join(step['args'])} as input. "
if step["outputs"]:
description += f"Produces {', '.join(step['outputs'])} as output."
                toolname = step.get("tool_names", None)
new_task = {
"name": task_name,
"description": description,
"inputs": [
{
"name": input_name,
"type": "str",
"description": f"Input parameter {input_name} for {task_name}",
"required":False
} for input_name in step["args"]
],
"outputs": [
{
"name": output_name,
"type": "str",
"description": f"Output parameter {output_name} from {task_name}"
} for output_name in step["outputs"]
],
"prompt": "Your are a task solver.",
# "llm_config": original_workflow_config["tasks"][0]["llm_config"],
"parse_mode": "xml",
# "prompt_template": StringTemplate(instruction="Think step by step to answer the question based on the question context. You should integrate context for answering. You should explain your thinking process in the 'thought' field, and provide the final answer in the 'answer' field.\nFormat your output in xml format, such as <thought>xxx</thought> and <answer>xxx</answer>."),
"tool_names": toolname
}
new_tasks.append(new_task)
else:
# copy the task from the original workflow graph
if original_workflow_config["tasks"][task_index] not in new_tasks:
new_tasks.append(deepcopy(original_workflow_config["tasks"][task_index]))
# create new workflow configuration
new_workflow_config = {
"goal": original_workflow_config["goal"],
"tasks": new_tasks
}
# create new workflow graph
new_graph = SequentialWorkFlowGraph.from_dict(new_workflow_config)
return new_graph
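    # Illustrative call (a sketch with hypothetical step names):
    #   new_graph = scheme_handler.create_workflow_graph_from_steps(steps=[
    #       {"name": "answer_question", "args": ["question"], "outputs": ["answer"]},
    #   ])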
def create_workflow_graph_from_task_names(
self,
task_names: Optional[List[str]] = None,
task_titles: Optional[List[str]] = None
) -> SequentialWorkFlowGraph:
"""
Create a new workflow graph from the task names or titles.
Since only the task names or titles are provided, the tasks in the new workflow graph will be copied from the original workflow graph.
It is used for the `bpmn` and `core` representations.
Args:
task_names (Optional[List[str]]): The names of the tasks.
task_titles (Optional[List[str]]): The titles of the tasks.
Returns:
SequentialWorkFlowGraph: The new workflow graph.
"""
if task_names:
original_workflow_config = self.graph.get_graph_info()
tasks = task_names
original_tasks = {self._convert_to_func_name(task["name"]): task for task in original_workflow_config["tasks"]}
elif task_titles:
original_workflow_config = self.graph.get_graph_info()
tasks = task_titles
original_tasks = {self._convert_to_title(task["name"]): task for task in original_workflow_config["tasks"]}
else:
raise ValueError("No task names or titles provided.")
new_tasks = []
for task in tasks:
if task not in original_tasks:
raise ValueError(f"Task {task} not found in the original workflow.")
new_tasks.append(deepcopy(original_tasks[task]))
# create new workflow configuration
new_workflow_config = {
"goal": original_workflow_config["goal"],
"tasks": new_tasks
}
# create new workflow graph
new_graph = WorkFlowGraph.from_dict(new_workflow_config)
return new_graph
def parse_workflow_python_repr(self, repr: str) -> WorkFlowGraph:
"""
Parse the workflow from the python representation. The input format is:
steps = [
{"name": task_name, "args": [input1, input2, ...],"outputs": [output1, output2, ...]},
{"name": another_task_name, "args": [input1, input2, ...],"outputs": [output1, output2, ...]},
...
]
"""
# extract ```python ```
code_block = regex.search(r'```python\s*(.*?)\s*```', repr, regex.DOTALL)
if not code_block:
raise ValueError("No Python code block found in the representation")
code_block = code_block.group(1).strip()
# relevant_lines = []
# for line in code_block.splitlines():
# line = line.strip()
# if not line or line.startswith("#") or line.startswith("```"):
# continue
# if all(key in line for key in ["name", "args", "outputs"]):
# relevant_lines.append(line)
# steps_str = "[\n" + "\n".join(relevant_lines) + "\n]"
# steps = eval(steps_str)
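        # NOTE: eval() assumes the fenced block is a trusted Python literal list produced by the LLM;
        # ast.literal_eval would be a stricter drop-in if the output is guaranteed to contain literals only.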
steps = eval(code_block.replace("steps = ", "").strip())
# print(steps)
new_graph = self.create_workflow_graph_from_steps(steps=steps)
return new_graph
def parse_workflow_yaml_repr(self, repr: str) -> WorkFlowGraph:
"""
Parse the workflow from the yaml representation. The input format is:
- name: task_name
args:
- input1
- input2
outputs:
- output1
"""
try:
# extract ```yaml ```
match = regex.search(r'```yaml\s*(.*?)\s*```', repr, regex.DOTALL)
if not match:
raise ValueError("No YAML code block found in the representation")
yaml_block = match.group(1).strip()
steps = yaml.safe_load(yaml_block)
# relevant_lines = []
# in_step = False
# for line in yaml_block.splitlines():
# stripped_line = line.strip()
# if stripped_line.startswith('- name:'):
# in_step = True
# relevant_lines.append(line)
# elif in_step and (
# stripped_line.startswith('args:') or
# stripped_line.startswith('outputs:') or
# stripped_line.startswith('- ')
# ):
# relevant_lines.append(line)
# elif not stripped_line:
# in_step = False
# yaml_step = "\n".join(relevant_lines)
# steps = yaml.safe_load(yaml_step)
new_graph = self.create_workflow_graph_from_steps(steps=steps)
return new_graph
except Exception as e:
logger.warning(f"Failed to parse workflow string: {e}. Return the original workflow.")
return self.graph
def parse_workflow_code_repr(self, repr: str) -> WorkFlowGraph:
"""
Parse the workflow from the code representation.
The input format is:
task_name(input1, input2, ...) -> output1, output2, ...
another_task_name(input1, input2, ...) -> output1, output2, ...
...
"""
try:
# extract ```code ```
match = regex.search(r'```code\s*(.*?)\s*```', repr, regex.DOTALL)
if not match:
raise ValueError("No code block found in the representation")
code_block = match.group(1).strip()
lines = [line.strip() for line in code_block.split("\n") if line.strip() and "->" in line]
steps = []
for line in lines:
# Remove any leading numbers and dots (e.g., "1. ")
line = regex.sub(r'^\d+\.\s*', '', line)
func_part, output_part = line.split('->')
func_part = func_part.strip()
name = func_part[:func_part.index('(')]
args_str = func_part[func_part.index('(') + 1:func_part.rindex(')')]
args = [arg.strip() for arg in args_str.split(',') if arg.strip()]
outputs = [out.strip() for out in output_part.split(',') if out.strip()]
step = {"name": name, "args": args, "outputs": outputs}
steps.append(step)
if not steps:
raise ValueError("No steps found in the workflow.")
new_graph = self.create_workflow_graph_from_steps(steps=steps)
return new_graph
except Exception as e:
logger.warning(f"Failed to parse workflow string: {e}. Return the original workflow.")
return self.graph
def parse_workflow_bpmn_repr(self, repr: str) -> WorkFlowGraph:
"""
Parse the workflow from the BPMN XML representation.
The input format is BPMN XML with:
- task elements defining the tasks
- sequenceFlow elements defining the order of tasks
Will extract ordered task names from the sequence flows and create a workflow.
"""
try:
# extract ```bpmn ```
match = regex.search(r'```bpmn\s*(.*?)\s*```', repr, regex.DOTALL)
if not match:
raise ValueError("No BPMN code block found in the representation")
bpmn_block = match.group(1).strip()
# Parse XML string
root = ET.fromstring(bpmn_block)
# Define namespace for BPMN XML
ns = {'bpmn': 'http://www.omg.org/spec/BPMN/20100524/MODEL'}
            # Get the process element (use explicit None checks: Element truthiness depends on child count)
            process = root.find('bpmn:process', ns)
            if process is None:
                process = root.find('process')
            if process is None:
                raise ValueError("No process element found in BPMN XML")
# Create a dictionary of all tasks
tasks = {}
# for task in process.findall('.//task', ns) or process.findall('.//task'):
for task in process.findall("bpmn:task", ns):
tasks[task.get('id')] = task.get('name')
# Get sequence flows and order them
flows = {}
ordered_tasks = []
current_ref = 'start'
# Create dictionary of source -> target
# for flow in process.findall('.//sequenceFlow', ns) or process.findall('.//sequenceFlow'):
for flow in process.findall("bpmn:sequenceFlow", ns):
flows[flow.get('sourceRef')] = flow.get('targetRef')
# Follow the sequence flows to get ordered tasks
while current_ref in flows:
next_ref = flows[current_ref]
if next_ref in tasks: # Only add if it's a task (not end event)
ordered_tasks.append(tasks[next_ref])
current_ref = next_ref
# Create new workflow graph using the ordered task names
new_graph = self.create_workflow_graph_from_task_names(task_titles=ordered_tasks)
return new_graph
except Exception as e:
logger.warning(f"Failed to parse BPMN workflow string: {e}. Return the original workflow.")
return self.graph
def parse_workflow_core_repr(self, repr: str) -> WorkFlowGraph:
"""
Parse the workflow from the Core representation.
The input format is:
Step 1::: Process ::: Task Name:::next::Step 2
Step 2::: Process ::: Another Task:::next::Step 3
...
Step N::: Terminal ::: End of Workflow:::
Will extract task names from Process steps and create a workflow.
"""
try:
# extract ```core ```
match = regex.search(r'```core\s*(.*?)\s*```', repr, regex.DOTALL)
if not match:
raise ValueError("No core code block found in the representation")
core_block = match.group(1).strip()
# Split into lines and remove empty lines
lines = [line.strip() for line in core_block.split('\n') if line.strip()]
# Initialize flows and tasks dictionaries
flows = {} # step -> next_step
tasks = {} # step -> task_title
# First pass: build flows and tasks mappings
for line in lines:
parts = line.split(':::')
current_step = parts[0].strip()
step_type = parts[1].strip()
if step_type == 'Process':
# Extract task title and next step
task_title = parts[2].strip()
tasks[current_step] = task_title
if len(parts) > 3 and "next" in parts[3]:
next_step = parts[3].split("::")[-1].strip()
flows[current_step] = next_step
elif step_type == 'Terminal':
flows[current_step] = None
# Second pass: follow flows to build ordered task list
ordered_tasks = []
current_step = 'Step 1'
while current_step in flows:
if current_step in tasks: # Only add if it's a Process step
ordered_tasks.append(tasks[current_step])
current_step = flows[current_step]
# Create new workflow graph using the ordered task titles
new_graph = self.create_workflow_graph_from_task_names(task_titles=ordered_tasks)
return new_graph
except Exception as e:
logger.warning(f"Failed to parse Core workflow string: {e}. Return the original workflow.")
return self.graph
class QASimplePromptBreeder:
    def __init__(self, llm: BaseLLM, evaluator: Optional[Evaluator] = None, **kwargs):
self.llm = llm
self.evaluator = evaluator
self.history_log = []
self.kwargs = kwargs
def generate_mutation_prompt(self, task_description: str, **kwargs) -> str:
"""
Generate the mutation prompt for optimization.
"""
thinking_style = random.choice(thinking_styles)
# hyper_mutation_prompt = thinking_style + "\n\nProblem Description: " + task_description + ".\n" + "Output: "
hyper_mutation_prompt = "Please generate a improved prompts based on the following information. " + "\n\nProblem Description: " + task_description + ".\n" + "Output: "
# print(">>>>>>>>>> Hyper mutation prompt: <<<<<<<<<<<\n", hyper_mutation_prompt)
        try:
            mutation_prompt = self.llm.generate(
                prompt=hyper_mutation_prompt,
                system_message="You are a helpful assistant. Do not generate harmful content. ",
            ).content
        except Exception:
            # Retry once on transient LLM errors
            mutation_prompt = self.llm.generate(
                prompt=hyper_mutation_prompt,
                system_message="You are a helpful assistant. Do not generate harmful content. ",
            ).content
return mutation_prompt
def get_mutation_prompt(self, task_description: str, order: Literal["zero-order", "first-order"], **kwargs) -> str:
"""
Get the mutation prompt for optimization.
"""
if order == "zero-order":
mutation_prompt = self.generate_mutation_prompt(task_description=task_description)
elif order == "first-order":
mutation_prompt = random.choice(mutation_prompts)
else:
raise ValueError(f"Invalid order: {order}. The order should be either 'zero-order' or 'first-order'.")
return mutation_prompt
def generate_prompt(self, task_description: str, prompt: str, order: Literal["zero-order", "first-order"], **kwargs) -> str:
"""
Generate the prompt for optimization.
Args:
task_description (str): The description of the task, normally the goal of the workflow.
prompt (str): The prompt to optimize.
order (Literal["zero-order", "first-order"]): The order of the mutation prompt.
Returns:
str: The optimized prompt.
"""
mutation_prompt = self.get_mutation_prompt(task_description=task_description, order=order)
prompt = mutation_prompt + "\n\nINSTRUCTION:\n\n" + prompt
# print(">>>>>>>>>> Prompt: <<<<<<<<<<<\n", prompt)
new_prompt = self.llm.generate(
prompt=prompt,
system_message="You are a helpful assistant",
).content
return new_prompt
def critic_and_update_prompt(self, task_description: str, prompt: str, order: Literal["zero-order", "first-order"], scorer=None, calltime=1, **kwargs) -> str:
"""
Generate the prompt for optimization.
Args:
task_description (str): The description of the task, normally the goal of the workflow.
prompt (str): The prompt to optimize.
order (Literal["zero-order", "first-order"]): The order of the mutation prompt.
Returns:
str: The optimized prompt.
"""
# print(self.evaluator._evaluation_records)
problem_list = ''''''
for item in self.evaluator._evaluation_records.keys():
problem_s = "Questions: " + self.evaluator._evaluation_records[item]['trajectory'][0].content['question']+'\n'
prediction_s = "Predictions: " + self.evaluator._evaluation_records[item]['prediction']+'\n'
solution_s = "Solutions: " + self.evaluator._evaluation_records[item]['label']+'\n'
# if self.evaluator.dataname != "humanevalplus":
# if 'test' in list(self.evaluator._evaluation_records[item]['label'].keys()):
# test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['test'][0:1000]
# elif 'tests' in list(self.evaluator._evaluation_records[item]['label'].keys()):
# test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['tests'][0:1000]
# else:
# test_s = "Example solution: " + self.evaluator._evaluation_records[item]['label']["canonical_solution"]
metric_s = "Score: " + str(self.evaluator._evaluation_records[item]['metrics']['acc']) + "\n"
if self.evaluator._evaluation_records[item]['metrics']['acc'] ==0:
metric_s += "Error reason: Computation result is incorrect."
else:
metric_s += "The solution is correct."
joint_s = problem_s + prediction_s + solution_s + metric_s
problem_list += joint_s
# print(problem_list)
if calltime==1:
critic_prompt = '''You are a workflow critic, not a problem solver. Given a question, its final answer, the execution history, and the workflow trajectory, your task is to evaluate the quality of the workflow used for question answering. Identify only problems, including:
(1) Structural flaws in the workflow (e.g., incorrect step ordering, missing steps, invalid assumptions, flawed control flow, or absent validation),
(2) Failures or inconsistencies exposed by the execution history or trajectory (e.g., errors, contradictions, premature termination, or unhandled cases), and
(3) Incorrect, misleading, ambiguous, or underspecified content in prompts or intermediate steps.
Do not solve the question, do not propose fixes or improvements, and do not restate or evaluate the correctness of the final answer itself.
Output one single paragraph consisting of clearly enumerated or bullet-style problem statements only. Be extremely concise, critical, and factual. The workflow to critique is as follows: '''
question_prompt = "The questions, predictions, solutions, and evaluated metrics based on this workflow is: " + problem_list
# question_prompt = ""
critic_out = self.llm.generate(
prompt=prompt+question_prompt,
system_message=critic_prompt,
).content
print(critic_out)
else:
critic_prompt_outlist = '''Please summarize the following problems in one paragraph. Be super concise and proposing different points.\n'''
for item in range(calltime):
critic_prompt = "You are a workflow critic, not a solver. Given a question, its answer, the execution history, and the workflow trajectory, evaluate the workflow used for question answering. Identify only problems, including: (1) structural flaws in the workflow (e.g., incorrect step ordering, missing validation, flawed control flow), (2) failures revealed by the execution history or trajectory, and (3) incorrect, misleading, or ambiguous information in the prompts. Do not attempt to solve the question, do not suggest fixes, and do not restate the answer. Return one single paragraph listing the problems as clearly numbered or bullet-style points, and be extremely concise. The previous workflow is: "
question_prompt = "The questions, solutions, and evaluated metrics based on this workflow is: " + problem_list
critic_out = self.llm.generate(
prompt=prompt+question_prompt,
system_message=critic_prompt,
).content
critic_prompt_outlist = critic_prompt_outlist + f"Detected Issue {item+1}:" + critic_out +"\n"
critic_out = self.llm.generate(
prompt=critic_prompt_outlist,
system_message="You are an expert in summarizing information and data.",
).content
print(critic_out)
# mutation_prompt = self.get_mutation_prompt(task_description=task_description, order=order)
        if scorer is None:
            prompt = (
                "The detected issues are:\n" + critic_out +
                "\nYou should improve the workflow by correcting these issues without changing the inputs and outputs of nodes in the workflow. "
                "You may remove redundant agents. You must keep the graph executable.\n"
                "\n\nThe original workflow is:\n\n" + prompt
            )
else:
prompt = f'''Detected issues:
{critic_out}
Your task is to design an improved agent workflow that resolves the issues above while strictly preserving the original workflow’s external behavior.
Performance objective: The revised workflow should increase the model performance score above the current score: {scorer}.
Original workflow:
{prompt}
Constraints:
Do not change the inputs or outputs of any existing node.
You may remove redundant or unnecessary agents, but must not introduce breaking dependencies.
You may reorder, merge, or refactor internal steps only if the workflow remains fully executable.
The resulting workflow must form a valid, connected, and runnable graph.
Do not modify the task definition or evaluation metric.
Output requirements:
Output only the revised workflow definition.
Do not include explanations, justifications, or commentary.
Your output:'''
# print(">>>>>>>>>> Prompt: <<<<<<<<<<<\n", prompt)
new_prompt = self.llm.generate(
prompt=prompt,
system_message='''You are a Graph Optimization Agent. Your objective is to iteratively improve the performance of an agent workflow graph by correcting identified issues and optimizing its structure.
You may only perform the following operations: Reorder existing agents. Remove redundant or unnecessary agents. Add new agents.
Hard constraints (must not be violated): Do not change agent names, agent inputs, or agent outputs. Do not modify the task name. Do not change the input of the first node. Every agent in the resulting workflow must accept the **question** as input directly. The workflow must remain a valid, connected, and executable graph at all times. You should only use one prompt.
Your output must be only the revised workflow definition, with no explanations or commentary.''',
).content
print(new_prompt)
return new_prompt
# def critic_and_update_prompt(
# self,
# task_description: str,
# prompt: str,
# order: Literal["zero-order", "first-order"],
# scorer=None,
# calltime: int = 1,
# **kwargs
# ) -> str:
# """
# Generate the prompt for optimization.
# Args:
# task_description (str): The description of the task, normally the goal of the workflow.
# prompt (str): The workflow prompt to optimize.
# order (Literal["zero-order", "first-order"]): The order of the mutation prompt.
# Returns:
# str: The optimized workflow prompt.
# """
# # ---------- Build problem_list safely ----------
# records: Dict[str, Any] = getattr(self.evaluator, "_evaluation_records", {}) or {}
# problem_list = ""
# for key, rec in records.items():
# traj = rec.get("trajectory") or []
# first_step = traj[0] if len(traj) > 0 else None
# content = getattr(first_step, "content", {}) if first_step is not None else {}
# question = content.get("question", "")
# prediction = rec.get("prediction", "")
# solution = rec.get("label", "")
# metrics = rec.get("metrics", {}) or {}
# # Prefer 'acc' but fall back gracefully
# acc = metrics.get("acc", None)
# problem_s = f"Question: {str(question)}\n"
# prediction_s = f"Prediction: {str(prediction)}\n"
# solution_s = f"Solution: {str(solution)}\n"
# metric_s = f"Score(acc): {str(acc)}\n"
# if acc is None:
# metric_s += "Error reason: Missing 'acc' in metrics.\n"
# else:
# try:
# metric_s += "Result: correct.\n" if float(acc) != 0.0 else "Result: incorrect.\n"
# except Exception:
# metric_s += "Result: unknown (non-numeric acc).\n"
# problem_list += (problem_s + prediction_s + solution_s + metric_s + "\n---\n")
# # ---------- Critic prompt ----------
# critic_system = (
# "You are a workflow critic, not a solver. "
# "Given: (A) the workflow, (B) a set of questions, predictions, solutions, and evaluation results, "
# "identify ONLY problems in the workflow. Problems include: "
# "(1) structural flaws in the workflow (wrong ordering, missing validation, flawed control flow), "
# "(2) failures revealed by the records/trajectory, and "
# "(3) incorrect, misleading, or ambiguous workflow instructions. "
# "Do NOT solve any question. Do NOT suggest fixes. Do NOT restate the answer. "
# "Return ONE paragraph with concise bullet points."
# )
# critic_user = (
# "=== WORKFLOW ===\n"
# f"{prompt}\n\n"
# "=== EVALUATION RECORDS (question/prediction/solution/metrics) ===\n"
# f"{problem_list}\n"
# )
# # ---------- Call critic (possibly multiple times) ----------
# critic_outputs = []
# n_calls = max(1, int(calltime))
# for _ in range(n_calls):
# out = self.llm.generate(
# prompt=critic_user,
# system_message=critic_system,
# ).content
# critic_outputs.append(out)
# if n_calls == 1:
# critic_out = critic_outputs[0]
# else:
# summarize_user = "Summarize these detected problems into ONE concise paragraph:\n\n" + "\n\n".join(
# [f"- Critic run {i+1}: {c}" for i, c in enumerate(critic_outputs)]
# )
# critic_out = self.llm.generate(
# prompt=summarize_user,
# system_message="You are an expert at summarizing issues precisely and concisely.",
# ).content
# # ---------- Build optimizer prompt (do not shadow 'prompt') ----------
# if scorer is None:
# optimizer_user = (
# "=== DETECTED ISSUES ===\n"
# f"{critic_out}\n\n"
# "=== CONSTRAINTS ===\n"
# "- Improve the workflow by correcting the issues.\n"
# "- Do NOT change the names of agents or the declared inputs/outputs of nodes.\n"
# "- You may reorder agents, remove redundant agents, or reuse known agents.\n"
# "- Keep the graph executable.\n\n"
# "=== ORIGINAL WORKFLOW ===\n"
# f"{prompt}\n\n"
# "=== OUTPUT ===\n"
# "Return the full revised workflow only."
# )
# else:
# optimizer_user = (
# "=== DETECTED ISSUES ===\n"
# f"{critic_out}\n\n"
# f"=== TARGET ===\nIncrease performance score: {scorer}\n\n"
# "=== CONSTRAINTS ===\n"
# "- Improve the workflow by correcting the issues.\n"
# "- Do NOT change the names of agents or the declared inputs/outputs of nodes.\n"
# "- You may reorder agents, remove redundant agents, or reuse known agents.\n"
# "- Keep the graph executable.\n\n"
# "=== ORIGINAL WORKFLOW ===\n"
# f"{prompt}\n\n"
# "=== OUTPUT ===\n"
# "Return the full revised workflow only."
# )
# optimizer_system = (
# "You are a Graph Optimization Agent. "
# "Iteratively improve workflow performance by fixing the detected issues. "
# "Allowed operations: reorder agents, remove agents, reuse known agents. "
# "Forbidden: renaming agents, changing node IO schema. "
# "Keep the graph executable."
# )
# print(critic_out)
# new_prompt = self.llm.generate(
# prompt=optimizer_user,
# system_message=optimizer_system,
# ).content
# print(new_prompt)
# return new_prompt
def update_dev_set(dataset):
    """Randomly re-sample 50 examples from the benchmark's full dev split."""
    permutation = np.random.permutation(len(dataset._dev_data_full))
    full_data = dataset._dev_data_full
    dev_data = [full_data[idx] for idx in permutation[:50]]
    return dev_data
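# NOTE: called from QASTRUCTUREOptimizer.optimize() to refresh the dev/train subsets between steps;
# it assumes the benchmark exposes a `_dev_data_full` attribute holding the full dev split.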
class QASTRUCTUREOptimizer(Optimizer):
graph: Union[WorkFlowGraph, ActionGraph] = Field(description="The workflow to optimize.")
repr_scheme: str = Field(default="python", description="The scheme to represent the workflow.")
optimize_mode: Literal["all", "structure", "prompt"] = Field(default="all", description="The mode to optimize the workflow.")
order: Literal["zero-order", "first-order"] = Field(default="zero-order", description="Whether to use zero-order (using hyper-mutation prompt) or first-order (using mutation prompt) optimization.")
    calltime: int = Field(default=1, description="Number of critic (textual-gradient) calls per optimization step.")
    num_workers: int = Field(default=1, description="Number of parallel workers used for evaluation.")
def init_module(self, **kwargs):
self._snapshot: List[dict] = []
self._prompt_breeder = QASimplePromptBreeder(llm=self.llm, evaluator = self.evaluator) # generate prompt for optimization
self._convergence_check_counter = 0
self._best_score = float("-inf")
self._prompt_dict = {}
if isinstance(self.graph, ActionGraph):
if self.optimize_mode != "prompt":
raise ValueError(
f"{type(self).__name__} only support prompt optimization when `graph` is an `ActionGraph`. "
f"The `optimize_mode` should be set to `prompt`, but got {self.optimize_mode}."
)
def optimize(self, dataset: Benchmark, **kwargs):
if isinstance(self.graph, WorkFlowGraph):
logger.info(f"Optimizing the {type(self.graph).__name__} workflow with {self.repr_scheme} representation.")
elif isinstance(self.graph, ActionGraph):
logger.info(f"Optimizing the {type(self.graph).__name__} graph ...")
graph: Union[WorkFlowGraph, ActionGraph] = self.graph
logger.info("Run initial evaluation on the original workflow ...")
with suppress_logger_info():
metrics = self.evaluate(dataset, eval_mode="dev", graph=graph)
self._prompt_breeder = QASimplePromptBreeder(llm=self.llm, evaluator = self.evaluator) # generate prompt for optimization
logger.info(f"Initial metrics: {metrics}")
self.log_snapshot(graph=graph, metrics=metrics)
set_scorer = None
if kwargs["provided_scorer"] == True:
set_scorer = metrics
for i in range(self.max_steps):
# try:
# # perform a step of optimization
# graph = self.step(set_scorer=set_scorer)
# # print(graph)
# # evaluate the workflow
# if (i + 1) % self.eval_every_n_steps == 0:
# logger.info(f"Evaluate the workflow at step {i+1} ...")
# with suppress_logger_info():
# metrics = self.evaluate(dataset, eval_mode="dev")
# logger.info(f"Step {i+1} metrics: {metrics}")
# self.log_snapshot(graph=graph, metrics=metrics)
# except Exception as e:
# logger.warning(f"Error in step {i}: {e}. Skip this step.")
# continue
# if self.convergence_check():
# logger.info(f"Convergence check passed at step {i+1}. Stop the optimization.")
# break
# perform a step of optimization
graph = self.step(set_scorer=set_scorer, step=i)
# print(graph)
# evaluate the workflow
if (i + 1) % self.eval_every_n_steps == 0:
logger.info(f"Evaluate the workflow at step {i+1} ...")
with suppress_logger_info():
metrics = self.evaluate(dataset, eval_mode="dev")
logger.info(f"Step {i+1} metrics: {metrics}")
self.log_snapshot(graph=graph, metrics=metrics)
print("randomly update dataset")
self.dataset._dev_data = update_dev_set(self.dataset)
self.dataset._train_data = update_dev_set(self.dataset)
if i == self.max_steps - 1:
logger.info(f"Reach the maximum number of steps {self.max_steps}. Stop the optimization.")
# set self.graph to the best graph
logger.info("Restore the best graph from the snapshot ...")
self.restore_best_graph()
def step(self, **kwargs) -> Union[WorkFlowGraph, ActionGraph]:
"""
Take a step of optimization and return the optimized graph.
"""
graph = self._select_graph_with_highest_score(return_metrics=False)
if isinstance(graph, WorkFlowGraph):
new_graph = self._workflow_graph_step(graph, kwargs["set_scorer"], kwargs["step"])
elif isinstance(graph, ActionGraph):
new_graph = self._action_graph_step(graph, kwargs["set_scorer"], kwargs["step"])
else:
raise ValueError(f"Invalid graph type: {type(graph)}. The graph should be an instance of `WorkFlowGraph` or `ActionGraph`.")
return new_graph
def evaluate(
self,
dataset: Benchmark,
eval_mode: str = "test",
graph: Optional[Union[WorkFlowGraph, ActionGraph]] = None,
indices: Optional[List[int]] = None,
sample_k: Optional[int] = None,
**kwargs
) -> dict:
"""
Evaluate the workflow. If `graph` is provided, use the provided graph for evaluation. Otherwise, use the graph in the optimizer.
Args:
dataset (Benchmark): The dataset to evaluate the workflow on.
eval_mode (str): The evaluation mode. Choices: ["test", "dev", "train"].
graph (Union[WorkFlowGraph, ActionGraph], optional): The graph to evaluate. If not provided, use the graph in the optimizer.
indices (List[int], optional): The indices of the data to evaluate the workflow on.
sample_k (int, optional): The number of data to evaluate the workflow on. If provided, a random sample of size `sample_k` will be used.
Returns:
dict: The metrics of the workflow evaluation.
"""
self.dataset = dataset
graph = graph if graph is not None else self.graph
agent_manager = self.evaluator.agent_manager
agent_manager.add_agents_from_workflow(graph, llm_config=self.llm.config)
# print(agent_manager)
# obtain Evaluator
self.evaluator = Evaluator(llm=self.llm, agent_manager=agent_manager, collate_func=self.collate_func, num_workers=self.num_workers, verbose=True)
self.evaluator.dataname = self.dataset.dataname
metrics_list = []
for i in range(self.eval_rounds):
eval_info = [
f"[{type(graph).__name__}]",
f"Evaluation round {i+1}/{self.eval_rounds}",
f"Mode: {eval_mode}"
]
if indices is not None:
eval_info.append(f"Indices: {len(indices)} samples")
if sample_k is not None:
eval_info.append(f"Sample size: {sample_k}")
logger.info(" | ".join(eval_info))
# if self.dataset.dataname == 'scicode':
# metrics = await self.evaluator.async_evaluate(
# graph=graph,
# benchmark=dataset,
# eval_mode=eval_mode,
# indices=indices,
# sample_k=sample_k,
# **kwargs
# )
# else:
# metrics = self.evaluator.evaluate(
# graph=graph,
# benchmark=dataset,
# eval_mode=eval_mode,
# indices=indices,
# sample_k=sample_k,
# **kwargs
# )
metrics = self.evaluator.evaluate(
graph=graph,
benchmark=dataset,
eval_mode=eval_mode,
indices=indices,
sample_k=sample_k,
**kwargs
)
metrics_list.append(metrics)
avg_metrics = self.evaluator._calculate_average_score(metrics_list)
self.dataset = dataset
self.evaluator.error_list = deepcopy(self.dataset.error_list)
self.dataset.error_list = {}
return avg_metrics
    def group_eval(
        self,
        dataset: Benchmark,
        eval_mode: str = "test",
        graph: Optional[Union[WorkFlowGraph, ActionGraph]] = None,
        indices: Optional[List[int]] = None,
        sample_k: Optional[int] = None,
        iteritem: int = 5,
        **kwargs
    ):
        """Evaluate every test example ``iteritem`` times on the given graph and collect the per-example results."""
        self.evaluator._evaluation_records.clear()
        graph = graph if graph is not None else self.graph
        result_all = []
        for data in dataset._test_data:
            result_list = []
            for _ in range(iteritem):
                results = self._evaluate_graph(graph=graph, data=data, benchmark=dataset, **kwargs)
                result_list.append(results)
            result_all.append(result_list)
        return result_all
def log_snapshot(self, graph: Union[WorkFlowGraph, ActionGraph], metrics: dict):
if isinstance(graph, WorkFlowGraph):
graph_info = graph.get_graph_info()
elif isinstance(graph, ActionGraph):
# TODO check if the action graph is valid
graph_info = graph
else:
raise ValueError(f"Invalid graph type: {type(graph)}. The graph should be an instance of `SequentialWorkFlowGraph` or `ActionGraph`.")
self._snapshot.append(
{
"index": len(self._snapshot),
"graph": deepcopy(graph_info),
"metrics": metrics,
}
)
def _select_graph_with_highest_score(self, return_metrics: bool = False) -> Union[SequentialWorkFlowGraph, ActionGraph]:
if len(self._snapshot) == 0:
return self.graph
snapshot_scores = [np.mean(list(snapshot["metrics"].values())) for snapshot in self._snapshot]
best_index = np.argmax(snapshot_scores)
if isinstance(self.graph, WorkFlowGraph):
graph = WorkFlowGraph.from_dict(self._snapshot[best_index]["graph"])
elif isinstance(self.graph, ActionGraph):
# TODO check if the action graph is valid
graph = self._snapshot[best_index]["graph"]
else:
raise ValueError(f"Invalid graph type: {type(self.graph)}. The graph should be an instance of `SequentialWorkFlowGraph` or `ActionGraph`.")
if return_metrics:
return graph, self._snapshot[best_index]["metrics"]
return graph
def restore_best_graph(self):
best_graph, best_metrics = self._select_graph_with_highest_score(return_metrics=True)
logger.info(f"Restore the best graph from snapshot with metrics {best_metrics} ...")
self.graph = best_graph
def _wfg_structure_optimization_step(self, graph: WorkFlowGraph, scorer, step) -> WorkFlowGraph:
"""
        Optimize the structure of the workflow graph and return the optimized graph.
Args:
graph (SequentialWorkFlowGraph): The workflow graph to optimize.
Returns:
SequentialWorkFlowGraph: The optimized workflow graph.
"""
graph_scheme = STRUCTUREWorkFlowScheme(graph=graph)
graph_repr = graph_scheme.convert_to_scheme(scheme=self.repr_scheme)
if self.repr_scheme == "python":
output_format = "\n\nALWAYS wrap the refined workflow in ```python\n``` format and DON'T include any other text within the code block!"
elif self.repr_scheme == "yaml":
output_format = "\n\nALWAYS wrap the refined workflow in ```yaml\n``` format and DON'T include any other text within the code block!"
elif self.repr_scheme == "code":
output_format = "\n\nALWAYS wrap the refined workflow in ```code\n``` format and DON'T include any other text within the code block!"
elif self.repr_scheme == "core":
output_format = "\n\nALWAYS wrap the refined workflow in ```core\n``` format and DON'T include any other text within the code block!"
elif self.repr_scheme == "bpmn":
output_format = "\n\nALWAYS wrap the refined workflow in ```bpmn\n``` format and DON'T include any other text within the code block!"
else:
raise ValueError(f"Invalid representation scheme: {self.repr_scheme}. The scheme should be one of {VALID_SCHEMES}.")
prompt = "Task Description: " + graph.goal + "\n\nWorkflow Steps: " + graph_repr + output_format
# if step%5==0:
# # print(prompt)
# new_graph_repr = self._prompt_breeder.critic_and_update_prompt(task_description=graph.goal, prompt=prompt, order=self.order, scorer=scorer, calltime=self.calltime)
# # print(new_graph_repr)
# new_graph = graph_scheme.parse_from_scheme(scheme=self.repr_scheme, repr=new_graph_repr)
# print(new_graph)
# else:
# new_graph = graph
new_graph_repr = self._prompt_breeder.critic_and_update_prompt(task_description=graph.goal, prompt=prompt, order=self.order, scorer=scorer, calltime=self.calltime)
new_graph = graph_scheme.parse_from_scheme(scheme=self.repr_scheme, repr=new_graph_repr)
return new_graph
def _wfg_prompt_optimization_step(self, graph: WorkFlowGraph, scorer=None) -> WorkFlowGraph:
task_description = graph.goal
graph_scheme = STRUCTUREWorkFlowScheme(graph=graph)
graph_repr = graph_scheme.convert_to_scheme(scheme=self.repr_scheme)
graph_info = graph.get_graph_info()
problem_list = ''''''
for item in self.evaluator._evaluation_records.keys():
problem_s = "Questions: " + self.evaluator._evaluation_records[item]['trajectory'][0].content['question']+'\n'
prediction_s = "Predictions: " + self.evaluator._evaluation_records[item]['prediction']+'\n'
solution_s = "Solutions: " + self.evaluator._evaluation_records[item]['label']+'\n'
# if self.evaluator.dataname != "humanevalplus":
# if 'test' in list(self.evaluator._evaluation_records[item]['label'].keys()):
# test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['test'][0:1000]
# elif 'tests' in list(self.evaluator._evaluation_records[item]['label'].keys()):
# test_s = "Unit tests: " + self.evaluator._evaluation_records[item]['label']['tests'][0:1000]
# else:
# test_s = "Example solution: " + self.evaluator._evaluation_records[item]['label']["canonical_solution"]
metric_s = "Score: " + str(self.evaluator._evaluation_records[item]['metrics']['acc']) + "\n"
if self.evaluator._evaluation_records[item]['metrics']['acc'] ==0:
metric_s += "Error reason: Computation result is incorrect."
else:
metric_s += "The solution is correct."
joint_s = problem_s + prediction_s + solution_s + metric_s
problem_list += joint_s
print(problem_list)
for i, task in enumerate(graph_info["tasks"]):
if task['name'] not in list(self._prompt_dict.keys()):
self._prompt_dict[task['name']] = []
print(task)
            try:
                task['prompt'] = task["prompt_template"]["instruction"]
            except (KeyError, TypeError):
                # Fall back to the existing task prompt when no prompt_template instruction is available
                pass
original_prompt = task["prompt"]
optimization_prompt = "Task Description: " + task_description + "\n\nWorkflow Steps:\n" + graph_repr + f"\n\nINSTRUCTION for the {i+1}-th task:\n\"\"\"\n" + original_prompt + "\n\"\"\""
error_prompt = f'''{optimization_prompt}
Agent name: {task['name']}
Observed evidence:
The following questions, model predictions, ground-truth solutions, tests, and evaluation metrics were produced using this agent and its prompt:
{problem_list}
Your task is to critically evaluate the original agent prompt, using the evidence above. Identify only problems, including:
(1) Structural or logical flaws in how the prompt guides reasoning or execution (e.g., incorrect step ordering, missing validation instructions, flawed control flow, or implicit assumptions),
(2) Failures or inconsistencies revealed by the execution history or trajectory (e.g., systematic errors, contradictions, brittle behavior, or poor generalization), and
(3) Incorrect, misleading, ambiguous, or underspecified language in the prompt that could degrade performance or reliability.
Do not solve any task, do not propose fixes or improvements, and do not restate or judge the correctness of answers.
Output requirements:
Return one single paragraph only.
List problems as clearly enumerated or bullet-style points.
Be extremely concise, precise, and critical.'''
critic_issues = self.llm.generate(error_prompt).content
optimization_prompt += f"The new prompts should consider fixing the issues by adjusting the prompt content: {critic_issues}. You should not change the original role and task of the assigned agent."
if self._prompt_dict[task['name']] != []:
prev_prompt = "\n".join(self._prompt_dict[task['name']])
optimization_prompt += f"The previous prompts are: {prev_prompt}\nYou should also fix the problems in these prompts."
optimization_prompt += f"\n\nGiven the above information, please refine the instruction for the {i+1}-th task.\n"
optimization_prompt += r"Note that you must always use bracket (e.g. `{input_name}`, `{code}`, `{question}`) to wrap the inputs of the tasks in your refined instruction. You must ensure the prompts contain all inputs. You cannot change the name of functions.\n"
###new one
optimization_prompt += "Your prompt should not change the function name and entry_point in the question. Only output the refined instruction and DON'T include any other text!"
new_prompt = self._prompt_breeder.generate_prompt(task_description=task_description, prompt=optimization_prompt, order=self.order)
graph_info["tasks"][i]["prompt"] = new_prompt
# print("task name", task['name'])
# print("detected issue", critic_issues)
# print("renewed prompt", new_prompt)
self._prompt_dict[task['name']].append(new_prompt)
new_graph = SequentialWorkFlowGraph.from_dict(graph_info)
return new_graph
# def _wfg_prompt_optimization_step(self, graph: WorkFlowGraph, scorer=None) -> WorkFlowGraph:
# task_description = graph.goal
# graph_scheme = STRUCTUREWorkFlowScheme(graph=graph)
# graph_repr = graph_scheme.convert_to_scheme(scheme=self.repr_scheme)
# graph_info: Dict[str, Any] = graph.get_graph_info()
# # ----------------------------
# # Build evaluation record summary safely
# # ----------------------------
# records: Dict[str, Any] = getattr(self.evaluator, "_evaluation_records", {}) or {}
# problem_list = ""
# for key, rec in records.items():
# traj = rec.get("trajectory") or []
# first_step = traj[0] if len(traj) > 0 else None
# content = getattr(first_step, "content", {}) if first_step is not None else {}
# question = content.get("question", "")
# prediction = rec.get("prediction", "")
# solution = rec.get("label", "")
# metrics = rec.get("metrics", {}) or {}
# acc = metrics.get("acc", None)
# # Safely stringify
# problem_list += (
# "Question:\n"
# f"```text\n{str(question)}\n```\n"
# "Prediction:\n"
# f"```text\n{str(prediction)}\n```\n"
# "Solution/Label:\n"
# f"```text\n{str(solution)}\n```\n"
# f"Score(acc): {str(acc)}\n"
# )
# # Robust correctness hint
# if acc is None:
# problem_list += "Result: unknown (missing acc).\n"
# else:
# try:
# problem_list += "Result: incorrect.\n" if float(acc) == 0.0 else "Result: correct.\n"
# except Exception:
# problem_list += "Result: unknown (non-numeric acc).\n"
# problem_list += "\n---\n"
# # Optional: keep for debugging, but consider a verbose flag
# print(problem_list)
# critic_system = (
# "You are a prompt/workflow critic, not a solver. "
# "You will be given: the workflow steps, one agent instruction, and evaluation records "
# "(questions, predictions, solutions, scores). "
# "Identify ONLY problems in the agent instruction and how it interacts with the workflow: "
# "(1) ambiguous/misleading instructions, missing required inputs/placeholders, "
# "(2) structural/interaction flaws (wrong assumptions about upstream/downstream), "
# "(3) failures suggested by the evaluation records. "
# "Do NOT solve any question. Do NOT propose fixes. Do NOT restate solutions. "
# "Return ONE concise paragraph with bullet points."
# )
# # ----------------------------
# # Iterate tasks and refine prompts
# # ----------------------------
# for i, task in enumerate(graph_info.get("tasks", [])):
# task_name = task.get("name", f"task_{i}")
# if task_name not in self._prompt_dict:
# self._prompt_dict[task_name] = []
# print(task)
# # Get original instruction robustly
# prompt_template = task.get("prompt_template") or {}
# original_instruction = prompt_template.get("instruction", task.get("prompt", ""))
# # Make sure we do not accidentally lose original info
# graph_info["tasks"][i]["prompt"] = original_instruction
# # ----------------------------
# # Critic prompt (structured + delimited)
# # ----------------------------
# critic_user = (
# "=== TASK CONTEXT ===\n"
# f"Task description (workflow goal):\n```text\n{task_description}\n```\n\n"
# "=== WORKFLOW STEPS (graph) ===\n"
# f"```text\n{graph_repr}\n```\n\n"
# f"=== AGENT ({i+1}) ===\n"
# f"Agent name: {task_name}\n\n"
# "Current instruction:\n"
# f"```text\n{original_instruction}\n```\n\n"
# "=== EVALUATION RECORDS ===\n"
# f"{problem_list}\n"
# )
# critic_issues = self.llm.generate(
# prompt=critic_user,
# system_message=critic_system,
# ).content
# # ----------------------------
# # Refinement prompt to breeder
# # ----------------------------
# refine_user = (
# "You are refining ONE agent instruction in an existing workflow.\n\n"
# "=== TASK CONTEXT ===\n"
# f"Workflow goal:\n```text\n{task_description}\n```\n\n"
# "Workflow steps:\n"
# f"```text\n{graph_repr}\n```\n\n"
# f"=== AGENT ({i+1}) TO REFINE ===\n"
# f"Agent name: {task_name}\n\n"
# "Original instruction:\n"
# f"```text\n{original_instruction}\n```\n\n"
# "=== DETECTED ISSUES (do not solve, just fix via prompt wording) ===\n"
# f"```text\n{critic_issues}\n```\n\n"
# "=== CONSTRAINTS ===\n"
# "- Do NOT change the agent's role or assigned task.\n"
# "- Do NOT change any function name or entry_point mentioned in the question.\n"
# "- Ensure ALL required task inputs appear as placeholders wrapped in curly braces, "
# "e.g., {input_name}, {code}, {question}.\n"
# "- Output ONLY the refined instruction text (no preamble, no quotes).\n"
# )
# # If prior prompts exist, include them (delimited) for debugging regressions
# if self._prompt_dict[task_name]:
# prev_prompt = "\n\n---\n\n".join(self._prompt_dict[task_name])
# refine_user += (
# "\n=== PREVIOUS REFINED INSTRUCTIONS (fix recurring issues) ===\n"
# f"```text\n{prev_prompt}\n```\n"
# )
# new_instruction = self._prompt_breeder.generate_prompt(
# task_description=task_description,
# prompt=refine_user,
# order=self.order,
# )
# graph_info["tasks"][i]["prompt"] = new_instruction
# self._prompt_dict[task_name].append(new_instruction)
# new_graph = SequentialWorkFlowGraph.from_dict(graph_info)
# return new_graph
def _workflow_graph_step(self, graph: WorkFlowGraph, scorer, step) -> WorkFlowGraph:
if self.optimize_mode == "structure" or self.optimize_mode == "all":
# optimize the structure of the graph
graph = self._wfg_structure_optimization_step(graph, scorer=scorer, step=step)
if self.optimize_mode == "prompt" or self.optimize_mode == "all":
# optimize the prompt of the graph
graph = self._wfg_prompt_optimization_step(graph, scorer=scorer)
return graph
def _action_graph_prompt_optimization_step(self, graph: ActionGraph) -> ActionGraph:
task_description = graph.description
graph_info = graph.get_graph_info()
graph_steps = inspect.getsource(getattr(graph, "execute"))
for operator_name, operator_info in graph_info["operators"].items():
original_prompt = operator_info["prompt"]
optimization_prompt = "Task Description: " + task_description + "\n\nWorkflow Steps:\n" + graph_steps + f"\n\nINSTRUCTION for the `{operator_name}` operator:\n\"\"\"\n" + original_prompt + "\n\"\"\""
optimization_prompt += "\n\nThe interface of the operator is as follows:\n" + operator_info["interface"]
optimization_prompt += f"\n\nGiven the above information, please refine the instruction for the `{operator_name}` operator.\n"
optimization_prompt += r"Note that you should always use bracket (e.g. `{input_name}`) to wrap the inputs of the operator in your refined instruction, "
optimization_prompt += "and the input names should be EXACTLY the same as those defined in the interface. DON'T use bracket to wrap output names."
optimization_prompt += "\nOnly output the refined instruction and DON'T include any other text!"
new_prompt = self._prompt_breeder.generate_prompt(task_description=task_description, prompt=optimization_prompt, order=self.order)
new_prompt = new_prompt.replace("\"", "").strip()
graph_info["operators"][operator_name]["prompt"] = new_prompt
new_graph = ActionGraph.from_dict(graph_info)
return new_graph
    def _action_graph_step(self, graph: ActionGraph, scorer=None, step=None) -> ActionGraph:  # scorer/step accepted for parity with _workflow_graph_step
if self.optimize_mode == "prompt":
graph = self._action_graph_prompt_optimization_step(graph)
else:
raise ValueError(f"{type(self).__name__} only support prompt optimization when `self.graph` is an `ActionGraph` instance. "
f"The `optimize_mode` should be set to `prompt`, but got {self.optimize_mode}.")
return graph
def convergence_check(self, **kwargs) -> bool:
if not self._snapshot:
logger.warning("No snapshots available for convergence check")
return False
# Get scores from snapshots
scores = [np.mean(list(snapshot["metrics"].values())) for snapshot in self._snapshot]
current_score = scores[-1]
if current_score > self._best_score:
self._best_score = current_score
self._convergence_check_counter = 0
else:
self._convergence_check_counter += 1
if self._convergence_check_counter >= self.convergence_threshold:
logger.info(f"Early stopping triggered: No improvement for {self.convergence_threshold} iterations")
# logger.info(f"Score history: {scores[-self.convergence_threshold:]}")
return True
return False
def save(self, path: str, ignore: List[str] = []):
"""
Save the (optimized) workflow graph to a file.
Args:
path (str): The path to save the workflow graph.
ignore (List[str]): The keys to ignore when saving the workflow graph.
"""
self.graph.save_module(path, ignore=ignore)
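
# Illustrative usage sketch (the LLM config values and the `workflow_graph`, `evaluator`, and
# `benchmark` objects are assumptions, not defined in this module; remaining optimizer fields
# such as max_steps come from the `Optimizer` base class):
#
#   llm = OpenAILLM(config=OpenAILLMConfig(model="gpt-4o-mini", openai_key="sk-..."))
#   optimizer = QASTRUCTUREOptimizer(
#       graph=workflow_graph,      # a SequentialWorkFlowGraph / WorkFlowGraph to optimize
#       llm=llm,
#       evaluator=evaluator,       # an Evaluator wired to an agent manager
#       repr_scheme="python",      # one of VALID_SCHEMES
#       optimize_mode="all",       # "structure", "prompt", or "all"
#   )
#   optimizer.optimize(dataset=benchmark, provided_scorer=False)
#   optimizer.save("optimized_workflow.json")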