| import json |
| from copy import deepcopy |
| from typing import Any, Dict, List |
| from flow_modules.aiflows.OpenAIChatFlowModule import OpenAIChatAtomicFlow |
|
|
| from dataclasses import dataclass |
class PlanGeneratorAtomicFlow(OpenAIChatAtomicFlow):
    """Atomic flow that asks the LLM to generate a plan for a goal.

    The model is instructed (via a hint appended to the goal) to answer with a
    JSON object of the form ``{"plan": "<plan text>"}``; ``run`` parses that
    reply and returns the resulting dict.
    """

    def __init__(self, **kwargs):
        # No extra state beyond the parent flow; kept for explicitness.
        super().__init__(**kwargs)

    @classmethod
    def instantiate_from_config(cls, config):
        """Build an instance from a config dict.

        The config is deep-copied so the prompt/backend setup helpers cannot
        mutate the caller's dict.
        """
        flow_config = deepcopy(config)
        kwargs = {"flow_config": flow_config}
        # Prompt templates and the LLM backend come from the parent-class helpers.
        kwargs.update(cls._set_up_prompts(flow_config))
        kwargs.update(cls._set_up_backend(flow_config))
        return cls(**kwargs)

    def run(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Query the model and return its reply parsed as a JSON dict.

        A response-format hint is appended to ``input_data['goal']`` before the
        call. The input dict is shallow-copied first, so the caller's dict is
        never mutated (the original code appended the hint in place, which
        corrupted the caller's goal and compounded on repeated calls).

        If the first reply is not valid JSON, one retry is made with an
        explicit "JSON only" instruction; if that reply also fails to parse,
        ``json.JSONDecodeError`` propagates to the caller, as before.
        """
        hint_for_model = """
        Make sure your response is in the following format:
        Response Format:
        {
        "plan": "Python printable string of the plan corresponding to the goal",
        }
        """
        if 'goal' in input_data:
            # Copy so the hint does not leak back into the caller's dict.
            input_data = dict(input_data)
            input_data['goal'] += hint_for_model
        api_output = super().run(input_data)["api_output"].strip()
        try:
            return json.loads(api_output)
        except json.decoder.JSONDecodeError:
            new_input_data = dict(input_data)
            # Guard: the original unconditionally did `new_input_data['goal'] += ...`,
            # which raised KeyError (not a retry) when 'goal' was absent.
            if 'goal' in new_input_data:
                new_input_data['goal'] += ("The previous respond cannot be parsed with json.loads, Make sure your next "
                                           "response is solely in JSON format.")
            new_api_output = super().run(new_input_data)["api_output"].strip()
            # A second parse failure intentionally propagates JSONDecodeError.
            return json.loads(new_api_output)
|
|
|
|
|
|