|
|
# LLM provider wrapper.
from llm.llm import LLM

# Problem ingestion: parses a problem JSON into (display string, structured dict).
from input.problem import problem_input

# Pre-computed intermediate results used to skip earlier pipeline stages.
# (The original line imported `modeling_solution` twice; duplicate removed.)
from input.test_middle_result import problem_str, problem_analysis, selected_models, modeling_solution, task_descriptions

# Pipeline agents (note: `task_decompse` matches the actual module file name).
from agent.problem_analysis import ProblemAnalysis
from agent.problem_modeling import ProblemModeling
from agent.task_decompse import TaskDecompose
from agent.task import Task

# JSON / Markdown file helpers.
from utils.utils import read_json_file, write_json_file, write_markdown_file, json_to_markdown
|
|
|
|
|
|
|
|
def main() -> None:
    """Run the modeling pipeline for one competition problem.

    Loads the problem file, then for each task description (currently only
    the first one) runs the Task agent's chain of
    analysis -> formulas -> modeling -> result -> answer
    and collects every generated section into the ``paper`` dict.
    Side effects: LLM API calls, file reads, and progress printed to stdout.
    """
    # LLM backend shared by every agent call below.
    llm = LLM('deepseek-reasoner')

    # Accumulator for all generated sections of the paper.
    paper = {'tasks': []}

    problem_path = 'data/actor_data/input/problem/2024_C.json'
    # Parse the problem file into a printable string and a structured dict.
    problem_str, problem = problem_input(problem_path, llm)
    # Problem letter from the file name, e.g. '.../2024_C.json' -> 'C'.
    # NOTE(review): unused in this script — presumably consumed by later stages; confirm.
    problem_type = problem_path.split('/')[-1].split('_')[-1].split('.')[0]
    # NOTE(review): also unused here; looks like the intended number of sub-tasks — confirm.
    tasknum = 4

    print(problem_str)
    print('---')
    paper['problem_background'] = problem['background']
    paper['problem_requirement'] = problem['problem_requirement']

    task = Task(llm)
    # NOTE(review): [:1] restricts the run to the first task only — confirm whether
    # this is a debugging shortcut or intended behavior.
    for task_description in task_descriptions[:1]:
        task_analysis = task.analysis(task_description)
        task_formulas = task.formulas(problem['data_description'], task_description, task_analysis)
        task_modeling = task.modeling(problem['data_description'], task_description, task_analysis, task_formulas)
        task_result = task.result(task_description, task_analysis, task_formulas, task_modeling)
        task_answer = task.answer(task_description, task_analysis, task_formulas, task_modeling, task_result)
        paper['tasks'].append({
            'task_description': task_description,
            'task_analysis': task_analysis,
            'mathematical_formulas': task_formulas,
            'mathematical_modeling_process': task_modeling,
            'result': task_result,
            'answer': task_answer
        })
    print(paper['tasks'])

    # Report cumulative token/cost usage for the whole run.
    print(llm.get_total_usage())


if __name__ == "__main__":
    main()
|
|