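# Batch-extract problem statements with an LLM and merge the new 2025 results
# into the accumulated problem JSON (see run_batch and the __main__ block below).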
from llm.llm import LLM
from input.problem import problem_input
# from input.test_middle_result import problem_str, problem_analysis, selected_models, modeling_solution, task_descriptions
from agent.problem_analysis import ProblemAnalysis
from agent.problem_modeling import ProblemModeling
from agent.task_decompse import TaskDecompose
from agent.task import Task
from agent.create_charts import Chart
from utils.utils import read_json_file, write_json_file, write_text_file, json_to_markdown
# from utils.convert_format import markdown_to_latex
import os
from prompt.template import PROBLEM_EXTRACT_PROMPT
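# Pipeline settings: iteration rounds for the analysis, modeling, and task-formula
# stages, the number of subtasks and charts to generate, and the LLM model name.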
config = {
    'problem_analysis_round': 1,
    'problem_modeling_round': 1,
    'task_formulas_round': 1,
    'tasknum': 4,
    'chart_num': 3,
    'model_name': 'chatgpt-4o-latest'
}
def run_batch(problem_path, config):
    # Initialize the LLM backend
    llm = LLM(config['model_name'])
    # Extract the problem from the input file
    problem_str, problem = problem_input(problem_path, llm)
    # Derive the problem name (and its type suffix) from the file name
    problem_name = os.path.splitext(os.path.basename(problem_path))[0]
    problem_type = problem_name.split('_')[-1]
    return {problem_name: problem}
if __name__ == "__main__":
    import glob

    # files = glob.glob('/Users/ann/Downloads/methmatical_paper_extraction/parse/2025_*/content/*.md')
    files = glob.glob('../data/actor_data/input/problem/2025_*')
    # Start from the already-extracted 2024 problems and merge in the 2025 ones
    problems = read_json_file('../data/actor_data/output/problem_24.json')
    for file in files:
        problems.update(run_batch(file, config))
    write_json_file('../data/actor_data/output/problem_25.json', problems)
    print(problems.keys())