Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +27 -0
- eval/dataset/correction_check/3-准确性验证样本.xlsx +3 -0
- eval/dataset/correction_check/all_samples.xlsx +3 -0
- eval/dataset/correction_check/correction_txt.zip +3 -0
- eval/dataset/doc/doc_20251230_141704.tar.gz.part000 +3 -0
- eval/dataset/doc/doc_20251230_141704.tar.gz.sha256 +3 -0
- eval/dataset/doc_type/doc_type_model.json +3 -0
- eval/dataset/doc_type/doc_type_stat_final.json +3 -0
- eval/dataset/doc_type/doc_type_stat_final_patch.json +0 -0
- eval/dataset/doc_type/documents.json +3 -0
- eval/dataset/human_exp/selected_documents_50.xlsx +0 -0
- eval/dataset/human_exp/长文本题目质量评测汇总表-1031-50.xlsx +3 -0
- eval/dataset/info.txt +2 -0
- eval/dataset/longbench_pro_final.json +3 -0
- eval/dataset/longbench_pro_tmp.json +3 -0
- eval/dataset/origin/1023.xlsx +3 -0
- eval/dataset/prompt_rewrite/1023_prompt_rewrite_1718.xlsx +3 -0
- eval/dataset/prompt_rewrite/2-改写样本(1513文档-1513题).xlsx +3 -0
- eval/output/DeepSeek-V3-0324/thinking_context-120000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_evaluation.jsonl +3 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_1-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_2-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_3-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_4-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_5-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_6-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_7-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_8-of-8.jsonl +0 -0
- eval/output/GLM-4.6/nonthinking_context-120000_bon-3_summary.json +164 -0
- eval/output/GLM-4.6/thinking_context-120000_bon-3_evaluation.jsonl +3 -0
- eval/output/GLM-4.6/thinking_context-120000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/GLM-4.6/thinking_context-120000_bon-3_summary.json +164 -0
- eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_evaluation.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_summary.json +164 -0
- eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_evaluation.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_summary.json +164 -0
- eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_evaluation.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_summary.json +164 -0
- eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_evaluation.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_summary.json +164 -0
- eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_evaluation.jsonl +3 -0
- eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_summary.json +164 -0
- eval/output/Qwen3-14B/thinking_context-120000_bon-3_evaluation.jsonl +3 -0
- eval/output/Qwen3-14B/thinking_context-120000_bon-3_inference_1-of-1.jsonl +3 -0
- eval/output/Qwen3-14B/thinking_context-120000_bon-3_summary.json +164 -0
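All of the large files above are stored through Git LFS, so a plain clone only yields pointer stubs. A minimal sketch for pulling the dataset and output folders with `huggingface_hub`; the repo id below is a placeholder, not taken from this diff:

```python
# Minimal sketch (assumption: this commit lives in a Hugging Face dataset repo;
# the repo_id is a placeholder and must be replaced with the real one).
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="ORG/longbench-pro-eval",          # placeholder
    repo_type="dataset",
    allow_patterns=["eval/dataset/*", "eval/output/*"],  # only the folders shown above
)
print("Downloaded to:", local_dir)
```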
.gitattributes
CHANGED
@@ -225,3 +225,30 @@ eval/output/DeepSeek-R1-0528/thinking_context-120000_bon-3_inference_1-of-1.json
 eval/output/DeepSeek-V3-0324/nonthinking_context-120000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
 eval/output/DeepSeek-V3-0324/thinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
 eval/output/DeepSeek-V3-0324/nonthinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/DeepSeek-V3-0324/thinking_context-120000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Qwen3-14B/thinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Qwen3-14B/thinking_context-120000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/GLM-4.6/thinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/GLM-4.6/nonthinking_context-120000_bon-3_evaluation.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/output/GLM-4.6/thinking_context-120000_bon-3_inference_1-of-1.jsonl filter=lfs diff=lfs merge=lfs -text
+eval/dataset/longbench_pro_tmp.json filter=lfs diff=lfs merge=lfs -text
+eval/dataset/longbench_pro_final.json filter=lfs diff=lfs merge=lfs -text
+eval/dataset/origin/1023.xlsx filter=lfs diff=lfs merge=lfs -text
+eval/dataset/prompt_rewrite/1023_prompt_rewrite_1718.xlsx filter=lfs diff=lfs merge=lfs -text
+eval/dataset/prompt_rewrite/2-改写样本(1513文档-1513题).xlsx filter=lfs diff=lfs merge=lfs -text
+eval/dataset/doc_type/doc_type_stat_final.json filter=lfs diff=lfs merge=lfs -text
+eval/dataset/doc_type/documents.json filter=lfs diff=lfs merge=lfs -text
+eval/dataset/doc_type/doc_type_model.json filter=lfs diff=lfs merge=lfs -text
+eval/dataset/human_exp/长文本题目质量评测汇总表-1031-50.xlsx filter=lfs diff=lfs merge=lfs -text
+eval/dataset/correction_check/all_samples.xlsx filter=lfs diff=lfs merge=lfs -text
+eval/dataset/correction_check/3-准确性验证样本.xlsx filter=lfs diff=lfs merge=lfs -text
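The rules appended above route every new dataset and output file through the LFS filter. A small sketch, relying only on the `filter=lfs` attribute format visible in the diff, that lists which patterns `.gitattributes` marks as LFS-tracked:

```python
# Minimal sketch: list LFS-tracked path patterns from a .gitattributes file.
from pathlib import Path

lfs_patterns = []
for line in Path(".gitattributes").read_text(encoding="utf-8").splitlines():
    parts = line.split()
    if parts and "filter=lfs" in parts[1:]:
        lfs_patterns.append(parts[0])

print(f"{len(lfs_patterns)} LFS-tracked patterns")
for pattern in lfs_patterns[-5:]:
    print(" ", pattern)
```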
eval/dataset/correction_check/3-准确性验证样本.xlsx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d24dc12379c5b2e719154120c96d30950fb7941180b810f2417f32bb2984809
+size 947870
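The three-line body above (`version`, `oid sha256:…`, `size`) is the standard Git LFS pointer format; the same pattern repeats for the other ADDED files below. A minimal sketch (paths are illustrative) that parses such a pointer and verifies a locally materialized file against the recorded digest and size:

```python
# Minimal sketch: parse a Git LFS pointer file and verify a downloaded blob.
import hashlib
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def verify(pointer_path: str, blob_path: str) -> bool:
    meta = parse_lfs_pointer(pointer_path)
    blob = Path(blob_path).read_bytes()
    return len(blob) == meta["size"] and hashlib.sha256(blob).hexdigest() == meta["oid"]
```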
eval/dataset/correction_check/all_samples.xlsx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f33b67b487c7837ca9d754645ab39995b16363f1883d90936d17aa6b13815eba
+size 892290
eval/dataset/correction_check/correction_txt.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b6bd31c77f1541ffc960e2cab7c53d9619754b7370e59bbb494e6929258219b
+size 5367806
eval/dataset/doc/doc_20251230_141704.tar.gz.part000
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2b9d85bfa28069ab5cd603485c20e726b25d951cbf0a6f25500fe03d30db884
+size 595853367
eval/dataset/doc/doc_20251230_141704.tar.gz.sha256
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25a98f65df8fc5dbb35badea76dae332ffa6264dbc69309d10eefdb4e74fe2af
+size 101
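The document archive appears to be uploaded as split parts (`doc_20251230_141704.tar.gz.part000`, …) with a companion `.sha256` file. A hedged sketch for reassembling the parts and checking the digest, assuming the `.sha256` file uses the usual `<hex digest>  <filename>` layout:

```python
# Minimal sketch (assumptions: .partNNN pieces concatenate in name order, and
# the .sha256 file's first whitespace-separated token is the expected digest).
import hashlib
from pathlib import Path

doc_dir = Path("eval/dataset/doc")
parts = sorted(doc_dir.glob("doc_20251230_141704.tar.gz.part*"))

digest = hashlib.sha256()
with open("doc_20251230_141704.tar.gz", "wb") as out:
    for part in parts:
        data = part.read_bytes()
        digest.update(data)
        out.write(data)

expected = (doc_dir / "doc_20251230_141704.tar.gz.sha256").read_text().split()[0]
print("sha256 matches:", digest.hexdigest() == expected)
```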
eval/dataset/doc_type/doc_type_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47112b6c9e8aab7556e0dea63ab6a63ce8d6be366d14b3abc22378326597d678
+size 544744989
eval/dataset/doc_type/doc_type_stat_final.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a4bf2affd233fc33d56c8abd33949807ba8216da3a9cb4c042bde5ed25356a7
+size 544805122
eval/dataset/doc_type/doc_type_stat_final_patch.json
ADDED
The diff for this file is too large to render.
eval/dataset/doc_type/documents.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b0a2a959a635fcd27b020747aaf1c7ea7d2b45de32c368b57f7970154413c58
+size 544678293
eval/dataset/human_exp/selected_documents_50.xlsx
ADDED
Binary file (61.3 kB).
eval/dataset/human_exp/长文本题目质量评测汇总表-1031-50.xlsx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08d9297196214125eed5b091ce38a47004c3a24f180aae507b8c22c6f7672c1c
+size 216704
eval/dataset/info.txt
ADDED
@@ -0,0 +1,2 @@
+longbench_pro_tmp.json: data before the difficulty split was applied
+longbench_pro_final.json = longbench_pro.json: final data with the difficulty split
eval/dataset/longbench_pro_final.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92ff05f6088e212d06c5a731ab86000b69cee6a0900cbbd524a25851e3c30de0
+size 531535940
eval/dataset/longbench_pro_tmp.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37b83638f4053a1a633e223540caa1c31717774da5ec7619892a5d46e5fa0623
+size 531533464
eval/dataset/origin/1023.xlsx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e09895d9aecd0e13eac84338bf398cd2da1876d602e3cee5b410e9216b219f85
+size 2038170
eval/dataset/prompt_rewrite/1023_prompt_rewrite_1718.xlsx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:caafb396082ed4d6c503c33126d0925c5dd4d45bcca268fbf9f637cf0608aab0
+size 960484
eval/dataset/prompt_rewrite/2-改写样本(1513文档-1513题).xlsx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c73568a6e6733b27f6237c4d5fa66eb8a908171a82459d5d220e417da79a30
+size 1257384
eval/output/DeepSeek-V3-0324/thinking_context-120000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bb20f6e3ff42352b2caee34ad972d238f1263df787b7921ea932c58121b1eb6
+size 21637234
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:881daa485e18ef8f82af26eb07d57c280ebbb48579cbced3271e02b27d5f04b5
+size 16161371
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_1-of-8.jsonl
ADDED
The diff for this file is too large to render.
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_2-of-8.jsonl
ADDED
The diff for this file is too large to render.
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_3-of-8.jsonl
ADDED
The diff for this file is too large to render.
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_4-of-8.jsonl
ADDED
The diff for this file is too large to render.
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_5-of-8.jsonl
ADDED
The diff for this file is too large to render.
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_6-of-8.jsonl
ADDED
The diff for this file is too large to render.
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_7-of-8.jsonl
ADDED
The diff for this file is too large to render.
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_inference_8-of-8.jsonl
ADDED
The diff for this file is too large to render.
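The GLM-4.6 non-thinking inference run is sharded into `inference_1-of-8.jsonl` through `inference_8-of-8.jsonl`. A minimal sketch, assuming each shard is plain JSONL whose records can simply be concatenated in shard order:

```python
# Minimal sketch (assumption: shards are independent JSONL files, one record per line).
import json
from pathlib import Path

out_dir = Path("eval/output/GLM-4.6")
shards = sorted(out_dir.glob("nonthinking_context-120000_bon-3_inference_*-of-8.jsonl"))

records = []
for shard in shards:
    with shard.open(encoding="utf-8") as f:
        records.extend(json.loads(line) for line in f if line.strip())

print(f"merged {len(records)} records from {len(shards)} shards")
```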
eval/output/GLM-4.6/nonthinking_context-120000_bon-3_summary.json
ADDED
@@ -0,0 +1,164 @@
+{
+"date": "2025-12-08",
+"total_questions_num": 1500,
+"inference_iterations": 3,
+"total_samples_num": 4500,
+"fail_samples_num": 0,
+"inference_inconsistent_samples_num": 0,
+"average_overall_metric": 0.45854238430368943,
+"inference_iteration_1_overall_metric": 0.44890397156188516,
+"inference_iteration_2_overall_metric": 0.4676756901179884,
+"inference_iteration_3_overall_metric": 0.45904749123119587,
+"average_token_length_metric": {
+"8k": 0.539826456202734,
+"16k": 0.49883990878468565,
+"32k": 0.5226004279628154,
+"64k": 0.4617605114078172,
+"128k": 0.3868307627999842,
+"256k": 0.34139623866410224
+},
+"average_contextual_requirement_metric": {
+"Full": 0.43758451425043066,
+"Partial": 0.4852160370987465
+},
+"average_difficulty_metric": {
+"Easy": 0.5881815896217503,
+"Moderate": 0.40045739164324096,
+"Hard": 0.43071501653405486,
+"Extreme": 0.3729573279423014
+},
+"average_primary_task_metric": {
+"T1. Retrieval & Ranking": 0.7391294273870368,
+"T2. Sequencing & Structure Reconstruction": 0.7168876379566528,
+"T3. Evidence-Grounded QA": 0.48333333333333334,
+"T4. Summarization & Synthesis": 0.54430615481068,
+"T5. Attribution & Citation Alignment": 0.5036524549754317,
+"T6. Aggregation & Clustering": 0.4230515938862694,
+"T7. Consistency & Compliance Checking": 0.28024190494681,
+"T8. Structured & Numeric Reasoning": 0.204783950617284,
+"T9. Version & Code Diff Analysis": 0.5493453618981832,
+"T10. Rule Induction & In-Context Learning": 0.48856481481481484,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.3444444444444445
+},
+"average_language_metric": {
+"Chinese": 0.4607293656684521,
+"English": 0.45635540293892746
+},
+"BoN-1": {
+"overall_metric": 0.44890397156188516,
+"token_length": {
+"8k": 0.5369079824065298,
+"16k": 0.4950184758232463,
+"32k": 0.5119378052749619,
+"64k": 0.4359616248204175,
+"128k": 0.3900490912931948,
+"256k": 0.323548849752962
+},
+"contextual_requirement": {
+"Full": 0.42475510461227917,
+"Partial": 0.47963889313411256
+},
+"difficulty": {
+"Easy": 0.5839417584803529,
+"Moderate": 0.38543423470966093,
+"Hard": 0.4251833017579716,
+"Extreme": 0.3582444584458006
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.7552782575667283,
+"T2. Sequencing & Structure Reconstruction": 0.72015059015059,
+"T3. Evidence-Grounded QA": 0.425,
+"T4. Summarization & Synthesis": 0.5415035414147457,
+"T5. Attribution & Citation Alignment": 0.49621823831328055,
+"T6. Aggregation & Clustering": 0.41038094275648807,
+"T7. Consistency & Compliance Checking": 0.27110571290315905,
+"T8. Structured & Numeric Reasoning": 0.1953703703703704,
+"T9. Version & Code Diff Analysis": 0.5256412558109732,
+"T10. Rule Induction & In-Context Learning": 0.4822222222222222,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.35
+},
+"language": {
+"Chinese": 0.44671558878002343,
+"English": 0.45109235434374795
+}
+},
+"pass@1": 0.19266666666666668,
+"BoN-2": {
+"overall_metric": 0.5266630332287439,
+"token_length": {
+"8k": 0.604236555945032,
+"16k": 0.5564543546519637,
+"32k": 0.5870438443448399,
+"64k": 0.5387565602861155,
+"128k": 0.46692762105066604,
+"256k": 0.4065592630938467
+},
+"contextual_requirement": {
+"Full": 0.5015019335956029,
+"Partial": 0.5586862509436522
+},
+"difficulty": {
+"Easy": 0.6773656505276844,
+"Moderate": 0.47328676466113107,
+"Hard": 0.49837787163941455,
+"Extreme": 0.4152118781770787
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.7867939033692525,
+"T2. Sequencing & Structure Reconstruction": 0.7854105213890441,
+"T3. Evidence-Grounded QA": 0.5666666666666667,
+"T4. Summarization & Synthesis": 0.5572174880251567,
+"T5. Attribution & Citation Alignment": 0.573956288098141,
+"T6. Aggregation & Clustering": 0.4915724205045227,
+"T7. Consistency & Compliance Checking": 0.33460325146257974,
+"T8. Structured & Numeric Reasoning": 0.2578703703703704,
+"T9. Version & Code Diff Analysis": 0.6423128731937172,
+"T10. Rule Induction & In-Context Learning": 0.6198611111111111,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.425
+},
+"language": {
+"Chinese": 0.518790207475404,
+"English": 0.5345358589820849
+}
+},
+"pass@2": 0.25466666666666665,
+"BoN-3": {
+"overall_metric": 0.5609610306922399,
+"token_length": {
+"8k": 0.6386076789983793,
+"16k": 0.5860184400135818,
+"32k": 0.6210153091497477,
+"64k": 0.5871225156541778,
+"128k": 0.4913536643924111,
+"256k": 0.4416485759451446
+},
+"contextual_requirement": {
+"Full": 0.5370910562726694,
+"Partial": 0.5913409981353319
+},
+"difficulty": {
+"Easy": 0.7100406216614994,
+"Moderate": 0.5174437099567866,
+"Hard": 0.5265960486680222,
+"Extreme": 0.44880562762488274
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.815117939706351,
+"T2. Sequencing & Structure Reconstruction": 0.8064498061783285,
+"T3. Evidence-Grounded QA": 0.6416666666666667,
+"T4. Summarization & Synthesis": 0.5680717589841464,
+"T5. Attribution & Citation Alignment": 0.604565984118908,
+"T6. Aggregation & Clustering": 0.5283992168363204,
+"T7. Consistency & Compliance Checking": 0.35872230397543203,
+"T8. Structured & Numeric Reasoning": 0.28935185185185186,
+"T9. Version & Code Diff Analysis": 0.6707362245587523,
+"T10. Rule Induction & In-Context Learning": 0.6406944444444443,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.5
+},
+"language": {
+"Chinese": 0.5609588769304155,
+"English": 0.5609631844540666
+}
+},
+"pass@3": 0.286
+}
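The `*_summary.json` files follow the schema shown above (overall metric, per-iteration metrics, per-axis breakdowns, BoN-1/2/3 blocks, and pass@k). A small sketch that loads one summary and prints the headline numbers; the file path is illustrative:

```python
# Minimal sketch: read a *_summary.json (keys taken from the JSON shown above).
import json
from pathlib import Path

summary = json.loads(
    Path("eval/output/GLM-4.6/nonthinking_context-120000_bon-3_summary.json").read_text()
)

print("overall:", summary["average_overall_metric"])
for k in (1, 2, 3):
    print(f"pass@{k}:", summary[f"pass@{k}"])
for length, score in summary["average_token_length_metric"].items():
    print(f"{length:>5}: {score:.3f}")
```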
eval/output/GLM-4.6/thinking_context-120000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82a20685e3fdcc5e3e873ad009ed2fb6d5c14273e04c3d9966918b39860dcaf6
+size 54877694
eval/output/GLM-4.6/thinking_context-120000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f4eace753fab028a0f762d98a31b950f52ee9ae296d0ea58983c35d85f0e6a5
+size 54790669
eval/output/GLM-4.6/thinking_context-120000_bon-3_summary.json
ADDED
@@ -0,0 +1,164 @@
+{
+"date": "2025-12-08",
+"total_questions_num": 1500,
+"inference_iterations": 3,
+"total_samples_num": 4500,
+"fail_samples_num": 0,
+"inference_inconsistent_samples_num": 0,
+"average_overall_metric": 0.5820993757625644,
+"inference_iteration_1_overall_metric": 0.5900318347862288,
+"inference_iteration_2_overall_metric": 0.5774825139114689,
+"inference_iteration_3_overall_metric": 0.5787837785899949,
+"average_token_length_metric": {
+"8k": 0.7122784818137915,
+"16k": 0.6603518496747058,
+"32k": 0.6352743108645184,
+"64k": 0.5897286272690893,
+"128k": 0.475467875017661,
+"256k": 0.4194951099356217
+},
+"average_contextual_requirement_metric": {
+"Full": 0.5470143190278319,
+"Partial": 0.6267530843340428
+},
+"average_difficulty_metric": {
+"Easy": 0.7978473417092227,
+"Moderate": 0.6094768922677877,
+"Hard": 0.4892370620605133,
+"Extreme": 0.3887688912252786
+},
+"average_primary_task_metric": {
+"T1. Retrieval & Ranking": 0.8197327977970514,
+"T2. Sequencing & Structure Reconstruction": 0.8006519321293782,
+"T3. Evidence-Grounded QA": 0.538888888888889,
+"T4. Summarization & Synthesis": 0.5408566771607968,
+"T5. Attribution & Citation Alignment": 0.5337112988588841,
+"T6. Aggregation & Clustering": 0.5397680321862239,
+"T7. Consistency & Compliance Checking": 0.380513781495624,
+"T8. Structured & Numeric Reasoning": 0.6123456790123456,
+"T9. Version & Code Diff Analysis": 0.6754501038965057,
+"T10. Rule Induction & In-Context Learning": 0.6013425925925924,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.46666666666666673
+},
+"average_language_metric": {
+"Chinese": 0.5991918774535788,
+"English": 0.5650068740715505
+},
+"BoN-1": {
+"overall_metric": 0.5900318347862288,
+"token_length": {
+"8k": 0.7190910013269595,
+"16k": 0.6680291983169964,
+"32k": 0.6447298296516131,
+"64k": 0.5905857251798682,
+"128k": 0.4766512837488421,
+"256k": 0.4411039704930917
+},
+"contextual_requirement": {
+"Full": 0.5455741223917344,
+"Partial": 0.6466143778337665
+},
+"difficulty": {
+"Easy": 0.8187580590040169,
+"Moderate": 0.6184177733393077,
+"Hard": 0.4890540077544932,
+"Extreme": 0.38715232500749663
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.8264146064969754,
+"T2. Sequencing & Structure Reconstruction": 0.8039418272654707,
+"T3. Evidence-Grounded QA": 0.55,
+"T4. Summarization & Synthesis": 0.5427878753470068,
+"T5. Attribution & Citation Alignment": 0.5419657251498339,
+"T6. Aggregation & Clustering": 0.5399584229450126,
+"T7. Consistency & Compliance Checking": 0.37011974365960826,
+"T8. Structured & Numeric Reasoning": 0.6416666666666667,
+"T9. Version & Code Diff Analysis": 0.693087317328304,
+"T10. Rule Induction & In-Context Learning": 0.6145833333333334,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.475
+},
+"language": {
+"Chinese": 0.6075617792305364,
+"English": 0.5725018903419208
+}
+},
+"pass@1": 0.36466666666666664,
+"BoN-2": {
+"overall_metric": 0.6659160211594685,
+"token_length": {
+"8k": 0.7935291454736249,
+"16k": 0.7469698033059613,
+"32k": 0.7147019641303554,
+"64k": 0.6716314717341791,
+"128k": 0.5707409059251077,
+"256k": 0.497922836387586
+},
+"contextual_requirement": {
+"Full": 0.6309312423015924,
+"Partial": 0.7104421033422219
+},
+"difficulty": {
+"Easy": 0.8923409007483016,
+"Moderate": 0.7327459154582487,
+"Hard": 0.5632013922542815,
+"Extreme": 0.4414476037490934
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.880882952904511,
+"T2. Sequencing & Structure Reconstruction": 0.8476685999185997,
+"T3. Evidence-Grounded QA": 0.6666666666666666,
+"T4. Summarization & Synthesis": 0.5583277247093672,
+"T5. Attribution & Citation Alignment": 0.6177785331058078,
+"T6. Aggregation & Clustering": 0.6268647075743846,
+"T7. Consistency & Compliance Checking": 0.4566279411326888,
+"T8. Structured & Numeric Reasoning": 0.7217592592592593,
+"T9. Version & Code Diff Analysis": 0.7464145919055787,
+"T10. Rule Induction & In-Context Learning": 0.7400000000000001,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.5583333333333333
+},
+"language": {
+"Chinese": 0.6881369643762367,
+"English": 0.6436950779427024
+}
+},
+"pass@2": 0.43866666666666665,
+"BoN-3": {
+"overall_metric": 0.706844154531168,
+"token_length": {
+"8k": 0.8222844269368486,
+"16k": 0.7772591556882481,
+"32k": 0.7466099276083229,
+"64k": 0.7234900151981559,
+"128k": 0.62014724878678,
+"256k": 0.551274152968658
+},
+"contextual_requirement": {
+"Full": 0.6725119993190923,
+"Partial": 0.7505396248010854
+},
+"difficulty": {
+"Easy": 0.9285919042851156,
+"Moderate": 0.7918869916930842,
+"Hard": 0.6026672046571206,
+"Extreme": 0.4764972072411785
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.9055704999975606,
+"T2. Sequencing & Structure Reconstruction": 0.8679013780100734,
+"T3. Evidence-Grounded QA": 0.7416666666666667,
+"T4. Summarization & Synthesis": 0.5683880603721403,
+"T5. Attribution & Citation Alignment": 0.6545164467516165,
+"T6. Aggregation & Clustering": 0.6671955091257745,
+"T7. Consistency & Compliance Checking": 0.5102082868832639,
+"T8. Structured & Numeric Reasoning": 0.7717592592592593,
+"T9. Version & Code Diff Analysis": 0.7612642969391072,
+"T10. Rule Induction & In-Context Learning": 0.7541666666666667,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.6583333333333333
+},
+"language": {
+"Chinese": 0.7225009620618484,
+"English": 0.6911873470004898
+}
+},
+"pass@3": 0.48133333333333334
+}
eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7d4a0edd3eb987abdb683e22e5c303ed67e98eb8faf380aa37242208e2b782e
+size 15849538
eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2973d40772ea83c97ccf6da17559c6090de911364633b90bc1c0c4fcc58abc88
+size 15759142
eval/output/Kimi-K2-Instruct-0905-128k/nonthinking_context-120000_bon-3_summary.json
ADDED
@@ -0,0 +1,164 @@
+{
+"date": "2025-12-08",
+"total_questions_num": 1500,
+"inference_iterations": 3,
+"total_samples_num": 4500,
+"fail_samples_num": 66,
+"inference_inconsistent_samples_num": 0,
+"average_overall_metric": 0.5004423181466298,
+"inference_iteration_1_overall_metric": 0.505055755589836,
+"inference_iteration_2_overall_metric": 0.5008557440077253,
+"inference_iteration_3_overall_metric": 0.49541545484232746,
+"average_token_length_metric": {
+"8k": 0.526987669317299,
+"16k": 0.5564418660666185,
+"32k": 0.525462011662911,
+"64k": 0.47828682864933986,
+"128k": 0.48407619984559663,
+"256k": 0.4313993333380157
+},
+"average_contextual_requirement_metric": {
+"Full": 0.45766544557303007,
+"Partial": 0.5548856105130303
+},
+"average_difficulty_metric": {
+"Easy": 0.6589494133685208,
+"Moderate": 0.45228486559842923,
+"Hard": 0.4416150597335389,
+"Extreme": 0.3970745035349534
+},
+"average_primary_task_metric": {
+"T1. Retrieval & Ranking": 0.7367165015790406,
+"T2. Sequencing & Structure Reconstruction": 0.7437940454607117,
+"T3. Evidence-Grounded QA": 0.5472222222222223,
+"T4. Summarization & Synthesis": 0.5118823270011625,
+"T5. Attribution & Citation Alignment": 0.5129585068748002,
+"T6. Aggregation & Clustering": 0.494076726290264,
+"T7. Consistency & Compliance Checking": 0.3659774931149578,
+"T8. Structured & Numeric Reasoning": 0.22422839506172845,
+"T9. Version & Code Diff Analysis": 0.5943555260685875,
+"T10. Rule Induction & In-Context Learning": 0.5182870370370369,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.4638888888888891
+},
+"average_language_metric": {
+"Chinese": 0.5044290569806512,
+"English": 0.496455579312609
+},
+"BoN-1": {
+"overall_metric": 0.505055755589836,
+"token_length": {
+"8k": 0.5266072042106631,
+"16k": 0.5589770254967287,
+"32k": 0.516582063985329,
+"64k": 0.5006172327672036,
+"128k": 0.49701845855046856,
+"256k": 0.43053254852862716
+},
+"contextual_requirement": {
+"Full": 0.45814290534952534,
+"Partial": 0.564763019532052
+},
+"difficulty": {
+"Easy": 0.6616739681199717,
+"Moderate": 0.46440551828652016,
+"Hard": 0.4514696235898542,
+"Extreme": 0.3953837835196745
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.7386033391656766,
+"T2. Sequencing & Structure Reconstruction": 0.751870444370444,
+"T3. Evidence-Grounded QA": 0.5583333333333333,
+"T4. Summarization & Synthesis": 0.5120788388123152,
+"T5. Attribution & Citation Alignment": 0.5064240409764144,
+"T6. Aggregation & Clustering": 0.4919914077691852,
+"T7. Consistency & Compliance Checking": 0.3705478575327794,
+"T8. Structured & Numeric Reasoning": 0.2162037037037037,
+"T9. Version & Code Diff Analysis": 0.5944391613729382,
+"T10. Rule Induction & In-Context Learning": 0.55,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.48333333333333334
+},
+"language": {
+"Chinese": 0.5114727139139933,
+"English": 0.49863879726568106
+}
+},
+"pass@1": 0.238,
+"BoN-2": {
+"overall_metric": 0.5489408346415129,
+"token_length": {
+"8k": 0.5733458870975628,
+"16k": 0.5939483234863656,
+"32k": 0.5625151598149565,
+"64k": 0.5471133669057122,
+"128k": 0.5489282514642873,
+"256k": 0.46779401908019724
+},
+"contextual_requirement": {
+"Full": 0.5052132874423421,
+"Partial": 0.6045940765313681
+},
+"difficulty": {
+"Easy": 0.7080751777551884,
+"Moderate": 0.5154193459081605,
+"Hard": 0.4961493116589808,
+"Extreme": 0.4313083505621949
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.7736759657166097,
+"T2. Sequencing & Structure Reconstruction": 0.7854749417249417,
+"T3. Evidence-Grounded QA": 0.5916666666666667,
+"T4. Summarization & Synthesis": 0.5238001030026453,
+"T5. Attribution & Citation Alignment": 0.574376842245779,
+"T6. Aggregation & Clustering": 0.5434486544883844,
+"T7. Consistency & Compliance Checking": 0.42028716701288094,
+"T8. Structured & Numeric Reasoning": 0.26157407407407407,
+"T9. Version & Code Diff Analysis": 0.6414677369659328,
+"T10. Rule Induction & In-Context Learning": 0.5666666666666667,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.5666666666666667
+},
+"language": {
+"Chinese": 0.5522083464052925,
+"English": 0.5456733228777346
+}
+},
+"pass@2": 0.2753333333333333,
+"BoN-3": {
+"overall_metric": 0.5738198351626509,
+"token_length": {
+"8k": 0.6020628943332411,
+"16k": 0.6177752499978515,
+"32k": 0.5988410055159528,
+"64k": 0.569819571424144,
+"128k": 0.5636502733409089,
+"256k": 0.49077001636381384
+},
+"contextual_requirement": {
+"Full": 0.5323897336597776,
+"Partial": 0.6265490552572197
+},
+"difficulty": {
+"Easy": 0.7379827116229757,
+"Moderate": 0.5428010392089135,
+"Hard": 0.513578700854544,
+"Extreme": 0.45392804741673737
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.7811024237907935,
+"T2. Sequencing & Structure Reconstruction": 0.793894253894254,
+"T3. Evidence-Grounded QA": 0.6083333333333333,
+"T4. Summarization & Synthesis": 0.5329092193343102,
+"T5. Attribution & Citation Alignment": 0.6015461544150912,
+"T6. Aggregation & Clustering": 0.577058241910845,
+"T7. Consistency & Compliance Checking": 0.45580929677574705,
+"T8. Structured & Numeric Reasoning": 0.2990740740740741,
+"T9. Version & Code Diff Analysis": 0.6674668022910347,
+"T10. Rule Induction & In-Context Learning": 0.5979166666666667,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.5916666666666667
+},
+"language": {
+"Chinese": 0.5797006431690659,
+"English": 0.5679390271562386
+}
+},
+"pass@3": 0.3
+}
eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60ce1e34fb4157d8eb04dcb31f567fdc686cd77982b5afa8a1caf28153eb4112
+size 22656085
eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f643947f0da31f2053d3be1c5a242e18bbc41eda4723c6bfe9e294a26f0bb9ba
+size 22567993
eval/output/Kimi-K2-Instruct-0905-128k/thinking_context-120000_bon-3_summary.json
ADDED
@@ -0,0 +1,164 @@
+{
+"date": "2025-12-08",
+"total_questions_num": 1500,
+"inference_iterations": 3,
+"total_samples_num": 4500,
+"fail_samples_num": 65,
+"inference_inconsistent_samples_num": 0,
+"average_overall_metric": 0.547353303021338,
+"inference_iteration_1_overall_metric": 0.5511604537655028,
+"inference_iteration_2_overall_metric": 0.5448899842789734,
+"inference_iteration_3_overall_metric": 0.5460094710195365,
+"average_token_length_metric": {
+"8k": 0.5980622517201026,
+"16k": 0.59148018537737,
+"32k": 0.5868061560339518,
+"64k": 0.5570387159812348,
+"128k": 0.49786820056428005,
+"256k": 0.45286430845108866
+},
+"average_contextual_requirement_metric": {
+"Full": 0.4953767694830531,
+"Partial": 0.6135052547973372
+},
+"average_difficulty_metric": {
+"Easy": 0.7602885283677935,
+"Moderate": 0.5293031027161125,
+"Hard": 0.45056273913644485,
+"Extreme": 0.38952194446214045
+},
+"average_primary_task_metric": {
+"T1. Retrieval & Ranking": 0.7798365160173625,
+"T2. Sequencing & Structure Reconstruction": 0.7593473807497786,
+"T3. Evidence-Grounded QA": 0.4749999999999999,
+"T4. Summarization & Synthesis": 0.5012397165886582,
+"T5. Attribution & Citation Alignment": 0.5844845622927546,
+"T6. Aggregation & Clustering": 0.5175781242603432,
+"T7. Consistency & Compliance Checking": 0.37370624575436445,
+"T8. Structured & Numeric Reasoning": 0.5476851851851853,
+"T9. Version & Code Diff Analysis": 0.6197574830220355,
+"T10. Rule Induction & In-Context Learning": 0.5526851851851853,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.41111111111111104
+},
+"average_language_metric": {
+"Chinese": 0.5297125752167522,
+"English": 0.564994030825924
+},
+"BoN-1": {
+"overall_metric": 0.5511604537655028,
+"token_length": {
+"8k": 0.6190520514681797,
+"16k": 0.5879732174717002,
+"32k": 0.579525685141092,
+"64k": 0.5639104499109322,
+"128k": 0.5141745143729862,
+"256k": 0.44232680422812864
+},
+"contextual_requirement": {
+"Full": 0.5025475373263892,
+"Partial": 0.6130314383243751
+},
+"difficulty": {
+"Easy": 0.7592976039413494,
+"Moderate": 0.5359745373414339,
+"Hard": 0.44782045261166586,
+"Extreme": 0.4010056442092694
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.7689832072037757,
+"T2. Sequencing & Structure Reconstruction": 0.7521662442495773,
+"T3. Evidence-Grounded QA": 0.49166666666666664,
+"T4. Summarization & Synthesis": 0.5002696227247343,
+"T5. Attribution & Citation Alignment": 0.5800230859795266,
+"T6. Aggregation & Clustering": 0.5329070216713895,
+"T7. Consistency & Compliance Checking": 0.38804362024312816,
+"T8. Structured & Numeric Reasoning": 0.5375000000000001,
+"T9. Version & Code Diff Analysis": 0.6142764379282863,
+"T10. Rule Induction & In-Context Learning": 0.5694444444444445,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.425
+},
+"language": {
+"Chinese": 0.5293130707601792,
+"English": 0.573007836770827
+}
+},
+"pass@1": 0.308,
+"BoN-2": {
+"overall_metric": 0.620379858032724,
+"token_length": {
+"8k": 0.6704509321934469,
+"16k": 0.6668915766140909,
+"32k": 0.6647194788756731,
+"64k": 0.6485396466392949,
+"128k": 0.5568474087312739,
+"256k": 0.5148301051425674
+},
+"contextual_requirement": {
+"Full": 0.5669398879170983,
+"Partial": 0.688394365452613
+},
+"difficulty": {
+"Easy": 0.8309630284689782,
+"Moderate": 0.6205391310709226,
+"Hard": 0.5188402380022156,
+"Extreme": 0.4562840219656874
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.8302030275641743,
+"T2. Sequencing & Structure Reconstruction": 0.8157491119991119,
+"T3. Evidence-Grounded QA": 0.5833333333333334,
+"T4. Summarization & Synthesis": 0.515740105792992,
+"T5. Attribution & Citation Alignment": 0.6717843515169009,
+"T6. Aggregation & Clustering": 0.5928224314514636,
+"T7. Consistency & Compliance Checking": 0.4531330963527468,
+"T8. Structured & Numeric Reasoning": 0.6208333333333335,
+"T9. Version & Code Diff Analysis": 0.6944216701628937,
+"T10. Rule Induction & In-Context Learning": 0.6683333333333333,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.475
+},
+"language": {
+"Chinese": 0.6027681730481206,
+"English": 0.637991543017329
+}
+},
+"pass@2": 0.37533333333333335,
+"BoN-3": {
+"overall_metric": 0.6549475935177054,
+"token_length": {
+"8k": 0.705933557831548,
+"16k": 0.6894085857758541,
+"32k": 0.7014687798467849,
+"64k": 0.6759348401339255,
+"128k": 0.5954510837201171,
+"256k": 0.5614887137980065
+},
+"contextual_requirement": {
+"Full": 0.6068796404416824,
+"Partial": 0.7161249883417369
+},
+"difficulty": {
+"Easy": 0.8606832735104384,
+"Moderate": 0.6630299875300573,
+"Hard": 0.5651351517138449,
+"Extreme": 0.4832552890978184
+},
+"primary_task": {
+"T1. Retrieval & Ranking": 0.8477101837140745,
+"T2. Sequencing & Structure Reconstruction": 0.85363270988271,
+"T3. Evidence-Grounded QA": 0.6083333333333333,
+"T4. Summarization & Synthesis": 0.5228953260531338,
+"T5. Attribution & Citation Alignment": 0.7072394690114919,
+"T6. Aggregation & Clustering": 0.6303000464290786,
+"T7. Consistency & Compliance Checking": 0.5140825952835215,
+"T8. Structured & Numeric Reasoning": 0.6597222222222222,
+"T9. Version & Code Diff Analysis": 0.7183766010743463,
+"T10. Rule Induction & In-Context Learning": 0.7141666666666666,
+"T11. Dialogue Memory & Long-Horizon Tracking": 0.5083333333333333
+},
+"language": {
+"Chinese": 0.635411735515484,
+"English": 0.6744834515199285
+}
+},
+"pass@3": 0.4013333333333333
+}
eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5276fe9420fff0e0be206db79f3d0a567bb48bb493905d31d52fecaeb440ff84
+size 15831236
eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25466905caf762220e55e1ae1e2687ab1fe9c3507810dd35a0d3fc68b0822044
+size 15740866
eval/output/Kimi-K2-Instruct-0905/nonthinking_context-224000_bon-3_summary.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-10",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 67,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.5009443422920304,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.5011015308802983,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.49751406897312744,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.5042174270226657,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.5193469215810047,
|
| 14 |
+
"16k": 0.5532046525085649,
|
| 15 |
+
"32k": 0.5393076869166767,
|
| 16 |
+
"64k": 0.45954315717941974,
|
| 17 |
+
"128k": 0.4753071835553842,
|
| 18 |
+
"256k": 0.4589564520111373
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.45716785858755954,
|
| 22 |
+
"Partial": 0.5566598670068132
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.6491770551060967,
|
| 26 |
+
"Moderate": 0.4905460752618018,
|
| 27 |
+
"Hard": 0.43431147544571097,
|
| 28 |
+
"Extreme": 0.3960536599333797
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.7424451818013483,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.7428685203685202,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.5472222222222223,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5115863734748296,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.5310286936858898,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.481867796853936,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.36661627375742456,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.24089506172839517,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.607908662662019,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.5085648148148147,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.43611111111111117
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.502898779221276,
|
| 45 |
+
"English": 0.4989899053627864
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.5011015308802983,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.5266204875905753,
|
| 51 |
+
"16k": 0.5431095505457598,
|
| 52 |
+
"32k": 0.5392474254502099,
|
| 53 |
+
"64k": 0.46556965866611255,
|
| 54 |
+
"128k": 0.48161604654304363,
|
| 55 |
+
"256k": 0.45044601648609367
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.45533929299371073,
|
| 59 |
+
"Partial": 0.5593443790995934
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.6458118886638174,
|
| 63 |
+
"Moderate": 0.4926159658473186,
|
| 64 |
+
"Hard": 0.42892012991547307,
|
| 65 |
+
"Extreme": 0.4021536227747844
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.734408014297419,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.7401580826580821,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.5666666666666667,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.5077956681433179,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.5196800787679438,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.4941189417411527,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.3706991980056276,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.22268518518518515,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.6228334158501364,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.5243055555555556,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.4166666666666667
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.49773496712013804,
|
| 82 |
+
"English": 0.5044680946404603
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.23,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.5483962332490746,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.5725846797858735,
|
| 90 |
+
"16k": 0.6070506360109902,
|
| 91 |
+
"32k": 0.5785761030801342,
|
| 92 |
+
"64k": 0.5092135264066221,
|
| 93 |
+
"128k": 0.5131800223023555,
|
| 94 |
+
"256k": 0.5097724319084751
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.49887561173193595,
|
| 98 |
+
"Partial": 0.6114224788163434
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.7046240276873015,
|
| 102 |
+
"Moderate": 0.5462638174512109,
|
| 103 |
+
"Hard": 0.4707831873116643,
|
| 104 |
+
"Extreme": 0.4364568187575384
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.7623386988134085,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.7700949513449513,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.6,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5249959487118497,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.5953546052259285,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.5412045721601162,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.41849900851913713,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.28935185185185186,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.6584466738317519,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.5701388888888889,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.5
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.5438314702987085,
|
| 121 |
+
"English": 0.552960996199442
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.27666666666666667,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.5729921291255787,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.5918227174634976,
|
| 129 |
+
"16k": 0.6198567950677695,
|
| 130 |
+
"32k": 0.6115768945303457,
|
| 131 |
+
"64k": 0.5284625404433138,
|
| 132 |
+
"128k": 0.558149430962295,
|
| 133 |
+
"256k": 0.528084396286257
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.524120078602167,
|
| 137 |
+
"Partial": 0.6351929207008328
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.728216785648972,
|
| 141 |
+
"Moderate": 0.5755547819370206,
|
| 142 |
+
"Hard": 0.5066701999617829,
|
| 143 |
+
"Extreme": 0.45151519711603216
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.7860047126136753,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.7853528878528878,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.625,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5322270993764735,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.6208079019292253,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.5652450687006129,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.44884599567602773,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.31157407407407406,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.6765946379547446,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.5895833333333333,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.5583333333333333
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.5755180331103958,
|
| 160 |
+
"English": 0.5704662251407642
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.3
|
| 164 |
+
}
|
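The *_summary.json files added in this commit all share the layout shown above: per-iteration metrics, BoN-N blocks, and pass@N rates, each broken down by token length, contextual requirement, difficulty, primary task, and language. A minimal sketch in Python for reading one of them and printing the headline numbers; the path is one of the files added below and the field names come straight from the JSON, but this loader is illustrative and not part of the repository:

import json
from pathlib import Path

# One of the summary files added in this commit; any *_summary.json under
# eval/output/ has the same structure.
path = Path("eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_summary.json")
summary = json.loads(path.read_text(encoding="utf-8"))

print("average_overall_metric:", summary["average_overall_metric"])
print("pass@1 / pass@2 / pass@3:",
      summary["pass@1"], summary["pass@2"], summary["pass@3"])
print("BoN-3 overall:", summary["BoN-3"]["overall_metric"])
for bucket, score in summary["average_token_length_metric"].items():
    print(f"  {bucket:>5}: {score:.3f}")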
eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:657ceb0ddfc1bd4e9f2b476ea79dc7257ea05611bec60fef368a2f297bff247e
+size 22361329
eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:962721513be04a16b8cce28a4a9d69276a33f2605517b1fc05cd327ef826eae4
+size 22273247
eval/output/Kimi-K2-Instruct-0905/thinking_context-224000_bon-3_summary.json
ADDED
@@ -0,0 +1,164 @@
+{
+    "date": "2025-12-08",
+    "total_questions_num": 1500,
+    "inference_iterations": 3,
+    "total_samples_num": 4500,
+    "fail_samples_num": 69,
+    "inference_inconsistent_samples_num": 0,
+    "average_overall_metric": 0.5553060678788313,
+    "inference_iteration_1_overall_metric": 0.558917739810572,
+    "inference_iteration_2_overall_metric": 0.5552262066724464,
+    "inference_iteration_3_overall_metric": 0.5517742571534756,
+    "average_token_length_metric": {
+        "8k": 0.5978532013581613,
+        "16k": 0.5816609532803436,
+        "32k": 0.5872894997726004,
+        "64k": 0.5360933501085343,
+        "128k": 0.522886026665569,
+        "256k": 0.5060533760877814
+    },
+    "average_contextual_requirement_metric": {
+        "Full": 0.5076499747745465,
+        "Partial": 0.6159592772842868
+    },
+    "average_difficulty_metric": {
+        "Easy": 0.7729188134795828,
+        "Moderate": 0.5733088402612271,
+        "Hard": 0.4375074213815535,
+        "Extreme": 0.38246894115073926
+    },
+    "average_primary_task_metric": {
+        "T1. Retrieval & Ranking": 0.758377055109974,
+        "T2. Sequencing & Structure Reconstruction": 0.7423694091857211,
+        "T3. Evidence-Grounded QA": 0.4861111111111112,
+        "T4. Summarization & Synthesis": 0.5011656658098056,
+        "T5. Attribution & Citation Alignment": 0.6197584764672828,
+        "T6. Aggregation & Clustering": 0.5164556923382113,
+        "T7. Consistency & Compliance Checking": 0.3547519606397262,
+        "T8. Structured & Numeric Reasoning": 0.5962962962962963,
+        "T9. Version & Code Diff Analysis": 0.6299270957790389,
+        "T10. Rule Induction & In-Context Learning": 0.5606944444444444,
+        "T11. Dialogue Memory & Long-Horizon Tracking": 0.44166666666666654
+    },
+    "average_language_metric": {
+        "Chinese": 0.5409680916435047,
+        "English": 0.5696440441141594
+    },
+    "BoN-1": {
+        "overall_metric": 0.558917739810572,
+        "token_length": {
+            "8k": 0.6032704706738693,
+            "16k": 0.5808397170448323,
+            "32k": 0.5927696772272222,
+            "64k": 0.5485389223926213,
+            "128k": 0.5293584568340762,
+            "256k": 0.49872919469081073
+        },
+        "contextual_requirement": {
+            "Full": 0.5058707162683681,
+            "Partial": 0.6264321334097424
+        },
+        "difficulty": {
+            "Easy": 0.7833558783440772,
+            "Moderate": 0.5768808845916201,
+            "Hard": 0.4381739958885578,
+            "Extreme": 0.3805641270346401
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.7528903747933631,
+            "T2. Sequencing & Structure Reconstruction": 0.7690795456301782,
+            "T3. Evidence-Grounded QA": 0.525,
+            "T4. Summarization & Synthesis": 0.5001505956406523,
+            "T5. Attribution & Citation Alignment": 0.6016113058386018,
+            "T6. Aggregation & Clustering": 0.5066860396628982,
+            "T7. Consistency & Compliance Checking": 0.37783095340307493,
+            "T8. Structured & Numeric Reasoning": 0.5875000000000001,
+            "T9. Version & Code Diff Analysis": 0.6056866583526187,
+            "T10. Rule Induction & In-Context Learning": 0.5740277777777777,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.45
+        },
+        "language": {
+            "Chinese": 0.5344377355706056,
+            "English": 0.5833977440505398
+        }
+    },
+    "pass@1": 0.30733333333333335,
+    "BoN-2": {
+        "overall_metric": 0.6315444476656525,
+        "token_length": {
+            "8k": 0.6765293955798586,
+            "16k": 0.6653958948787116,
+            "32k": 0.6614274068144226,
+            "64k": 0.6099720675525434,
+            "128k": 0.5887552831784975,
+            "256k": 0.5871866379898839
+        },
+        "contextual_requirement": {
+            "Full": 0.5906087497944075,
+            "Partial": 0.6836444267745122
+        },
+        "difficulty": {
+            "Easy": 0.8546630775122501,
+            "Moderate": 0.6784971908255039,
+            "Hard": 0.5110824387985737,
+            "Extreme": 0.4354103526732191
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.7926072573330768,
+            "T2. Sequencing & Structure Reconstruction": 0.814456423206423,
+            "T3. Evidence-Grounded QA": 0.5666666666666667,
+            "T4. Summarization & Synthesis": 0.5156065007848037,
+            "T5. Attribution & Citation Alignment": 0.705413217864198,
+            "T6. Aggregation & Clustering": 0.6176306197741449,
+            "T7. Consistency & Compliance Checking": 0.42951044567324476,
+            "T8. Structured & Numeric Reasoning": 0.6861111111111111,
+            "T9. Version & Code Diff Analysis": 0.7066217095721875,
+            "T10. Rule Induction & In-Context Learning": 0.6680555555555556,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.525
+        },
+        "language": {
+            "Chinese": 0.617638270540212,
+            "English": 0.6454506247910954
+        }
+    },
+    "pass@2": 0.37066666666666664,
+    "BoN-3": {
+        "overall_metric": 0.6596591269299542,
+        "token_length": {
+            "8k": 0.6994761842119754,
+            "16k": 0.6880103332979634,
+            "32k": 0.6831628614701334,
+            "64k": 0.6414281237947528,
+            "128k": 0.6223896794908902,
+            "256k": 0.6234875793140215
+        },
+        "contextual_requirement": {
+            "Full": 0.6186943779973499,
+            "Partial": 0.7117960801169108
+        },
+        "difficulty": {
+            "Easy": 0.865163305432102,
+            "Moderate": 0.7223592192252923,
+            "Hard": 0.5480741090923853,
+            "Extreme": 0.4666471483928459
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.8175679720885881,
+            "T2. Sequencing & Structure Reconstruction": 0.8413347300847297,
+            "T3. Evidence-Grounded QA": 0.6,
+            "T4. Summarization & Synthesis": 0.5241839616339089,
+            "T5. Attribution & Citation Alignment": 0.7286969407202896,
+            "T6. Aggregation & Clustering": 0.6482898910584165,
+            "T7. Consistency & Compliance Checking": 0.47389920059132556,
+            "T8. Structured & Numeric Reasoning": 0.7092592592592593,
+            "T9. Version & Code Diff Analysis": 0.7448385112889893,
+            "T10. Rule Induction & In-Context Learning": 0.7002777777777779,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.5416666666666666
+        },
+        "language": {
+            "Chinese": 0.646426154128056,
+            "English": 0.6728920997318576
+        }
+    },
+    "pass@3": 0.4013333333333333
+}
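Each summary records "inference_iterations": 3 together with per-iteration overall metrics, BoN-1/2/3 blocks, and pass@1/2/3 rates. The exact scoring and selection rule is not part of this diff (the per-sample scores live in the *_evaluation.jsonl LFS files above), so the sketch below is only an illustration of the usual best-of-N / any-of-N style of aggregation, using made-up per-question scores and an assumed pass criterion:

# Illustrative only: hypothetical per-question scores for 3 inference iterations.
# The actual aggregation behind the BoN-N and pass@N fields may differ.
scores = {
    "q1": [0.2, 0.8, 0.5],
    "q2": [0.0, 1.0, 0.0],
    "q3": [0.6, 0.4, 0.9],
}
PASS_THRESHOLD = 1.0  # assumed definition of a "pass"; not confirmed by this repo

def bon(n: int) -> float:
    # Best score among the first n iterations, averaged over questions.
    return sum(max(s[:n]) for s in scores.values()) / len(scores)

def pass_at(n: int) -> float:
    # Fraction of questions with at least one passing iteration among the first n.
    return sum(any(x >= PASS_THRESHOLD for x in s[:n]) for s in scores.values()) / len(scores)

print(bon(1), bon(2), bon(3))          # non-decreasing in n by construction
print(pass_at(1), pass_at(2), pass_at(3))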
eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:491a9aad0e2b8afc5650c7050f653b8298af60fca85176dc81f40558d08560bf
+size 16990990
eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd100d47ab820480677bf5e227361f7bf127dc36554d9be9066bb70c1147ac2b
+size 16898641
eval/output/Qwen3-14B/nonthinking_context-120000_bon-3_summary.json
ADDED
@@ -0,0 +1,164 @@
+{
+    "date": "2025-12-08",
+    "total_questions_num": 1500,
+    "inference_iterations": 3,
+    "total_samples_num": 4500,
+    "fail_samples_num": 0,
+    "inference_inconsistent_samples_num": 0,
+    "average_overall_metric": 0.371082468941152,
+    "inference_iteration_1_overall_metric": 0.3702373257200545,
+    "inference_iteration_2_overall_metric": 0.3726984925368399,
+    "inference_iteration_3_overall_metric": 0.37031158856656204,
+    "average_token_length_metric": {
+        "8k": 0.43147751060158823,
+        "16k": 0.4033401435644649,
+        "32k": 0.38388990895019404,
+        "64k": 0.35197621981023713,
+        "128k": 0.3411527865206871,
+        "256k": 0.3146582441997405
+    },
+    "average_contextual_requirement_metric": {
+        "Full": 0.34812497724125846,
+        "Partial": 0.4003010947410168
+    },
+    "average_difficulty_metric": {
+        "Easy": 0.4843882954180315,
+        "Moderate": 0.29072923835932707,
+        "Hard": 0.35344272393957227,
+        "Extreme": 0.31132465435421575
+    },
+    "average_primary_task_metric": {
+        "T1. Retrieval & Ranking": 0.6723638502162732,
+        "T2. Sequencing & Structure Reconstruction": 0.6447956689096103,
+        "T3. Evidence-Grounded QA": 0.4888888888888889,
+        "T4. Summarization & Synthesis": 0.5233374417827854,
+        "T5. Attribution & Citation Alignment": 0.28649007131147347,
+        "T6. Aggregation & Clustering": 0.36581903676825483,
+        "T7. Consistency & Compliance Checking": 0.2131249616899122,
+        "T8. Structured & Numeric Reasoning": 0.08441358024691357,
+        "T9. Version & Code Diff Analysis": 0.354238942968118,
+        "T10. Rule Induction & In-Context Learning": 0.43171296296296297,
+        "T11. Dialogue Memory & Long-Horizon Tracking": 0.24166666666666667
+    },
+    "average_language_metric": {
+        "Chinese": 0.3760505297242663,
+        "English": 0.36611440815803775
+    },
+    "BoN-1": {
+        "overall_metric": 0.3702373257200545,
+        "token_length": {
+            "8k": 0.4365488155340593,
+            "16k": 0.40901874466031907,
+            "32k": 0.3764681078768914,
+            "64k": 0.3558737887299017,
+            "128k": 0.337849918010455,
+            "256k": 0.30566457950869874
+        },
+        "contextual_requirement": {
+            "Full": 0.34789877784316464,
+            "Partial": 0.39866820483609555
+        },
+        "difficulty": {
+            "Easy": 0.47550918682117166,
+            "Moderate": 0.2976353828419508,
+            "Hard": 0.3595591768355849,
+            "Extreme": 0.3096166654215201
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.6693651539947573,
+            "T2. Sequencing & Structure Reconstruction": 0.643656093318885,
+            "T3. Evidence-Grounded QA": 0.49166666666666664,
+            "T4. Summarization & Synthesis": 0.5246008307406761,
+            "T5. Attribution & Citation Alignment": 0.28080851312128013,
+            "T6. Aggregation & Clustering": 0.3666330157824754,
+            "T7. Consistency & Compliance Checking": 0.219629388990068,
+            "T8. Structured & Numeric Reasoning": 0.07824074074074075,
+            "T9. Version & Code Diff Analysis": 0.3648645953884854,
+            "T10. Rule Induction & In-Context Learning": 0.41458333333333336,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.24166666666666667
+        },
+        "language": {
+            "Chinese": 0.37263158698072796,
+            "English": 0.36784306445938036
+        }
+    },
+    "pass@1": 0.136,
+    "BoN-2": {
+        "overall_metric": 0.39747790211532286,
+        "token_length": {
+            "8k": 0.4586397357227334,
+            "16k": 0.4346767556872062,
+            "32k": 0.40654133431254835,
+            "64k": 0.3846278376492738,
+            "128k": 0.366368675809838,
+            "256k": 0.334013073510339
+        },
+        "contextual_requirement": {
+            "Full": 0.3762242907679459,
+            "Partial": 0.4245279529210758
+        },
+        "difficulty": {
+            "Easy": 0.5137672201120023,
+            "Moderate": 0.3211245068584676,
+            "Hard": 0.37927074472552935,
+            "Extreme": 0.33219733038914645
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.6959264095016384,
+            "T2. Sequencing & Structure Reconstruction": 0.6706657296788379,
+            "T3. Evidence-Grounded QA": 0.5333333333333333,
+            "T4. Summarization & Synthesis": 0.5372603569538371,
+            "T5. Attribution & Citation Alignment": 0.30622924870075346,
+            "T6. Aggregation & Clustering": 0.394716262565116,
+            "T7. Consistency & Compliance Checking": 0.23852214824749268,
+            "T8. Structured & Numeric Reasoning": 0.10046296296296296,
+            "T9. Version & Code Diff Analysis": 0.389089970943113,
+            "T10. Rule Induction & In-Context Learning": 0.46875,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.26666666666666666
+        },
+        "language": {
+            "Chinese": 0.39990507125254376,
+            "English": 0.39505073297810234
+        }
+    },
+    "pass@2": 0.152,
+    "BoN-3": {
+        "overall_metric": 0.4076945334257239,
+        "token_length": {
+            "8k": 0.47272207856160825,
+            "16k": 0.44496379279331655,
+            "32k": 0.4150202640804616,
+            "64k": 0.39561603549517055,
+            "128k": 0.3755358328446131,
+            "256k": 0.3423091967791733
+        },
+        "contextual_requirement": {
+            "Full": 0.38567226658065773,
+            "Partial": 0.435722873046717
+        },
+        "difficulty": {
+            "Easy": 0.5205174207555429,
+            "Moderate": 0.3404168914689614,
+            "Hard": 0.3867590355007207,
+            "Extreme": 0.34204218281858184
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.7035836621732522,
+            "T2. Sequencing & Structure Reconstruction": 0.6789876392507479,
+            "T3. Evidence-Grounded QA": 0.5416666666666666,
+            "T4. Summarization & Synthesis": 0.5440147604159721,
+            "T5. Attribution & Citation Alignment": 0.3324233311147572,
+            "T6. Aggregation & Clustering": 0.41234554444538063,
+            "T7. Consistency & Compliance Checking": 0.24338937917661474,
+            "T8. Structured & Numeric Reasoning": 0.10046296296296298,
+            "T9. Version & Code Diff Analysis": 0.41745877832271355,
+            "T10. Rule Induction & In-Context Learning": 0.46874999999999994,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.275
+        },
+        "language": {
+            "Chinese": 0.41071052140088854,
+            "English": 0.4046785454505592
+        }
+    },
+    "pass@3": 0.15933333333333333
+}
eval/output/Qwen3-14B/thinking_context-120000_bon-3_evaluation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89e33b714a9e17b5c3b59159ddd7260efc8346b0e9415fe023ccacd171bfe91a
+size 42921375
eval/output/Qwen3-14B/thinking_context-120000_bon-3_inference_1-of-1.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7e2326ec633e7b7422cb6dbb21b4e288e62443ca9adf5635ebcfb0ead982df5
+size 42831855
eval/output/Qwen3-14B/thinking_context-120000_bon-3_summary.json
ADDED
@@ -0,0 +1,164 @@
+{
+    "date": "2025-12-08",
+    "total_questions_num": 1500,
+    "inference_iterations": 3,
+    "total_samples_num": 4500,
+    "fail_samples_num": 19,
+    "inference_inconsistent_samples_num": 0,
+    "average_overall_metric": 0.47140163871696883,
+    "inference_iteration_1_overall_metric": 0.4702465037493788,
+    "inference_iteration_2_overall_metric": 0.4731184728103188,
+    "inference_iteration_3_overall_metric": 0.4708399395912094,
+    "average_token_length_metric": {
+        "8k": 0.5782430822549752,
+        "16k": 0.5324144227767025,
+        "32k": 0.497499382093641,
+        "64k": 0.4573465836705664,
+        "128k": 0.3964337558937265,
+        "256k": 0.36647260561220363
+    },
+    "average_contextual_requirement_metric": {
+        "Full": 0.434181327100322,
+        "Partial": 0.5187729444108845
+    },
+    "average_difficulty_metric": {
+        "Easy": 0.6954869771942226,
+        "Moderate": 0.39026206272645725,
+        "Hard": 0.3840987663314641,
+        "Extreme": 0.3365623987956807
+    },
+    "average_primary_task_metric": {
+        "T1. Retrieval & Ranking": 0.7452599144109371,
+        "T2. Sequencing & Structure Reconstruction": 0.690021191602685,
+        "T3. Evidence-Grounded QA": 0.46388888888888874,
+        "T4. Summarization & Synthesis": 0.5079917038055713,
+        "T5. Attribution & Citation Alignment": 0.4011786521228004,
+        "T6. Aggregation & Clustering": 0.456013454446653,
+        "T7. Consistency & Compliance Checking": 0.26448705908382575,
+        "T8. Structured & Numeric Reasoning": 0.4464506172839507,
+        "T9. Version & Code Diff Analysis": 0.5003275109836627,
+        "T10. Rule Induction & In-Context Learning": 0.4528703703703705,
+        "T11. Dialogue Memory & Long-Horizon Tracking": 0.3805555555555556
+    },
+    "average_language_metric": {
+        "Chinese": 0.43752943274977507,
+        "English": 0.5052738446841633
+    },
+    "BoN-1": {
+        "overall_metric": 0.4702465037493788,
+        "token_length": {
+            "8k": 0.5913655956747179,
+            "16k": 0.5095272462031484,
+            "32k": 0.5071120818375107,
+            "64k": 0.44784910454974686,
+            "128k": 0.41043482344109344,
+            "256k": 0.35519017079005766
+        },
+        "contextual_requirement": {
+            "Full": 0.43466831346143797,
+            "Partial": 0.5155278368431229
+        },
+        "difficulty": {
+            "Easy": 0.6903049055473668,
+            "Moderate": 0.4015483173366612,
+            "Hard": 0.38434152744866484,
+            "Extreme": 0.330728695471088
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.7350489226932438,
+            "T2. Sequencing & Structure Reconstruction": 0.7006655082390375,
+            "T3. Evidence-Grounded QA": 0.43333333333333335,
+            "T4. Summarization & Synthesis": 0.5066399530046457,
+            "T5. Attribution & Citation Alignment": 0.4030762255317208,
+            "T6. Aggregation & Clustering": 0.44151556236290046,
+            "T7. Consistency & Compliance Checking": 0.2711344235218425,
+            "T8. Structured & Numeric Reasoning": 0.4435185185185186,
+            "T9. Version & Code Diff Analysis": 0.5056201530159209,
+            "T10. Rule Induction & In-Context Learning": 0.45111111111111113,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.4083333333333333
+        },
+        "language": {
+            "Chinese": 0.43645831615890296,
+            "English": 0.5040346913398556
+        }
+    },
+    "pass@1": 0.23333333333333334,
+    "BoN-2": {
+        "overall_metric": 0.5468849991087089,
+        "token_length": {
+            "8k": 0.6615753489640523,
+            "16k": 0.6155193707982429,
+            "32k": 0.5758535655924854,
+            "64k": 0.540724309445691,
+            "128k": 0.4626871186035022,
+            "256k": 0.4249502812482792
+        },
+        "contextual_requirement": {
+            "Full": 0.5080630736758954,
+            "Partial": 0.5962947223868355
+        },
+        "difficulty": {
+            "Easy": 0.7925721699736568,
+            "Moderate": 0.479093636665099,
+            "Hard": 0.4528494469332917,
+            "Extreme": 0.38403673085414924
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.8163493863260063,
+            "T2. Sequencing & Structure Reconstruction": 0.7442577955813249,
+            "T3. Evidence-Grounded QA": 0.5833333333333334,
+            "T4. Summarization & Synthesis": 0.5201693233821205,
+            "T5. Attribution & Citation Alignment": 0.5067233304435315,
+            "T6. Aggregation & Clustering": 0.5291314359023842,
+            "T7. Consistency & Compliance Checking": 0.31988194373207235,
+            "T8. Structured & Numeric Reasoning": 0.5319444444444444,
+            "T9. Version & Code Diff Analysis": 0.5907370281186358,
+            "T10. Rule Induction & In-Context Learning": 0.5363888888888889,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.4666666666666667
+        },
+        "language": {
+            "Chinese": 0.5179409589397771,
+            "English": 0.5758290392776407
+        }
+    },
+    "pass@2": 0.3006666666666667,
+    "BoN-3": {
+        "overall_metric": 0.5824444918225498,
+        "token_length": {
+            "8k": 0.6960404076989396,
+            "16k": 0.6595983514236444,
+            "32k": 0.6169744918377427,
+            "64k": 0.5731325003589102,
+            "128k": 0.48953898539680313,
+            "256k": 0.4593822142192594
+        },
+        "contextual_requirement": {
+            "Full": 0.5444884918996935,
+            "Partial": 0.6307521280880043
+        },
+        "difficulty": {
+            "Easy": 0.8395318293521158,
+            "Moderate": 0.5135257846150404,
+            "Hard": 0.4826106375194937,
+            "Extreme": 0.4116567499755111
+        },
+        "primary_task": {
+            "T1. Retrieval & Ranking": 0.8331279763695215,
+            "T2. Sequencing & Structure Reconstruction": 0.7661840637575932,
+            "T3. Evidence-Grounded QA": 0.6416666666666667,
+            "T4. Summarization & Synthesis": 0.5304996896358936,
+            "T5. Attribution & Citation Alignment": 0.5410298862500872,
+            "T6. Aggregation & Clustering": 0.5653969713210634,
+            "T7. Consistency & Compliance Checking": 0.35530101198058683,
+            "T8. Structured & Numeric Reasoning": 0.5680555555555555,
+            "T9. Version & Code Diff Analysis": 0.6135286679274133,
+            "T10. Rule Induction & In-Context Learning": 0.6047222222222223,
+            "T11. Dialogue Memory & Long-Horizon Tracking": 0.5166666666666667
+        },
+        "language": {
+            "Chinese": 0.5531328379155088,
+            "English": 0.6117561457295911
+        }
+    },
+    "pass@3": 0.338
+}
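With all of the *_summary.json files in place, side-by-side comparison across models and thinking/nonthinking modes is a short script. A sketch, assuming only the eval/output/<model>/<run>_summary.json layout visible in this commit; it is illustrative and not part of the repository:

import json
from pathlib import Path

rows = []
# Every summary added in this commit follows eval/output/<model>/<run>_summary.json.
for path in sorted(Path("eval/output").glob("*/*_summary.json")):
    data = json.loads(path.read_text(encoding="utf-8"))
    rows.append((path.parent.name, path.stem, data["average_overall_metric"], data["pass@3"]))

for model, run, overall, pass3 in rows:
    print(f"{model:<35} {run:<55} overall={overall:.4f} pass@3={pass3:.4f}")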