|
|
from typing import List, Dict, Set, Optional
from collections import defaultdict
from itertools import combinations

import jieba
from rouge import Rouge
import pytrec_eval
|
|
|
|
|
'''
normalize text
'''
|
|
|
|
|
def get_answer_area(text: str) -> str:
    '''Keep only the content after the last "[Answer]" / "[答案]" marker, if one is present.'''
    if "[Answer]" in text:
        last_answer_start: int = text.rfind('[Answer]')
        text = text[last_answer_start + len('[Answer]'):]
    elif "[答案]" in text:
        last_answer_start: int = text.rfind('[答案]')
        text = text[last_answer_start + len('[答案]'):]
    return text.strip()
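
# Illustrative usage (hypothetical inputs, not from the benchmark):
#   get_answer_area("Reasoning...\n[Answer] Paris")  -> "Paris"
#   get_answer_area("推理过程\n[答案]巴黎")            -> "巴黎"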
|
|
|
|
|
def lower(text: str) -> str:
    return text.lower()
|
|
|
|
|
def split_by_new_line(text: str) -> List[str]:
    return text.split("\n")
|
|
|
|
|
def fix_space(text: str) -> str:
    '''
    Collapse runs of whitespace into a single space.
    1. We cannot simply delete all spaces: "1 11" != "11 1", but both would become "111" and wrongly compare equal.
    2. Some gold answers contain multiple consecutive spaces; collapsing them has a very low probability of affecting the judgment.
    '''
    return ' '.join(text.split())
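
# Illustrative usage (hypothetical input): fix_space("1  11 ") -> "1 11", while "111" stays "111".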
|
|
|
|
|
def normalize_answers(answers: List[str]) -> List[str]:
    return [fix_space(lower(a).strip()) for a in answers]
|
|
|
|
|
def normalize_prediction(prediction: str) -> List[str]:
    return [fix_space(p.strip()) for p in split_by_new_line(lower(get_answer_area(prediction)))]
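
# Illustrative usage (hypothetical input):
#   normalize_prediction("Reasoning...\n[Answer]\nApple\nBanana Split")
#   -> ["apple", "banana split"]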
|
|
|
|
|
def normalize_prediction_abstract(abstract: str) -> str:
    return fix_space(lower(abstract).strip())
|
|
|
|
|
'''
metrics
'''
|
|
def Accuracy(answers: List[str], prediction: str) -> float:
    answers: List[str] = normalize_answers(answers)
    predictions: List[str] = normalize_prediction(prediction)

    if len(answers) == 0 or len(predictions) == 0:
        return 0.0

    # Exact match between the first gold answer and the first predicted line.
    if answers[0] == predictions[0]:
        return 1.0
    else:
        return 0.0
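
# Illustrative usage (hypothetical inputs): only the first gold answer and first predicted line are compared,
#   Accuracy(["Paris"], "[Answer]\nParis\nFrance") -> 1.0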
|
|
|
|
|
def F1_Score(answers: List[str], prediction: str) -> float:
    answers: List[str] = normalize_answers(answers)
    predictions: List[str] = normalize_prediction(prediction)

    answer_set: Set[str] = set(answers)
    prediction_set: Set[str] = set(predictions)

    # Set-level F1 over whole answer strings (not token-level F1).
    common: Set[str] = answer_set & prediction_set
    if len(common) == 0 or len(prediction_set) == 0 or len(answer_set) == 0:
        return 0.0

    precision: float = len(common) / len(prediction_set)
    recall: float = len(common) / len(answer_set)

    if precision + recall == 0:
        return 0.0

    f1: float = (2 * precision * recall) / (precision + recall)
    return f1
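
# Illustrative usage (hypothetical inputs): with gold {"a", "b", "c"} and predicted lines {"a", "b", "d"},
# precision = 2/3 and recall = 2/3, so F1_Score(["a", "b", "c"], "[Answer]\na\nb\nd") ≈ 0.667.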
|
|
|
|
|
def SubEM(answers: List[str], prediction: str) -> float:
    answers: List[str] = normalize_answers(answers)
    predictions: List[str] = normalize_prediction(prediction)

    if len(answers) == 0 or len(predictions) == 0:
        return 0.0

    # Fraction of gold answers that appear verbatim among the predicted lines.
    score: float = 0.0
    for a in answers:
        if a in predictions:
            score += 1.0
    return score / len(answers)
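
# Illustrative usage (hypothetical inputs): SubEM(["a", "b"], "[Answer]\nb\nc") -> 0.5,
# since only "b" among the two gold answers appears in the predicted lines.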
|
|
|
|
|
|
|
|
def Summary_Max_Rouge_L(answers: List[str], prediction: str, is_zh: bool) -> float:
    # For Chinese text, segment with jieba so that ROUGE operates on space-separated tokens.
    if is_zh:
        answers = [" ".join(list(jieba.cut(a, cut_all=False))) for a in answers]
        prediction = " ".join(list(jieba.cut(prediction, cut_all=False)))

    rouge_evaluator = Rouge()
    try:
        scores = rouge_evaluator.get_scores([prediction] * len(answers), answers, avg=False)
    except Exception:
        # The rouge package raises on invalid inputs (e.g. empty hypotheses); treat these as a zero score.
        return 0.0

    return max([score["rouge-l"]["f"] for score in scores])
|
|
|
|
|
def Summary_Max_Semantic_Similarity(Embedding_Model, answers: List[str], prediction: str) -> float:
    answer_embeddings = Embedding_Model.encode(answers)
    prediction_embeddings = Embedding_Model.encode([prediction])

    # Maximum similarity between the prediction and any reference answer.
    similarity = Embedding_Model.similarity(answer_embeddings, prediction_embeddings)
    return float(similarity.max().cpu().numpy())
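
# Note: Embedding_Model above is assumed to follow a sentence-transformers-style interface,
# i.e. encode() returning embeddings and similarity() returning a tensor of pairwise similarities.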
|
|
|
|
|
def Summary(Embedding_Model, answers: List[str], prediction: str, is_zh: bool, alpha: float = 0.5, beta: float = 0.5) -> float:
    answers: List[str] = normalize_answers(answers)
    prediction: str = normalize_prediction_abstract(prediction)

    if len(answers) == 0 or not prediction:
        return 0.0

    # Weighted combination of semantic similarity (alpha) and ROUGE-L (beta).
    return alpha * Summary_Max_Semantic_Similarity(Embedding_Model, answers, prediction) + beta * Summary_Max_Rouge_L(answers, prediction, is_zh)
|
|
|
|
|
|
|
|
def NDCG(answers: List[str], prediction: str) -> float:
    answers: List[str] = normalize_answers(answers)
    predictions: List[str] = normalize_prediction(prediction)

    if len(answers) == 0 or len(predictions) == 0:
        return 0.0

    k_value = len(answers)

    # Build pytrec_eval-style qrels and run: earlier positions get higher relevance / retrieval scores.
    qrels = {
        'query': {a: len(answers) - i for i, a in enumerate(answers)}
    }
    run = {
        'query': {p: len(predictions) - i for i, p in enumerate(predictions)}
    }

    ndcg = 0.0
    ndcg_string = "ndcg_cut." + str(k_value)
    evaluator = pytrec_eval.RelevanceEvaluator(qrels, {ndcg_string})
    scores = evaluator.evaluate(run)

    for query_id in scores.keys():
        ndcg += scores[query_id]["ndcg_cut_" + str(k_value)]

    ndcg = ndcg / len(scores)

    return ndcg
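
# Illustrative usage (hypothetical inputs): the answer order defines graded relevance, e.g. for
# NDCG(["a", "b"], "[Answer]\na\nb") the qrels are {'query': {'a': 2, 'b': 1}} and the run is
# {'query': {'a': 2, 'b': 1}}, so the predicted ranking matches the ideal one and the score is 1.0.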
|
|
|
|
|
def Pairwise_Accuracy(answers: List[str], prediction: str) -> float:
    answers: List[str] = normalize_answers(answers)
    predictions: List[str] = normalize_prediction(prediction)

    if len(answers) == 0 or len(answers) == 1 or len(predictions) == 0 or len(predictions) == 1:
        return 0.0

    # Number of unordered pairs among the predicted items.
    n_total: int = len(predictions) * (len(predictions) - 1) // 2
    prediction_indices: Dict[str, int] = {p: i for i, p in enumerate(predictions)}
    n_correct: int = 0

    # A gold pair (a, b) counts as correct if both items are predicted and a is ranked before b.
    for a, b in combinations(answers, 2):
        if a in prediction_indices and b in prediction_indices:
            if prediction_indices[a] < prediction_indices[b]:
                n_correct += 1

    return n_correct / n_total
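
# Illustrative usage (hypothetical inputs): Pairwise_Accuracy(["a", "b", "c"], "[Answer]\na\nc\nb") -> 2/3,
# since the pairs (a, b) and (a, c) keep the gold order while (b, c) is reversed.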
|
|
|
|
|
'''
calculate metrics
'''
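
# Assumed per-sample record format (inferred from the field names used by the functions below):
#   {'id': ..., 'bon_idx': ..., 'metric': ..., 'primary_task': ..., <other dimension fields>}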
|
|
def get_average_overall_results(data, bon_num):
    """get average overall results for all inference iterations"""
    overall_results = defaultdict(list)
    for item in data:
        overall_results[item['id']].append(item)

    average_overall_results = []
    inference_inconsistent_samples_num = 0
    for _, items in overall_results.items():
        # Count samples whose number of inference iterations does not match bon_num.
        if len(items) != bon_num:
            inference_inconsistent_samples_num += 1
        tmp_item = items[0].copy()
        tmp_item['metric'] = sum(item['metric'] for item in items) / len(items)
        average_overall_results.append(tmp_item)
    return average_overall_results, inference_inconsistent_samples_num
|
|
|
|
|
def get_inference_iteration_idx_results(data, inference_iteration_idx):
    """get results for the idx-th inference iteration"""
    inference_iteration_idx_results = []
    for item in data:
        if item['bon_idx'] != inference_iteration_idx:
            continue
        inference_iteration_idx_results.append(item)
    return inference_iteration_idx_results
|
|
|
|
|
def get_best_of_n_results(data, bon_num):
    """get best-of-n results for each question"""
    best_of_n_results = {}
    for item in data:
        if item['bon_idx'] > bon_num:
            continue
        # Keep the highest-scoring result among the first bon_num iterations for each question.
        if item['id'] not in best_of_n_results or item['metric'] > best_of_n_results[item['id']]['metric']:
            best_of_n_results[item['id']] = item
    return list(best_of_n_results.values())
|
|
|
|
|
def calculate_pass_n_metrics(best_of_n_results):
    """
    calculate pass@n metrics
    Summary threshold = 0.5 * 0.5 (Rouge-L) + 0.5 * 0.8 (Semantic Similarity) = 0.65:
    a summary (T4) sample passes if its score is greater than 0.65; all other tasks require an exact score of 1.0.
    """
    pass_sample_num = 0
    for best_of_n_result in best_of_n_results:
        if 'T4' in best_of_n_result['primary_task']:
            if best_of_n_result['metric'] > 0.65:
                pass_sample_num += 1
        else:
            if best_of_n_result['metric'] == 1.0:
                pass_sample_num += 1
    return pass_sample_num / len(best_of_n_results)
|
|
|
|
|
def calculate_overall_metrics(metric_results):
    """calculate overall metrics"""
    if not metric_results:
        return 0.0

    metrics = [metric_result['metric'] for metric_result in metric_results]
    return sum(metrics) / len(metrics)
|
|
|
|
|
def calculate_dimension_metrics(metric_results, dimension, sort_keys):
    """calculate metrics for each dimension"""
    dimension_groups = defaultdict(list)

    for metric_result in metric_results:
        value = metric_result[dimension]
        dimension_groups[value].append(metric_result['metric'])

    results = {}
    for value, metrics in dimension_groups.items():
        results[value] = sum(metrics) / len(metrics)

    # Return the per-dimension averages in the order given by sort_keys (each key must be present in the results).
    return {key: results[key] for key in sort_keys}