# Necessary Imports
import os
import json
import importlib
import pandas as pd
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Non-LLM Imports
import evaluate as hf_eval
# RAGAS Imports
from ragas import EvaluationDataset
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper
from ragas import evaluate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
# Config File
from config import LLM_METRICS, DATA_FILE, METRICS
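# config.py itself is not part of this file; judging by how these names are used
# below, it is assumed to look roughly like the sketch in this comment block
# (the data file name, model name, and individual metric names are illustrative guesses):
#
#   DATA_FILE = "eval_data.json"
#   LLM_METRICS = {
#       "OpenAI_API": "sk-...",            # OpenAI API key
#       "OpenAI_Model": "gpt-4o-mini",     # model name passed to ChatOpenAI
#       # RAGAS metric class names from ragas.metrics, toggled on/off:
#       "Faithfulness": True,
#       "AnswerRelevancy": False,
#   }
#   METRICS = {
#       "bleu": True,                      # HuggingFace `evaluate` metric ids,
#       "rouge": True,                     # plus the special "roberta-nli" case
#       "roberta-nli": True,
#   }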
### 1. Evaluation & Dataset Loaders ###
# OpenAI API key (supplied via the config file)
if LLM_METRICS.get('OpenAI_API'):
    os.environ["OPENAI_API_KEY"] = LLM_METRICS['OpenAI_API']
else:
    raise ValueError("OpenAI API key not found in LLM_METRICS.")
# Evaluator loaders
evaluator_llm = LangchainLLMWrapper(ChatOpenAI(model=LLM_METRICS['OpenAI_Model']))
evaluator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())
# RAGAS evaluation library loaders
def ragas_evaluate_metric(dataset, metric):
    print(metric)
    ragas_metric = getattr(importlib.import_module('ragas.metrics'), metric)
    res = evaluate(dataset=dataset, metrics=[ragas_metric()], llm=evaluator_llm)
    return res
# Define RoBERTa NLI score function
def roberta_nli_score(sentence1, sentence2, model, tokenizer):
    # Tokenize the sentence pair
    inputs = tokenizer(sentence1, sentence2, return_tensors="pt", truncation=True)
    # Get model predictions
    with torch.no_grad():
        logits = model(**inputs).logits
    # Convert logits to probabilities (softmax)
    probs = torch.nn.functional.softmax(logits, dim=-1)
    # roberta-large-mnli label order: 0 = contradiction, 1 = neutral, 2 = entailment
    entailment_prob = probs[:, 2].item()
    contradiction_prob = probs[:, 0].item()
    neutral_prob = probs[:, 1].item()
    return {
        "Entailment Score": entailment_prob,
        "Contradiction Score": contradiction_prob,
        "Neutral Score": neutral_prob
    }
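# Illustrative usage (hypothetical sentences; model and tokenizer loaded as in
# nonllm_evaluate_metric below):
#   roberta_nli_score("The cat sat on the mat.", "A cat is on the mat.", model, tokenizer)
#   -> {"Entailment Score": ..., "Contradiction Score": ..., "Neutral Score": ...}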
# HuggingFace evaluation library loaders
def nonllm_evaluate_metric(data, metric):
    print(metric)
    scores = []
    if metric == 'roberta-nli':
        # Load the RoBERTa-large-mnli model and tokenizer once
        model_name = "roberta-large-mnli"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        for i in data:
            score = roberta_nli_score(i['reference'], i['response'], model, tokenizer)
            scores.append(score)
    else:
        eval_metric = hf_eval.load(metric)
        for i in data:
            score = eval_metric.compute(references=[[i['reference']]], predictions=[i['response']])
            scores.append(score)
    return scores
# Dataset loader: read evaluation records from the configured data file
with open(DATA_FILE, mode='r', encoding='utf-8') as infile:
    data = json.load(infile)
# data = data[:10]
dataset = EvaluationDataset.from_list(data)
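# Note: only the 'reference' and 'response' fields are read directly in this file;
# depending on which RAGAS metrics are enabled, records are assumed to carry
# additional fields such as 'user_input' and 'retrieved_contexts', e.g. (illustrative shape):
#   [{"user_input": "...", "retrieved_contexts": ["..."],
#     "response": "...", "reference": "..."}, ...]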
### 2. LLM Based Evaluation ###
evaluation_results = []
if LLM_METRICS:
    for metric, metric_class in LLM_METRICS.items():
        if metric_class is True:
            evaluation_results.append(ragas_evaluate_metric(dataset, metric))

if evaluation_results:
    # Scores Dataframe
    df = pd.DataFrame(evaluation_results)
    scores = df['scores']

    # Json file creation
    def create_json(scores):
        input_json = scores.values.tolist()
        transformed_result = {"scores": []}
        transposed = zip(*input_json)
        for group in transposed:
            score_entry = {}
            for item in group:
                score_entry.update(item)
            transformed_result["scores"].append(score_entry)
        return transformed_result

    output_json = create_json(scores)
    # Write the transformed JSON to the file
    output_file = "ragas_scores.json"
    with open(output_file, 'w') as json_file:
        json.dump(output_json, json_file, indent=4)
    print(f"Metrics results saved to {output_file}")
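# Expected shape of ragas_scores.json (keys are whichever RAGAS metric score
# names were enabled; values illustrative):
#   {"scores": [{"faithfulness": ..., "answer_relevancy": ...}, ...]}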
### 3. Non-LLM Based Evaluation ###
metrics_results = []
if METRICS:
    for metric, metric_class in METRICS.items():
        if metric_class is True:
            metrics_results.append(nonllm_evaluate_metric(data, metric))

# Combining to form a json file
def combine_metrics_results(metrics_results):
    num_data_points = len(metrics_results[0])
    combined_data = []
    # Loop over each data point and combine the corresponding metrics from each metric set
    for i in range(num_data_points):
        combined_row = {}
        for metric_set in metrics_results:
            combined_row.update(metric_set[i])  # Merge corresponding data point's metrics
        combined_data.append(combined_row)
    return combined_data

if metrics_results:
    # Combine the metrics
    combined_data = combine_metrics_results(metrics_results)
    # Write to a JSON file
    output_file = "quant_scores.json"
    with open(output_file, "w") as f:
        json.dump(combined_data, f, indent=4)
    print(f"Metrics results saved to {output_file}")
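# Expected shape of quant_scores.json: one merged dict per data point, keyed by
# whichever non-LLM metrics are enabled (illustrative):
#   [{"bleu": ..., "Entailment Score": ..., "Contradiction Score": ..., "Neutral Score": ...}, ...]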