-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathEvaluatedAlgorithm.py
More file actions
69 lines (57 loc) · 3.36 KB
/
EvaluatedAlgorithm.py
File metadata and controls
69 lines (57 loc) · 3.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
from RecommenderMetrics import RecommenderMetrics
from EvaluationData import EvaluationData
class EvaluatedAlgorithm:
    """Pairs a recommender algorithm with a display name and runs a
    battery of evaluation metrics against an EvaluationData split.
    """

    def __init__(self, algorithm, name):
        # The wrapped recommender and the label used when reporting results.
        self.algorithm = algorithm
        self.name = name

    def Evaluate(self, evaluationData, doTopN, n=10, verbose=True):
        """Fit the algorithm and compute evaluation metrics.

        Always computes RMSE and MAE on the train/test split.  When
        doTopN is true, also computes hit-rate style metrics via
        leave-one-out cross-validation, plus coverage, diversity, and
        novelty of top-n lists built from the full data set.  Returns a
        dict mapping metric names to values.
        """
        metrics = {}

        # --- Accuracy on the plain train/test split ---
        if verbose:
            print("Evaluating accuracy...")
        self.algorithm.fit(evaluationData.GetTrainSet())
        predictions = self.algorithm.test(evaluationData.GetTestSet())
        metrics["RMSE"] = RecommenderMetrics.RMSE(predictions)
        metrics["MAE"] = RecommenderMetrics.MAE(predictions)

        if doTopN:
            # --- Leave-one-out: hold one rating per user out of training,
            # then see whether it lands in that user's top-n list ---
            if verbose:
                print("Evaluating top-N with leave-one-out...")
            self.algorithm.fit(evaluationData.GetLOOCVTrainSet())
            leftOutPredictions = self.algorithm.test(evaluationData.GetLOOCVTestSet())
            # Predict every (user, item) pair absent from the LOOCV train set.
            allPredictions = self.algorithm.test(evaluationData.GetLOOCVAntiTestSet())
            topNPredicted = RecommenderMetrics.GetTopN(allPredictions, n)
            if verbose:
                print("Computing hit-rate and rank metrics...")
            # Did the held-out item appear in the user's top-n at all?
            metrics["HR"] = RecommenderMetrics.HitRate(topNPredicted, leftOutPredictions)
            # Same, but only counting items the user actually liked.
            metrics["cHR"] = RecommenderMetrics.CumulativeHitRate(topNPredicted, leftOutPredictions)
            # Reciprocal-rank variant: hits nearer the top score higher.
            metrics["ARHR"] = RecommenderMetrics.AverageReciprocalHitRank(topNPredicted, leftOutPredictions)

            # --- Properties of recommendations built from the full data set ---
            if verbose:
                print("Computing recommendations with full data set...")
            self.algorithm.fit(evaluationData.GetFullTrainSet())
            allPredictions = self.algorithm.test(evaluationData.GetFullAntiTestSet())
            topNPredicted = RecommenderMetrics.GetTopN(allPredictions, n)
            if verbose:
                print("Analyzing coverage, diversity, and novelty...")
            # Share of users with at least one rec predicted at 4.0 or above.
            metrics["Coverage"] = RecommenderMetrics.UserCoverage(
                topNPredicted,
                evaluationData.GetFullTrainSet().n_users,
                ratingThreshold=4.0)
            # How dissimilar recommended items are from one another.
            metrics["Diversity"] = RecommenderMetrics.Diversity(
                topNPredicted, evaluationData.GetSimilarities())
            # Average popularity rank of recommended items.
            metrics["Novelty"] = RecommenderMetrics.Novelty(
                topNPredicted, evaluationData.GetPopularityRankings())

        if verbose:
            print("Analysis complete.")

        return metrics

    def GetName(self):
        """Return the display name for this algorithm."""
        return self.name

    def GetAlgorithm(self):
        """Return the wrapped recommender algorithm object."""
        return self.algorithm