-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathevaluate_clustering.py
255 lines (220 loc) · 10.4 KB
/
evaluate_clustering.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
import argparse
import os
import pickle
from dtaidistance.dtw_ndim import distance_matrix_fast
from sklearn.metrics import silhouette_samples
import matplotlib.pyplot as plt
import numpy as np
from ftpr.clustering import PhaseClustering
from mplsoccer import Pitch
def inertia(distance_matrix, cluster_indices):
    """
    Compute a coherence/separation fitness score for a clustering solution.

    The score is the ratio of the mean intra-cluster distance to the mean
    inter-cluster distance; lower values indicate tighter, better-separated
    clusters.

    Args:
        distance_matrix (numpy.ndarray): A 2D array containing pairwise
            distances between time series.
        cluster_indices (list): The cluster index assigned to each series.

    Returns:
        float: mean(intra-cluster dist) / (mean(inter-cluster dist) + eps).
            Returns 0 when no cluster has two or more members; when all
            series share one cluster the denominator becomes ``np.inf``.
    """
    # Group series indices by their actual cluster assignment. (The previous
    # version allocated one bucket per *series* and looped over all of those
    # mostly-empty buckets for the inter-cluster pass.)
    clusters = {}
    for idx, cluster_id in enumerate(cluster_indices):
        clusters.setdefault(cluster_id, []).append(idx)
    cluster_ids = sorted(clusters)

    intra_cluster_distances = []
    inter_cluster_distances = []

    # Intra-cluster distances: every pair within each cluster; singleton
    # clusters contribute nothing.
    for indices in clusters.values():
        for i in range(len(indices)):
            for j in range(i + 1, len(indices)):
                intra_cluster_distances.append(distance_matrix[indices[i], indices[j]])

    # Inter-cluster distances: every cross pair for each pair of distinct,
    # non-empty clusters.
    for a in range(len(cluster_ids)):
        for b in range(a + 1, len(cluster_ids)):
            for idx_i in clusters[cluster_ids[a]]:
                for idx_j in clusters[cluster_ids[b]]:
                    inter_cluster_distances.append(distance_matrix[idx_i, idx_j])

    intra_cluster_mean = np.mean(intra_cluster_distances) if intra_cluster_distances else 0
    inter_cluster_mean = np.mean(inter_cluster_distances) if inter_cluster_distances else np.inf
    # Avoid division by zero by adding a small epsilon
    epsilon = 1e-10
    fitness = intra_cluster_mean / (inter_cluster_mean + epsilon)
    return fitness
def create_and_save_plot(xs, ys, path, labels=None, title=None, xlabel=None, ylabel=None):
    """Draw one curve per (x, y) pair on a single figure and save it to *path*.

    Args:
        xs: List of x-value sequences, one per curve.
        ys: List of y-value sequences, parallel to ``xs``.
        path: File path the figure is written to.
        labels: Optional legend labels, parallel to ``xs``; a legend is
            drawn only when this is truthy.
        title: Optional figure title.
        xlabel: Optional x-axis label.
        ylabel: Optional y-axis label.
    """
    fig = plt.figure(figsize=(10, 10))
    curve_labels = labels if labels else [None] * len(xs)
    for x_values, y_values, curve_label in zip(xs, ys, curve_labels):
        if curve_label is None:
            plt.plot(x_values, y_values)
        else:
            plt.plot(x_values, y_values, label=curve_label)
    if labels:
        plt.legend(loc='upper right')
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    fig.savefig(path)
    plt.close(fig)
def load_pickle(path):
    """Deserialize and return the object stored in the pickle file at *path*."""
    with open(path, 'rb') as pickle_file:
        loaded = pickle.load(pickle_file)
    return loaded
def clustering_dict_to_list(cls_pred):
    """Flatten a {cluster_id: [series indices]} mapping into a label list.

    Entry ``i`` of the returned list is the cluster id whose member list
    contains series index ``i``. Assumes the member lists together cover
    ``range(total)`` exactly once.
    """
    total = sum(len(members) for members in cls_pred.values())
    labels = [0] * total
    for cluster_id, members in cls_pred.items():
        for series_idx in members:
            labels[series_idx] = cluster_id
    return labels
def get_silhouette_info(shilhouette_scores, cls_pred):
    """Summarize per-cluster silhouette scores, best clusters first.

    Args:
        shilhouette_scores: Per-sample silhouette score, indexed by series.
        cls_pred: Either a {cluster_id: [series indices]} dict or a flat
            per-series label sequence.

    Returns:
        list[tuple]: ``(cluster_id, scores_desc, mean_score)`` tuples, where
            ``scores_desc`` is that cluster's scores sorted descending, and
            the tuples themselves are ordered by ``mean_score`` descending.
    """
    if isinstance(cls_pred, dict):
        cluster_members = cls_pred
    else:
        # Invert a flat label sequence into {cluster_id: [series indices]}.
        cluster_members = {}
        for series_idx, cluster_id in enumerate(cls_pred):
            cluster_members.setdefault(cluster_id, []).append(series_idx)
    silhouette_info = []
    for cluster_id, members in cluster_members.items():
        scores_desc = sorted((shilhouette_scores[m] for m in members), reverse=True)
        silhouette_info.append((cluster_id, scores_desc, np.mean(scores_desc)))
    silhouette_info.sort(key=lambda entry: entry[2], reverse=True)
    return silhouette_info
if __name__ == '__main__':
    # Defining parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-dir', type=str)   # folder containing results_<params>.pkl files
    parser.add_argument('--top', type=int)         # k used by the top-k silhouette statistics
    parser.add_argument('--output-dir', type=str)  # destination folder for all generated plots
    args = parser.parse_args()
    # check if folder exists
    if not os.path.exists(args.input_dir):
        raise ValueError(f'The input folder {args.input_dir} does not exist!')
    # get all pickle files in the folder
    # The slice [8:] strips a 'results_' prefix from each file name, leaving
    # the experiment's parameter string — assumes every .pkl is named
    # 'results_<p1>_<p2>_....pkl'; TODO confirm against the producer script.
    file_names = [f.split('.')[0][8:] for f in os.listdir(args.input_dir) if f.endswith('.pkl')]
    assert len(file_names) > 0
    # Each experiment name encodes '_'-separated parameters; collect the
    # distinct values seen at each parameter position across experiments.
    n_parameters = len(file_names[0].split('_'))
    parameters_values = [set() for _ in range(n_parameters)]
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    # metric name -> {experiment name -> list of per-n_clusters values};
    # filled per experiment below, plotted as comparison curves at the end.
    all_stats = {
        "Average Silhouette": dict(),
        f'Average Top {args.top} Silhouettes': dict(),
        f'Average Silhouettes in Top {args.top} Clusters': dict(),
        "Average Coherence-Seperation": dict(),
        "Number of Empty Clusters": dict(),
        "Standard Deviation of Number of Datapoints in Clusters": dict()
    }
    for name in file_names:
        print(f'{name}:')
        parameters = name.split('_')
        for i, p in enumerate(parameters):
            parameters_values[i].add(p)
        # Per-experiment output folder plus a subfolder for the pitch plots.
        exp_out_dir = os.path.join(args.output_dir, name)
        if not os.path.exists(exp_out_dir):
            os.mkdir(exp_out_dir)
        if not os.path.exists(os.path.join(exp_out_dir, 'clusters_arrows')):
            os.mkdir(os.path.join(exp_out_dir, 'clusters_arrows'))
        # Read pickle file and series
        clustering_data = load_pickle(os.path.join(args.input_dir, f'results_{name}.pkl'))
        # Validate the expected pickle schema before touching its contents.
        if 'phase_clusterings' not in clustering_data or len(clustering_data['phase_clusterings']) == 0:
            raise ValueError('Clustering data does not contain any phase_clusterings object!')
        if 'clusterings' not in clustering_data or \
            'n_clusters' not in clustering_data or \
            'cls_preds' not in clustering_data or \
            len(clustering_data['clusterings']) != len(clustering_data['n_clusters']):
            raise ValueError('Clustering data format is curropted!')
        phase_clustering: PhaseClustering = clustering_data['phase_clusterings'][0]
        series = phase_clustering.series
        # Pairwise DTW distances between all series, computed once per
        # experiment and reused by every metric below.
        distances = distance_matrix_fast(series)
        # Average Silhouette Diagram with respect to n_clusters
        all_stats['Average Silhouette'][name] = []
        all_stats[f'Average Top {args.top} Silhouettes'][name] = []
        all_stats[f'Average Silhouettes in Top {args.top} Clusters'][name] = []
        all_stats['Average Coherence-Seperation'][name] = []
        all_stats['Number of Empty Clusters'][name] = []
        all_stats['Standard Deviation of Number of Datapoints in Clusters'][name] = []
        print('Plotting Diagrams...')
        # One iteration per stored clustering solution (one per n_clusters value).
        for i, clustering in enumerate(clustering_data['clusterings']):
            cls_pred = clustering_data['cls_preds'][i]
            # Overwrite the shared PhaseClustering object's state so its
            # helper methods operate on the current solution.
            phase_clustering.labels_ = cls_pred
            phase_clustering.n_clusters = clustering_data['n_clusters'][i]
            # average data in clusters
            data_in_clusters = [0 for _ in range(clustering_data['n_clusters'][i])]
            for c in cls_pred:
                data_in_clusters[c] += 1
            all_stats['Standard Deviation of Number of Datapoints in Clusters'][name].append(np.std(data_in_clusters))
            # empty clusters
            n_empty = 0
            for j in range(clustering_data['n_clusters'][i]):
                if j not in cls_pred:
                    n_empty += 1
            all_stats['Number of Empty Clusters'][name].append(n_empty)
            #
            # inertia
            all_stats['Average Coherence-Seperation'][name].append(inertia(distances, cls_pred))
            # silhouette (on the precomputed DTW distance matrix)
            scores = silhouette_samples(distances, cls_pred, metric='precomputed')
            all_stats['Average Silhouette'][name].append(np.average(scores))
            # top-k silhouettes and silhouette in top-k
            n_clusters = min(args.top, clustering_data['n_clusters'][i])
            best_clusters = phase_clustering.get_cluster_scores()
            # Cluster indices ordered by score descending, truncated to top-k.
            best_clusters_indeces = np.argsort(best_clusters)[::-1][:n_clusters]
            silhouette_info = get_silhouette_info(scores, cls_pred)
            # top-k silhouettes: weighted mean over the k clusters with the
            # highest mean silhouette (weights = cluster sizes).
            s_avg = 0
            count_avg = 0
            for info in silhouette_info[:n_clusters]:
                count_avg += len(info[1])
                s_avg += info[2] * len(info[1])
            all_stats[f'Average Top {args.top} Silhouettes'][name].append(s_avg / count_avg)
            # silhouette in top-k: weighted mean over the clusters that rank
            # top-k by phase_clustering's own cluster score instead.
            s_topk = 0
            count_topk = 0
            for info in silhouette_info:
                if info[0] in best_clusters_indeces:
                    count_topk += len(info[1])
                    s_topk += info[2] * len(info[1])
            all_stats[f'Average Silhouettes in Top {args.top} Clusters'][name].append(s_topk / count_topk)
            # Subplots for top-k clusters (for each n_clusters)
            pitch = Pitch(pitch_type='statsbomb', pitch_color='#22312b', line_color='#c7d5cc')
            fig, axs = pitch.grid(nrows=4, ncols=3, figheight=30,
                                  endnote_height=0.03, endnote_space=0,
                                  axis=False,
                                  title_height=0.08, grid_height=0.84)
            fig.set_facecolor('#22312b')
            for idx, ax in enumerate(axs['pitch'].flat):
                # NOTE(review): the guard compares idx against the full
                # n_clusters, but best_clusters_indeces only has
                # min(args.top, n_clusters) entries — when n_clusters exceeds
                # args.top (or the 12 grid axes) this looks like it can raise
                # IndexError; confirm with real data.
                if idx < clustering_data['n_clusters'][i]:
                    phase_clustering.labels_ = cls_pred
                    phase_clustering.n_clusters = clustering_data['n_clusters'][i]
                    series_in_cluster = phase_clustering.get_cluster_series(best_clusters_indeces[idx])
                    # Mark each phase's starting point, then draw an arrow
                    # for every consecutive step of the trajectory.
                    for ser in series_in_cluster:
                        pitch.scatter(ser[0, 0], ser[0, 1], s=200, ax=ax)
                        for z in range(len(ser) - 1):
                            pitch.arrows(ser[z, 0], ser[z, 1], ser[z + 1, 0], ser[z + 1, 1],
                                         color='#777777', ax=ax, width=1)
                    ax.set_title(f'#Phases: {len(series_in_cluster)} #Score: {best_clusters[best_clusters_indeces[idx]]}', fontsize=20, color='white')
            fig.savefig(os.path.join(exp_out_dir, 'clusters_arrows', f"{clustering_data['n_clusters'][i]}.png"))
            plt.close('all')
        xs = clustering_data['n_clusters']
        # for key in all_stats:
        #     create_and_save_plot([xs], [all_stats[key][name]], os.path.join(exp_out_dir, f'{key}.png'), title=key)
    # Create comparison plots
    # NOTE(review): xs here is the n_clusters list of the *last* experiment
    # processed, replicated for every curve — this assumes all experiments
    # were run over the same n_clusters values; verify upstream.
    xs = clustering_data['n_clusters']
    xss = [xs for _ in range(len(file_names))]
    for key in all_stats:
        ys = []
        labels = []
        for name, stat in all_stats[key].items():
            labels.append(name)
            ys.append(stat)
        create_and_save_plot(xss, ys, os.path.join(args.output_dir, f'{key}.png'), labels=labels, ylabel=key, xlabel='Num Clusters')