2 * clueless-kmean is a variant of k-mean which enforces balanced
3 * distribution of classes in every cluster
5 * Copyright (c) 2013 Idiap Research Institute, http://www.idiap.ch/
6 * Written by Francois Fleuret <francois.fleuret@idiap.ch>
8 * This file is part of clueless-kmean.
10 * clueless-kmean is free software: you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 3 as published by the Free Software Foundation.
14 * clueless-kmean is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with clueless-kmean. If not, see <http://www.gnu.org/licenses/>.
24 #include "clusterer.h"
27 Clusterer::Clusterer() {
32 Clusterer::~Clusterer() {
33 deallocate_array<scalar_t>(_cluster_means);
34 deallocate_array<scalar_t>(_cluster_var);
37 scalar_t Clusterer::distance_to_centroid(scalar_t *x, int k) {
38 // We take the variance into account + the normalization term. This
39 // is between k-mean and EM with a diagonal covariance
41 for(int d = 0; d < _dim; d++) {
42 dist += sq(_cluster_means[k][d] - x[d]) / (2 * _cluster_var[k][d]);
43 dist += 0.5 * log(_cluster_var[k][d]);
44 ASSERT(!isnan(dist) && !isinf(dist));
49 void Clusterer::initialize_clusters(int nb_points, scalar_t **points) {
50 int *used = new int[nb_points];
51 for(int k = 0; k < nb_points; k++) { used[k] = 0; }
52 for(int k = 0; k < _nb_clusters; k++) {
54 do { l = int(drand48() * nb_points); } while(used[l]);
55 for(int d = 0; d < _dim; d++) {
56 _cluster_means[k][d] = points[l][d];
57 _cluster_var[k][d] = 1.0;
64 scalar_t Clusterer::baseline_cluster_association(int nb_points, scalar_t **points,
65 int nb_classes, int *labels,
67 int *associated_clusters = new int[nb_points];
68 scalar_t total_dist = 0;
70 for(int n = 0; n < nb_points; n++) {
71 scalar_t lowest_dist = 0;
72 for(int k = 0; k < _nb_clusters; k++) {
73 scalar_t dist = distance_to_centroid(points[n], k);
74 if(k == 0 || dist <= lowest_dist) {
76 associated_clusters[n] = k;
80 total_dist += lowest_dist;
83 for(int n = 0; n < nb_points; n++) {
84 for(int k = 0; k < _nb_clusters; k++) {
87 gamma[n][associated_clusters[n]] = 1.0;
90 delete[] associated_clusters;
95 scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **points,
96 int nb_classes, int *labels,
100 int *coeff_row = new int[nb_points * _nb_clusters + 1];
101 int *coeff_col = new int[nb_points * _nb_clusters + 1];
102 scalar_t *coeff_wgt = new scalar_t[nb_points * _nb_clusters + 1];
104 lp = glp_create_prob();
106 glp_set_prob_name(lp, "baseline_lp_cluster_association");
107 glp_set_obj_dir(lp, GLP_MIN);
109 glp_add_rows(lp, nb_points);
111 for(int n = 1; n <= nb_points; n++) {
112 glp_set_row_bnds(lp, n, GLP_FX, 1.0, 1.0);
115 glp_add_cols(lp, nb_points * _nb_clusters);
116 for(int k = 1; k <= _nb_clusters; k++) {
117 for(int n = 1; n <= nb_points; n++) {
118 int i = n + nb_points * (k - 1);
119 glp_set_obj_coef(lp, i, distance_to_centroid(points[n-1], k-1));
120 glp_set_col_bnds(lp, i, GLP_DB, 0.0, 1.0);
126 for(int n = 1; n <= nb_points; n++) {
127 for(int k = 1; k <= _nb_clusters; k++) {
128 coeff_row[n_coeff] = n;
129 coeff_col[n_coeff] = n + nb_points * (k - 1);
130 coeff_wgt[n_coeff] = 1.0;
135 glp_load_matrix(lp, nb_points * _nb_clusters, coeff_row, coeff_col, coeff_wgt);
137 glp_simplex(lp, NULL);
139 scalar_t total_dist = glp_get_obj_val(lp);
141 for(int k = 1; k <= _nb_clusters; k++) {
142 for(int n = 1; n <= nb_points; n++) {
143 int i = n + nb_points * (k - 1);
144 gamma[n-1][k-1] = glp_get_col_prim(lp, i);
157 scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t **points,
158 int nb_classes, int *labels,
162 // dist(n,k) distance of samples n to cluster k
164 // We want to optimize the
166 // \gamma(n,k) is the association of point n to cluster k
170 // \sum_{n,k} \gamma(n,k) dist(n,k)
174 // (A) \forall n, k, \gamma(n, k) >= 0
175 // (B) \forall n, \sum_k \gamma(n,k) = 1
176 // (C) \forall k, \sum_n \gamma(n,k) = N/K
180 // The coefficients for the constraints are passed to the glpk
181 // functions with a sparse representation.
183 int nb_coeffs = nb_points * _nb_clusters + nb_points * _nb_clusters;
185 int *coeff_row = new int[nb_coeffs + 1];
186 int *coeff_col = new int[nb_coeffs + 1];
187 scalar_t *coeff_wgt = new scalar_t[nb_coeffs + 1];
191 scalar_t *nb_samples_per_class = new scalar_t[nb_classes];
192 for(int c = 0; c < nb_classes; c++) {
193 nb_samples_per_class[c] = 0.0;
196 for(int n = 0; n < nb_points; n++) {
197 nb_samples_per_class[labels[n]] += 1.0;
200 lp = glp_create_prob();
202 glp_set_prob_name(lp, "uninformative_lp_cluster_association");
203 glp_set_obj_dir(lp, GLP_MIN);
205 // We have one column per coefficient gamma
207 glp_add_cols(lp, nb_points * _nb_clusters);
209 // The constraints (A) will be expressed by putting directly bounds
210 // on the column variables. So we need one row per (B) constraint,
211 // and one per (C) constraint.
213 glp_add_rows(lp, nb_points + _nb_clusters * nb_classes);
215 // First, we set the weights for the objective function, and the
216 // constraint on the individual gammas
218 for(int k = 1; k <= _nb_clusters; k++) {
219 for(int n = 1; n <= nb_points; n++) {
220 int col = n + nb_points * (k - 1);
222 // The LP weight on this association coefficient for the global
223 // loss is the normalized distance of that sample to the
224 // centroid of that cluster
226 glp_set_obj_coef(lp, col, distance_to_centroid(points[n-1], k-1));
228 // The (A) constraints: Each column correspond to one of the
229 // gamma, and it has to be in [0,1]
231 glp_set_col_bnds(lp, col, GLP_DB, 0.0, 1.0);
235 // The (B) constraints: for each point, the sum of its association
236 // coefficients is equal to 1.0
238 for(int n = 1; n <= nb_points; n++) {
240 glp_set_row_bnds(lp, row, GLP_FX, 1.0, 1.0);
243 for(int n = 1; n <= nb_points; n++) {
244 for(int k = 1; k <= _nb_clusters; k++) {
246 coeff_row[n_coeff] = row;
247 coeff_col[n_coeff] = nb_points * (k - 1) + n;
248 coeff_wgt[n_coeff] = 1.0;
253 // The (C) constraints: For each pair cluster/class, the sum of the
254 // association coefficient to this cluster for this class is equal
255 // to the number of sample of that class, divided by the number of
258 for(int k = 1; k <= _nb_clusters; k++) {
259 for(int c = 1; c <= nb_classes; c++) {
260 int row = nb_points + (k - 1) * nb_classes + c;
261 scalar_t tau = nb_samples_per_class[c-1] / scalar_t(_nb_clusters);
262 glp_set_row_bnds(lp, row, GLP_FX, tau, tau);
266 for(int k = 1; k <= _nb_clusters; k++) {
267 for(int c = 1; c <= nb_classes; c++) {
268 int row = nb_points + (k - 1) * nb_classes + c;
269 for(int n = 1; n <= nb_points; n++) {
270 if(labels[n-1] == c - 1) {
271 coeff_row[n_coeff] = row;
272 coeff_col[n_coeff] = (k-1) * nb_points + n;
273 coeff_wgt[n_coeff] = 1.0;
280 ASSERT(n_coeff == nb_coeffs + 1);
282 glp_load_matrix(lp, nb_coeffs, coeff_row, coeff_col, coeff_wgt);
284 // Now a miracle occurs
286 glp_simplex(lp, NULL);
288 // We retrieve the result
290 scalar_t total_dist = glp_get_obj_val(lp);
292 for(int k = 1; k <= _nb_clusters; k++) {
293 for(int n = 1; n <= nb_points; n++) {
294 int i = n + nb_points * (k - 1);
295 gamma[n-1][k-1] = glp_get_col_prim(lp, i);
299 delete[] nb_samples_per_class;
308 void Clusterer::update_clusters(int nb_points, scalar_t **points, scalar_t **gamma) {
309 for(int k = 0; k < _nb_clusters; k++) {
311 for(int d = 0; d < _dim; d++) {
312 _cluster_means[k][d] = 0.0;
313 _cluster_var[k][d] = 0.0;
316 scalar_t sum_gamma = 0;
317 for(int n = 0; n < nb_points; n++) {
318 sum_gamma += gamma[n][k];
319 for(int d = 0; d < _dim; d++) {
320 _cluster_means[k][d] += gamma[n][k] * points[n][d];
321 _cluster_var[k][d] += gamma[n][k] * sq(points[n][d]);
325 ASSERT(sum_gamma >= 1);
327 for(int d = 0; d < _dim; d++) {
330 (_cluster_var[k][d] - sq(_cluster_means[k][d]) / sum_gamma) / (sum_gamma - 1);
331 _cluster_var[k][d] = max(scalar_t(min_cluster_variance), _cluster_var[k][d]);
333 _cluster_var[k][d] = 1;
336 _cluster_means[k][d] /= sum_gamma;
341 void Clusterer::train(int mode,
342 int nb_clusters, int dim,
343 int nb_points, scalar_t **points,
344 int nb_classes, int *labels,
345 int *cluster_associations) {
346 deallocate_array<scalar_t>(_cluster_means);
347 deallocate_array<scalar_t>(_cluster_var);
348 _nb_clusters = nb_clusters;
350 _cluster_means = allocate_array<scalar_t>(_nb_clusters, _dim);
351 _cluster_var = allocate_array<scalar_t>(_nb_clusters, _dim);
353 scalar_t **gammas = allocate_array<scalar_t>(nb_points, _nb_clusters);
355 if(nb_clusters > nb_points) abort();
357 initialize_clusters(nb_points, points);
359 scalar_t pred_total_distance, total_distance = FLT_MAX;
363 pred_total_distance = total_distance;
367 case STANDARD_ASSOCIATION:
369 baseline_cluster_association(nb_points, points, nb_classes, labels, gammas);
372 case STANDARD_LP_ASSOCIATION:
374 baseline_lp_cluster_association(nb_points, points, nb_classes, labels, gammas);
377 case UNINFORMATIVE_LP_ASSOCIATION:
379 uninformative_lp_cluster_association(nb_points, points, nb_classes, labels, gammas);
383 cerr << "Unknown sample-cluster association mode." << endl;
387 cout << "TRAIN " << nb_rounds << " " << total_distance << endl;
388 update_clusters(nb_points, points, gammas);
390 } while(total_distance < min_iteration_improvement * pred_total_distance &&
391 nb_rounds < max_nb_iterations);
393 if(cluster_associations) {
394 for(int n = 0; n < nb_points; n++) {
395 for(int k = 0; k < _nb_clusters; k++) {
396 if(k == 0 || gammas[n][k] > gammas[n][cluster_associations[n]]) {
397 cluster_associations[n] = k;
403 deallocate_array<scalar_t>(gammas);
406 int Clusterer::cluster(scalar_t *point) {
407 scalar_t lowest_dist = 0;
408 int associated_cluster = -1;
410 for(int k = 0; k < _nb_clusters; k++) {
413 for(int d = 0; d < _dim; d++) {
414 dist += sq(_cluster_means[k][d] - point[d]) / (2 * _cluster_var[k][d]);
415 dist += 0.5 * log(_cluster_var[k][d]);
416 ASSERT(!isnan(dist) && !isinf(dist));
419 if(k == 0 || dist <= lowest_dist) {
421 associated_cluster = k;
425 ASSERT(associated_cluster >= 0);
427 return associated_cluster;