2 * clueless-kmean is a variant of k-mean which enforces balanced
3 * distribution of classes in every cluster
5 * Copyright (c) 2013 Idiap Research Institute, http://www.idiap.ch/
6 * Written by Francois Fleuret <francois.fleuret@idiap.ch>
8 * This file is part of clueless-kmean.
10 * clueless-kmean is free software: you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 3 as published by the Free Software Foundation.
14 * clueless-kmean is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with clueless-kmean. If not, see <http://www.gnu.org/licenses/>.
24 #include "clusterer.h"
27 Clusterer::Clusterer() {
32 Clusterer::~Clusterer() {
33 deallocate_array<scalar_t>(_cluster_means);
34 deallocate_array<scalar_t>(_cluster_var);
37 scalar_t Clusterer::distance_to_centroid(scalar_t *x, int k) {
39 for(int d = 0; d < _dim; d++) {
40 dist += sq(_cluster_means[k][d] - x[d]) / (2 * _cluster_var[k][d]);
41 dist += 0.5 * log(_cluster_var[k][d]);
42 ASSERT(!isnan(dist) && !isinf(dist));
47 void Clusterer::initialize_clusters(int nb_points, scalar_t **points) {
48 int *used = new int[nb_points];
49 for(int k = 0; k < nb_points; k++) { used[k] = 0; }
50 for(int k = 0; k < _nb_clusters; k++) {
52 do { l = int(drand48() * nb_points); } while(used[l]);
53 for(int d = 0; d < _dim; d++) {
54 _cluster_means[k][d] = points[l][d];
55 _cluster_var[k][d] = 1.0;
62 scalar_t Clusterer::baseline_cluster_association(int nb_points, scalar_t **points,
63 int nb_classes, int *labels,
65 int *associated_clusters = new int[nb_points];
66 scalar_t total_dist = 0;
68 for(int n = 0; n < nb_points; n++) {
69 scalar_t lowest_dist = 0;
70 for(int k = 0; k < _nb_clusters; k++) {
71 scalar_t dist = distance_to_centroid(points[n], k);
72 if(k == 0 || dist <= lowest_dist) {
74 associated_clusters[n] = k;
78 total_dist += lowest_dist;
81 for(int n = 0; n < nb_points; n++) {
82 for(int k = 0; k < _nb_clusters; k++) {
85 gamma[n][associated_clusters[n]] = 1.0;
88 delete[] associated_clusters;
93 scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **points,
94 int nb_classes, int *labels,
98 int *coeff_row = new int[nb_points * _nb_clusters + 1];
99 int *coeff_col = new int[nb_points * _nb_clusters + 1];
100 scalar_t *coeff_wgt = new scalar_t[nb_points * _nb_clusters + 1];
102 lp = glp_create_prob();
104 glp_set_prob_name(lp, "baseline_lp_cluster_association");
105 glp_set_obj_dir(lp, GLP_MIN);
107 glp_add_rows(lp, nb_points);
109 for(int n = 1; n <= nb_points; n++) {
110 glp_set_row_bnds(lp, n, GLP_FX, 1.0, 1.0);
113 glp_add_cols(lp, nb_points * _nb_clusters);
114 for(int k = 1; k <= _nb_clusters; k++) {
115 for(int n = 1; n <= nb_points; n++) {
116 int i = n + nb_points * (k - 1);
117 glp_set_obj_coef(lp, i, distance_to_centroid(points[n-1], k-1));
118 glp_set_col_bnds(lp, i, GLP_DB, 0.0, 1.0);
124 for(int n = 1; n <= nb_points; n++) {
125 for(int k = 1; k <= _nb_clusters; k++) {
126 coeff_row[n_coeff] = n;
127 coeff_col[n_coeff] = n + nb_points * (k - 1);
128 coeff_wgt[n_coeff] = 1.0;
133 glp_load_matrix(lp, nb_points * _nb_clusters, coeff_row, coeff_col, coeff_wgt);
135 glp_simplex(lp, NULL);
137 scalar_t total_dist = glp_get_obj_val(lp);
139 for(int k = 1; k <= _nb_clusters; k++) {
140 for(int n = 1; n <= nb_points; n++) {
141 int i = n + nb_points * (k - 1);
142 gamma[n-1][k-1] = glp_get_col_prim(lp, i);
155 scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t **points,
156 int nb_classes, int *labels,
160 // dist(n,k) distance of samples n to cluster k
162 // We want to optimize the
164 // \gamma(n,k) is the association of point n to cluster k
168 // \sum_{n,k} \gamma(n,k) dist(n,k)
172 // (A) \forall n, k, \gamma(n, k) >= 0
173 // (B) \forall n, \sum_k \gamma(n,k) = 1
174 // (C) \forall k, \sum_n \gamma(n,k) = N/K
178 // The coefficients for the constraints are passed to the glpk
179 // functions with a sparse representation.
181 // ** GLPK USES INDEXES STARTING AT 1, NOT 0. **
183 int nb_coeffs = nb_points * _nb_clusters + nb_points * _nb_clusters;
185 int *coeff_row = new int[nb_coeffs + 1];
186 int *coeff_col = new int[nb_coeffs + 1];
187 scalar_t *coeff_wgt = new scalar_t[nb_coeffs + 1];
191 scalar_t *nb_samples_per_class = new scalar_t[nb_classes];
192 for(int c = 0; c < nb_classes; c++) {
193 nb_samples_per_class[c] = 0.0;
196 for(int n = 0; n < nb_points; n++) {
197 nb_samples_per_class[labels[n]] += 1.0;
200 lp = glp_create_prob();
202 glp_set_prob_name(lp, "uninformative_lp_cluster_association");
203 glp_set_obj_dir(lp, GLP_MIN);
205 // We have one column per coefficient gamma
207 glp_add_cols(lp, nb_points * _nb_clusters);
209 // The column for gamma[n][k] point 1<=n<=nb_points and cluster
210 // 1<=k<=_nb_clusters is nb_points * (k - 1) + n;
212 // The constraints (A) will be expressed by putting directly bounds
213 // on the variables (i.e. one per column). So we need one row per
214 // (B) constraint, and one per (C) constraint.
216 glp_add_rows(lp, nb_points + _nb_clusters * nb_classes);
218 // First, we set the weights for the objective function, and the (A)
219 // constraints on the individual gammas
221 for(int k = 1; k <= _nb_clusters; k++) {
222 for(int n = 1; n <= nb_points; n++) {
223 int col = n + nb_points * (k - 1);
225 // The LP weight on the gammas for the global loss is the
226 // normalized distance of that sample to the centroid of that
229 glp_set_obj_coef(lp, col, distance_to_centroid(points[n-1], k-1));
231 // The (A) constraints: Each column correspond to one of the
232 // gamma, and it has to be in [0,1]
234 glp_set_col_bnds(lp, col, GLP_DB, 0.0, 1.0);
238 // The (B) constraints: for each point, the sum of its gamma is
241 for(int n = 1; n <= nb_points; n++) {
243 glp_set_row_bnds(lp, row, GLP_FX, 1.0, 1.0);
244 for(int k = 1; k <= _nb_clusters; k++) {
245 coeff_row[n_coeff] = row;
246 coeff_col[n_coeff] = nb_points * (k - 1) + n;
247 coeff_wgt[n_coeff] = 1.0;
252 // The (C) constraints: For each pair cluster/class, the sum of the
253 // gammas for this cluster and this class is equal to the number of
254 // sample of that class, divided by the number of clusters
256 for(int k = 1; k <= _nb_clusters; k++) {
257 for(int c = 1; c <= nb_classes; c++) {
258 int row = nb_points + (k - 1) * nb_classes + c;
259 scalar_t tau = nb_samples_per_class[c-1] / scalar_t(_nb_clusters);
260 glp_set_row_bnds(lp, row, GLP_FX, tau, tau);
261 for(int n = 1; n <= nb_points; n++) {
262 if(labels[n-1] == c - 1) {
263 coeff_row[n_coeff] = row;
264 coeff_col[n_coeff] = (k-1) * nb_points + n;
265 coeff_wgt[n_coeff] = 1.0;
272 ASSERT(n_coeff == nb_coeffs + 1);
274 glp_load_matrix(lp, nb_coeffs, coeff_row, coeff_col, coeff_wgt);
276 // Now a miracle occurs
278 glp_simplex(lp, NULL);
280 // We retrieve the result
282 scalar_t total_dist = glp_get_obj_val(lp);
284 for(int k = 1; k <= _nb_clusters; k++) {
285 for(int n = 1; n <= nb_points; n++) {
286 int i = n + nb_points * (k - 1);
287 gamma[n-1][k-1] = glp_get_col_prim(lp, i);
291 delete[] nb_samples_per_class;
300 void Clusterer::update_clusters(int nb_points, scalar_t **points, scalar_t **gamma) {
301 for(int k = 0; k < _nb_clusters; k++) {
303 for(int d = 0; d < _dim; d++) {
304 _cluster_means[k][d] = 0.0;
305 _cluster_var[k][d] = 0.0;
308 scalar_t sum_gamma = 0;
309 for(int n = 0; n < nb_points; n++) {
310 sum_gamma += gamma[n][k];
311 for(int d = 0; d < _dim; d++) {
312 _cluster_means[k][d] += gamma[n][k] * points[n][d];
313 _cluster_var[k][d] += gamma[n][k] * sq(points[n][d]);
317 ASSERT(sum_gamma >= 1);
319 for(int d = 0; d < _dim; d++) {
322 (_cluster_var[k][d] - sq(_cluster_means[k][d]) / sum_gamma) / (sum_gamma - 1);
323 _cluster_var[k][d] = max(scalar_t(min_cluster_variance), _cluster_var[k][d]);
325 _cluster_var[k][d] = 1;
328 _cluster_means[k][d] /= sum_gamma;
333 void Clusterer::train(int mode,
334 int nb_clusters, int dim,
335 int nb_points, scalar_t **points,
336 int nb_classes, int *labels,
337 int *cluster_associations) {
338 deallocate_array<scalar_t>(_cluster_means);
339 deallocate_array<scalar_t>(_cluster_var);
340 _nb_clusters = nb_clusters;
342 _cluster_means = allocate_array<scalar_t>(_nb_clusters, _dim);
343 _cluster_var = allocate_array<scalar_t>(_nb_clusters, _dim);
345 scalar_t **gammas = allocate_array<scalar_t>(nb_points, _nb_clusters);
347 if(nb_clusters > nb_points) abort();
349 initialize_clusters(nb_points, points);
351 scalar_t pred_total_distance, total_distance = FLT_MAX;
355 pred_total_distance = total_distance;
359 case STANDARD_ASSOCIATION:
361 baseline_cluster_association(nb_points, points, nb_classes, labels, gammas);
364 case STANDARD_LP_ASSOCIATION:
366 baseline_lp_cluster_association(nb_points, points, nb_classes, labels, gammas);
369 case UNINFORMATIVE_LP_ASSOCIATION:
371 uninformative_lp_cluster_association(nb_points, points, nb_classes, labels, gammas);
375 cerr << "Unknown sample-cluster association mode." << endl;
379 cout << "TRAIN " << nb_rounds << " " << total_distance << endl;
380 update_clusters(nb_points, points, gammas);
382 } while(total_distance < min_iteration_improvement * pred_total_distance &&
383 nb_rounds < max_nb_iterations);
385 if(cluster_associations) {
386 for(int n = 0; n < nb_points; n++) {
387 for(int k = 0; k < _nb_clusters; k++) {
388 if(k == 0 || gammas[n][k] > gammas[n][cluster_associations[n]]) {
389 cluster_associations[n] = k;
395 deallocate_array<scalar_t>(gammas);
398 int Clusterer::cluster(scalar_t *point) {
399 scalar_t lowest_dist = 0;
400 int associated_cluster = -1;
402 for(int k = 0; k < _nb_clusters; k++) {
405 for(int d = 0; d < _dim; d++) {
406 dist += sq(_cluster_means[k][d] - point[d]) / (2 * _cluster_var[k][d]);
407 dist += 0.5 * log(_cluster_var[k][d]);
408 ASSERT(!isnan(dist) && !isinf(dist));
411 if(k == 0 || dist <= lowest_dist) {
413 associated_cluster = k;
417 ASSERT(associated_cluster >= 0);
419 return associated_cluster;