From: Francois Fleuret
Date: Thu, 28 Mar 2013 07:42:00 +0000 (+0100)
Subject: Cosmetics so that I will understand in three months the glpk use.
X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=commitdiff_plain;h=f371b63c16c7d5149e6518b724346a40b8351ab6;p=clueless-kmeans.git

Cosmetics so that I will understand in three months the glpk use.
---

diff --git a/clueless-kmean.cc b/clueless-kmean.cc
index 13d9624..e3d4f93 100644
--- a/clueless-kmean.cc
+++ b/clueless-kmean.cc
@@ -77,6 +77,7 @@ int main(int argc, char **argv) {
   glp_term_out(0);
 
   clusterer.train(Clusterer::UNINFORMATIVE_LP_ASSOCIATION,
+                  // Clusterer::STANDARD_LP_ASSOCIATION,
                   nb_clusters,
                   sample_set.dim, sample_set.nb_points, sample_set.points,
diff --git a/clusterer.cc b/clusterer.cc
index 04a9af4..60cead1 100644
--- a/clusterer.cc
+++ b/clusterer.cc
@@ -97,9 +97,9 @@ scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **po
                                                     scalar_t **gamma) {
   glp_prob *lp;
 
-  int *ia = new int[nb_points * _nb_clusters + 1];
-  int *ja = new int[nb_points * _nb_clusters + 1];
-  scalar_t *ar = new scalar_t[nb_points * _nb_clusters + 1];
+  int *coeff_row = new int[nb_points * _nb_clusters + 1];
+  int *coeff_col = new int[nb_points * _nb_clusters + 1];
+  scalar_t *coeff_wgt = new scalar_t[nb_points * _nb_clusters + 1];
 
   lp = glp_create_prob();
@@ -121,18 +121,18 @@ scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **po
     }
   }
 
-  int l = 1;
+  int n_coeff = 1;
 
   for(int n = 1; n <= nb_points; n++) {
     for(int k = 1; k <= _nb_clusters; k++) {
-      ia[l] = n;
-      ja[l] = n + nb_points * (k - 1);
-      ar[l] = 1.0;
-      l++;
+      coeff_row[n_coeff] = n;
+      coeff_col[n_coeff] = n + nb_points * (k - 1);
+      coeff_wgt[n_coeff] = 1.0;
+      n_coeff++;
     }
   }
 
-  glp_load_matrix(lp, nb_points * _nb_clusters, ia, ja, ar);
+  glp_load_matrix(lp, nb_points * _nb_clusters, coeff_row, coeff_col, coeff_wgt);
 
   glp_simplex(lp, NULL);
@@ -145,9 +145,9 @@ scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **po
     }
   }
 
-  delete[] ia;
-  delete[] ja;
-  delete[] ar;
+  delete[] coeff_row;
+  delete[] coeff_col;
+  delete[] coeff_wgt;
 
   glp_delete_prob(lp);
@@ -157,13 +157,36 @@ scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **po
 scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t **points,
                                                          int nb_classes, int *labels,
                                                          scalar_t **gamma) {
+  // N points
+  // K clusters
+  // N_c number of samples of class c
+  // dist(n,k) distance of sample n to cluster k
+  //
+  // We want to optimize the
+  //
+  // \gamma(n,k), the association of point n to cluster k,
+  //
+  // to minimize
+  //
+  // \sum_{n,k} \gamma(n,k) dist(n,k)
+  //
+  // under
+  //
+  // (A) \forall n, k, \gamma(n, k) >= 0
+  // (B) \forall n, \sum_k \gamma(n,k) = 1
+  // (C) \forall k, c, \sum_{n in class c} \gamma(n,k) = N_c / K
+
   glp_prob *lp;
+
+  // The coefficients for the constraints are passed to the glpk
+  // functions with a sparse representation.
+
   int nb_coeffs = nb_points * _nb_clusters + nb_points * _nb_clusters;
 
-  int *ia = new int[nb_coeffs + 1];
-  int *ja = new int[nb_coeffs + 1];
-  scalar_t *ar = new scalar_t[nb_coeffs + 1];
+  int *coeff_row = new int[nb_coeffs + 1];
+  int *coeff_col = new int[nb_coeffs + 1];
+  scalar_t *coeff_wgt = new scalar_t[nb_coeffs + 1];
+
+  int n_coeff = 1;
 
   scalar_t *nb_samples_per_class = new scalar_t[nb_classes];
   for(int c = 0; c < nb_classes; c++) {
@@ -179,34 +202,18 @@ scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t
   glp_set_prob_name(lp, "uninformative_lp_cluster_association");
   glp_set_obj_dir(lp, GLP_MIN);
 
-  // We have one constraint per point and one per cluster/class
-
-  glp_add_rows(lp, nb_points + _nb_clusters * nb_classes);
+  // We have one column per coefficient gamma
 
-  // (A) For each point, the constraint is that the sum of its
-  // association coefficients is equal to 1.0
-
-  for(int n = 1; n <= nb_points; n++) {
-    int row = n;
-    glp_set_row_bnds(lp, row, GLP_FX, 1.0, 1.0);
-  }
-
-  // (B) For each pair cluster/class, the sum of the association
-  // coefficient to this cluster for this class is equal to the number
-  // of sample of that class, divided by the number of clusters
+  glp_add_cols(lp, nb_points * _nb_clusters);
 
-  for(int k = 1; k <= _nb_clusters; k++) {
-    for(int c = 1; c <= nb_classes; c++) {
-      int row = nb_points + (k - 1) * nb_classes + c;
-      scalar_t tau = nb_samples_per_class[c-1] / scalar_t(_nb_clusters);
-      glp_set_row_bnds(lp, row, GLP_FX, tau, tau);
-    }
-  }
+  // The constraints (A) will be expressed by putting bounds directly
+  // on the column variables. So we need one row per (B) constraint,
+  // and one per (C) constraint.
 
-  // Each one of the constraints above involve a linear combination of
-  // all the association coefficients
+  glp_add_rows(lp, nb_points + _nb_clusters * nb_classes);
 
-  glp_add_cols(lp, nb_points * _nb_clusters);
+  // First, we set the weights for the objective function, and the
+  // constraints on the individual gammas
 
   for(int k = 1; k <= _nb_clusters; k++) {
     for(int n = 1; n <= nb_points; n++) {
@@ -218,49 +225,61 @@ scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t
       glp_set_obj_coef(lp, col, distance_to_centroid(points[n-1], k-1));
 
-      // And all the association coefficient is in [0,1]
+      // The (A) constraints: each column corresponds to one of the
+      // gammas, and it has to be in [0,1]
 
       glp_set_col_bnds(lp, col, GLP_DB, 0.0, 1.0);
     }
   }
 
-  int l = 1;
-
-  // We build the matrix of the LP coefficients
+  // The (B) constraints: for each point, the sum of its association
+  // coefficients is equal to 1.0
 
-  // The sums of the association coefficients per points for the
-  // constraints (A) above.
+  for(int n = 1; n <= nb_points; n++) {
+    int row = n;
+    glp_set_row_bnds(lp, row, GLP_FX, 1.0, 1.0);
+  }
 
   for(int n = 1; n <= nb_points; n++) {
     for(int k = 1; k <= _nb_clusters; k++) {
       int row = n;
-      ia[l] = row;
-      ja[l] = nb_points * (k - 1) + n;
-      ar[l] = 1.0;
-      l++;
+      coeff_row[n_coeff] = row;
+      coeff_col[n_coeff] = nb_points * (k - 1) + n;
+      coeff_wgt[n_coeff] = 1.0;
+      n_coeff++;
     }
   }
 
-  // And the sums of coefficients for each pair class/cluster for
-  // constraint (B) above.
+  // The (C) constraints: for each pair cluster/class, the sum of the
+  // association coefficients to this cluster for this class is equal
+  // to the number of samples of that class, divided by the number of
+  // clusters
+
+  for(int k = 1; k <= _nb_clusters; k++) {
+    for(int c = 1; c <= nb_classes; c++) {
+      int row = nb_points + (k - 1) * nb_classes + c;
+      scalar_t tau = nb_samples_per_class[c-1] / scalar_t(_nb_clusters);
+      glp_set_row_bnds(lp, row, GLP_FX, tau, tau);
+    }
+  }
 
   for(int k = 1; k <= _nb_clusters; k++) {
     for(int c = 1; c <= nb_classes; c++) {
       int row = nb_points + (k - 1) * nb_classes + c;
       for(int n = 1; n <= nb_points; n++) {
         if(labels[n-1] == c - 1) {
-          ia[l] = row;
-          ja[l] = (k-1) * nb_points + n;
-          ar[l] = 1.0;
-          l++;
+          coeff_row[n_coeff] = row;
+          coeff_col[n_coeff] = (k-1) * nb_points + n;
+          coeff_wgt[n_coeff] = 1.0;
+          n_coeff++;
         }
       }
     }
   }
 
-  ASSERT(l == nb_coeffs + 1);
+  ASSERT(n_coeff == nb_coeffs + 1);
 
-  glp_load_matrix(lp, nb_coeffs, ia, ja, ar);
+  glp_load_matrix(lp, nb_coeffs, coeff_row, coeff_col, coeff_wgt);
 
   // Now a miracle occurs
 
@@ -278,9 +297,9 @@ scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t
   }
 
   delete[] nb_samples_per_class;
-  delete[] ia;
-  delete[] ja;
-  delete[] ar;
+  delete[] coeff_row;
+  delete[] coeff_col;
+  delete[] coeff_wgt;
 
   glp_delete_prob(lp);
 
   return total_dist;
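For future reference, below is a minimal stand-alone sketch (not part of the commit) of the GLPK calling pattern the patch documents: the constraint matrix is given as 1-based sparse triplets (row index, column index, coefficient) and loaded in one call to glp_load_matrix, then glp_simplex solves the LP and glp_get_col_prim reads back the optimal variables. The tiny two-variable problem, its bounds and objective weights are made up purely for illustration.

// Minimal GLPK usage sketch, illustrative only.
// Problem: minimize x1 + 2 x2  subject to  x1 + x2 = 1,  0 <= x1, x2 <= 1.

#include <glpk.h>
#include <cstdio>

int main() {
  glp_prob *lp = glp_create_prob();
  glp_set_obj_dir(lp, GLP_MIN);

  glp_add_rows(lp, 1);                         // one constraint row
  glp_set_row_bnds(lp, 1, GLP_FX, 1.0, 1.0);   // x1 + x2 = 1

  glp_add_cols(lp, 2);                         // two variables
  for(int j = 1; j <= 2; j++) {
    glp_set_col_bnds(lp, j, GLP_DB, 0.0, 1.0); // 0 <= xj <= 1
    glp_set_obj_coef(lp, j, double(j));        // objective weights 1 and 2
  }

  // Sparse triplets; GLPK ignores index 0, entries start at index 1
  int coeff_row[3], coeff_col[3];
  double coeff_wgt[3];
  coeff_row[1] = 1; coeff_col[1] = 1; coeff_wgt[1] = 1.0; // row 1, col 1
  coeff_row[2] = 1; coeff_col[2] = 2; coeff_wgt[2] = 1.0; // row 1, col 2

  glp_load_matrix(lp, 2, coeff_row, coeff_col, coeff_wgt);

  glp_simplex(lp, NULL);

  printf("x1 = %f, x2 = %f, obj = %f\n",
         glp_get_col_prim(lp, 1), glp_get_col_prim(lp, 2),
         glp_get_obj_val(lp));

  glp_delete_prob(lp);
  return 0;
}

In the patch, the same pattern is used at a larger scale: one column per association coefficient gamma(n,k), one row per (B) and per (C) constraint, and the triplet arrays coeff_row / coeff_col / coeff_wgt filled before the single glp_load_matrix call.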