///////////////////////////////////////////////////////////////////////////
// This program is free software: you can redistribute it and/or modify //
// it under the terms of the version 3 of the GNU General Public License //
// as published by the Free Software Foundation.                         //
//                                                                       //
// This program is distributed in the hope that it will be useful, but   //
// WITHOUT ANY WARRANTY; without even the implied warranty of            //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU      //
// General Public License for more details.                              //
//                                                                       //
// You should have received a copy of the GNU General Public License     //
// along with this program. If not, see <http://www.gnu.org/licenses/>.  //
//                                                                       //
// Written by Francois Fleuret                                           //
// (C) Idiap Research Institute                                          //
//                                                                       //
// Contact <francois.fleuret@idiap.ch> for comments & bug reports        //
///////////////////////////////////////////////////////////////////////////
#include "loss_machine.h"
LossMachine::LossMachine(int loss_type) {
  _loss_type = loss_type;
}
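
// A minimal usage sketch, for illustration only. It assumes that the
// LOSS_* constants, SampleSet and scalar_t are declared in
// loss_machine.h, and that a SampleSet `samples` and a response array
// `responses` of size samples->nb_samples() already exist:
//
//   LossMachine machine(LOSS_EXPONENTIAL);
//   scalar_t *derivatives = new scalar_t[samples->nb_samples()];
//   machine.get_loss_derivatives(samples, responses, derivatives);
//   scalar_t l = machine.loss(samples, responses);
//   delete[] derivatives;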

void LossMachine::get_loss_derivatives(SampleSet *samples,
                                       scalar_t *responses,
                                       scalar_t *derivatives) {

  switch(_loss_type) {

  case LOSS_EXPONENTIAL:
    for(int n = 0; n < samples->nb_samples(); n++) {
      derivatives[n] =
        - samples->label(n) * exp( - samples->label(n) * responses[n]);
    }
    break;

  case LOSS_HINGE:
    for(int n = 0; n < samples->nb_samples(); n++) {
      // subgradient of the hinge loss when the margin constraint is
      // active; sign convention matches the logistic case below
      if(samples->label(n) != 0 && samples->label(n) * responses[n] < 1)
        derivatives[n] = samples->label(n);
      else
        derivatives[n] = 0.0;
    }
    break;

  case LOSS_LOGISTIC:
    for(int n = 0; n < samples->nb_samples(); n++) {
      if(samples->label(n) == 0)
        derivatives[n] = 0.0;
      else
        derivatives[n] = samples->label(n) / (1 + exp(samples->label(n) * responses[n]));
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::get_loss_derivatives."
         << endl;
    exit(1);
  }
}
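
// For reference, with y = label(n) and f = responses[n], the per-sample
// losses handled above and their derivatives in f are (up to the sign
// convention expected by the caller):
//   exponential:  l = exp(-y f),          dl/df = -y exp(-y f)
//   hinge:        l = max(0, 1 - y f),    dl/df = -y if y f < 1, else 0
//   logistic:     l = log(1 + exp(-y f)), dl/df = -y / (1 + exp(y f))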

scalar_t LossMachine::loss(SampleSet *samples, scalar_t *responses) {
  scalar_t l = 0;

  switch(_loss_type) {

  case LOSS_EXPONENTIAL:
    for(int n = 0; n < samples->nb_samples(); n++) {
      l += exp( - samples->label(n) * responses[n]);
    }
    break;

  case LOSS_EV_REGULARIZED:
    {
      scalar_t sum_pos = 0, sum_sq_pos = 0, nb_pos = 0, m_pos, v_pos;
      scalar_t sum_neg = 0, sum_sq_neg = 0, nb_neg = 0, m_neg, v_neg;

      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) > 0) {
          sum_pos += responses[n];
          sum_sq_pos += sq(responses[n]);
          nb_pos += 1.0;
        } else if(samples->label(n) < 0) {
          sum_neg += responses[n];
          sum_sq_neg += sq(responses[n]);
          nb_neg += 1.0;
        }
      }

      // at least two samples are needed for the unbiased variance
      if(nb_pos > 1) {
        m_pos = sum_pos / nb_pos;
        v_pos = sum_sq_pos/(nb_pos - 1) - sq(sum_pos)/(nb_pos * (nb_pos - 1));
        l += nb_pos * exp(v_pos/2 - m_pos);
      }

      if(nb_neg > 1) {
        m_neg = sum_neg / nb_neg;
        v_neg = sum_sq_neg/(nb_neg - 1) - sq(sum_neg)/(nb_neg * (nb_neg - 1));
        l += nb_neg * exp(v_neg/2 + m_neg);
      }
    }
    break;
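
  // The two exp terms above follow from a Gaussian model of the
  // responses of each class: if X ~ N(m, v), then E[exp(-X)] =
  // exp(v/2 - m), the moment generating function of X evaluated at -1.
  // Hence nb_pos * exp(v_pos/2 - m_pos) for the positive samples, and
  // symmetrically E[exp(+X)] = exp(v/2 + m) for the negative ones.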

  case LOSS_HINGE:
    for(int n = 0; n < samples->nb_samples(); n++) {
      if(samples->label(n) != 0) {
        if(samples->label(n) * responses[n] < 1)
          l += (1 - samples->label(n) * responses[n]);
      }
    }
    break;

  case LOSS_LOGISTIC:
    for(int n = 0; n < samples->nb_samples(); n++) {
      if(samples->label(n) != 0) {
        scalar_t u = - samples->label(n) * responses[n];
        // guard against overflow of exp: log(1 + exp(u)) ~= u for large
        // u, and ~= 0 for very negative u
        if(u > 20) {
          l += u;
        } else if(u > -20) {
          l += log(1 + exp(u));
        }
      }
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::loss." << endl;
    exit(1);
  }

  return l;
}

scalar_t LossMachine::optimal_weight(SampleSet *sample_set,
                                     scalar_t *weak_learner_responses,
                                     scalar_t *current_responses) {

  switch(_loss_type) {

  case LOSS_EXPONENTIAL:
    {
      scalar_t num = 0, den = 0, z;

      for(int n = 0; n < sample_set->nb_samples(); n++) {
        z = sample_set->label(n) * weak_learner_responses[n];
        if(z > 0) {
          num += exp( - sample_set->label(n) * current_responses[n]);
        } else if(z < 0) {
          den += exp( - sample_set->label(n) * current_responses[n]);
        }
      }

      return 0.5 * log(num / den);
    }
    break;
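
  // Closed form for the exponential loss with a +/-1 weak learner:
  // writing w_n = exp(-y_n f(x_n)), W+ for the weight mass of samples
  // with y_n h(x_n) > 0 and W- for the mass with y_n h(x_n) < 0, the
  // loss after the update f + u h is W+ exp(-u) + W- exp(u), which is
  // minimized at u = 0.5 * log(W+ / W-), the value returned above
  // (num = W+, den = W-).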

  case LOSS_EV_REGULARIZED:
    {
      scalar_t u = 0, du = -0.1;
      scalar_t *responses = new scalar_t[sample_set->nb_samples()];

      scalar_t l, prev_l = -1;

      const scalar_t minimum_delta_for_optimization = 1e-5;

      int nb = 0;
      scalar_t shift = 0;

      {
        // compute a shift from the responses at u = 0, to keep the exp
        // terms of the loss in a numerically safe range
        scalar_t sum_pos = 0, sum_sq_pos = 0, nb_pos = 0, m_pos, v_pos;
        scalar_t sum_neg = 0, sum_sq_neg = 0, nb_neg = 0, m_neg, v_neg;

        for(int n = 0; n < sample_set->nb_samples(); n++) {
          responses[n] = current_responses[n];
          if(sample_set->label(n) > 0) {
            sum_pos += responses[n];
            sum_sq_pos += sq(responses[n]);
            nb_pos += 1.0;
          } else if(sample_set->label(n) < 0) {
            sum_neg += responses[n];
            sum_sq_neg += sq(responses[n]);
            nb_neg += 1.0;
          }
        }

        if(nb_pos > 1) {
          m_pos = sum_pos / nb_pos;
          v_pos = sum_sq_pos/(nb_pos - 1) - sq(sum_pos)/(nb_pos * (nb_pos - 1));
          shift = max(shift, v_pos/2 - m_pos);
        }

        if(nb_neg > 1) {
          m_neg = sum_neg / nb_neg;
          v_neg = sum_sq_neg/(nb_neg - 1) - sq(sum_neg)/(nb_neg * (nb_neg - 1));
          shift = max(shift, v_neg/2 + m_neg);
        }

        // (*global.log_stream) << "nb_pos = " << nb_pos << " nb_neg = " << nb_neg << endl;
      }

      while(nb < 100 && abs(du) > minimum_delta_for_optimization) {

        // (*global.log_stream) << "l = " << l << " u = " << u << " du = " << du << endl;

        for(int s = 0; s < sample_set->nb_samples(); s++) {
          responses[s] = current_responses[s] + u * weak_learner_responses[s];
        }

        {
          scalar_t sum_pos = 0, sum_sq_pos = 0, nb_pos = 0, m_pos, v_pos;
          scalar_t sum_neg = 0, sum_sq_neg = 0, nb_neg = 0, m_neg, v_neg;

          for(int n = 0; n < sample_set->nb_samples(); n++) {
            if(sample_set->label(n) > 0) {
              sum_pos += responses[n];
              sum_sq_pos += sq(responses[n]);
              nb_pos += 1.0;
            } else if(sample_set->label(n) < 0) {
              sum_neg += responses[n];
              sum_sq_neg += sq(responses[n]);
              nb_neg += 1.0;
            }
          }

          l = 0;

          if(nb_pos > 1) {
            m_pos = sum_pos / nb_pos;
            v_pos = sum_sq_pos/(nb_pos - 1) - sq(sum_pos)/(nb_pos * (nb_pos - 1));
            l += nb_pos * exp(v_pos/2 - m_pos - shift);
          }

          if(nb_neg > 1) {
            m_neg = sum_neg / nb_neg;
            v_neg = sum_sq_neg/(nb_neg - 1) - sq(sum_neg)/(nb_neg * (nb_neg - 1));
            l += nb_neg * exp(v_neg/2 + m_neg - shift);
          }
        }

        if(l > prev_l) du = du * -0.25;
        prev_l = l;
        u += du;
        nb++;
      }

      delete[] responses;
      return u;
    }
    break;
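
  // The EV-regularized case has no closed-form minimizer, so the loop
  // above performs a crude derivative-free line search on u: it steps
  // by du, and whenever the loss increases it reverses direction and
  // shrinks the step (du *= -0.25), stopping after 100 steps or once
  // |du| drops below minimum_delta_for_optimization.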

  case LOSS_HINGE:
  case LOSS_LOGISTIC:
    {
      scalar_t u = 0, du = -0.1;
      scalar_t *responses = new scalar_t[sample_set->nb_samples()];

      scalar_t l, prev_l = -1;

      const scalar_t minimum_delta_for_optimization = 1e-5;

      // same line search as above, reusing the generic loss()
      int n = 0;
      while(n < 100 && abs(du) > minimum_delta_for_optimization) {
        for(int s = 0; s < sample_set->nb_samples(); s++) {
          responses[s] = current_responses[s] + u * weak_learner_responses[s];
        }
        l = loss(sample_set, responses);
        if(l > prev_l) du = du * -0.25;
        prev_l = l;
        u += du;
        n++;
      }

      (*global.log_stream) << "END l = " << l << " du = " << du << endl;

      delete[] responses;
      return u;
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::optimal_weight." << endl;
    exit(1);
  }

  return 0;
}

void LossMachine::subsample(int nb, scalar_t *labels, scalar_t *responses,
                            int nb_to_sample, int *sample_nb_occurences,
                            scalar_t *sample_responses, int allow_duplicates) {

  switch(_loss_type) {

  case LOSS_EXPONENTIAL:
    {
      scalar_t *weights = new scalar_t[nb];

      for(int n = 0; n < nb; n++) {
        // ignore unlabeled samples, weight the others by their
        // exponential loss
        if(labels[n] == 0)
          weights[n] = 0.0;
        else
          weights[n] = exp( - labels[n] * responses[n]);
        sample_nb_occurences[n] = 0;
        sample_responses[n] = 0.0;
      }

      scalar_t total_weight;
      int nb_sampled = 0, sum_sample_nb_occurences = 0;

      int *sampled_indexes = new int[nb_to_sample];

      (*global.log_stream) << "Sampling " << nb_to_sample << " samples." << endl;

      do {
        // draws nb_to_sample indexes according to weights and returns
        // the total weight
        total_weight = robust_sampling(nb, weights, nb_to_sample,
                                       sampled_indexes);

        for(int k = 0; nb_sampled < nb_to_sample && k < nb_to_sample; k++) {
          int i = sampled_indexes[k];
          if(allow_duplicates || sample_nb_occurences[i] == 0) nb_sampled++;
          sample_nb_occurences[i]++;
          sum_sample_nb_occurences++;
        }
      } while(nb_sampled < nb_to_sample);

      (*global.log_stream) << "nb_sampled = " << nb_sampled
                           << " nb_to_sample = " << nb_to_sample << endl;
      (*global.log_stream) << "Done." << endl;

      delete[] sampled_indexes;

      scalar_t unit_weight = log(total_weight / scalar_t(sum_sample_nb_occurences));

      for(int n = 0; n < nb; n++) {
        if(sample_nb_occurences[n] > 0) {
          if(allow_duplicates) {
            sample_responses[n] = - labels[n] * unit_weight;
          } else {
            sample_responses[n] = - labels[n] * (unit_weight + log(scalar_t(sample_nb_occurences[n])));
            sample_nb_occurences[n] = 1;
          }
        }
      }
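
      // Rationale for the response offsets above: after subsampling, a
      // kept sample should carry the weight mass it represents. Since a
      // sample's weight is exp(-label * response), setting its response
      // to -label * log(w) gives it weight exactly w: exp(unit_weight),
      // one equal share of the total mass, when duplicates keep their
      // multiplicity, or that share times the multiplicity when
      // duplicates are collapsed to a single occurrence.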

      delete[] weights;
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::subsample." << endl;
    exit(1);
  }
}