///////////////////////////////////////////////////////////////////////////
// This program is free software: you can redistribute it and/or modify  //
// it under the terms of the version 3 of the GNU General Public License //
// as published by the Free Software Foundation.                         //
//                                                                       //
// This program is distributed in the hope that it will be useful, but   //
// WITHOUT ANY WARRANTY; without even the implied warranty of            //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU      //
// General Public License for more details.                              //
//                                                                       //
// You should have received a copy of the GNU General Public License     //
// along with this program. If not, see <http://www.gnu.org/licenses/>.  //
//                                                                       //
// Written by Francois Fleuret, (C) IDIAP                                //
// Contact <francois.fleuret@idiap.ch> for comments & bug reports        //
///////////////////////////////////////////////////////////////////////////
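
// Implementation of the LossMachine class: loss values, loss derivatives
// with respect to the sample responses, optimal weights for weak learner
// responses, and loss-driven subsampling of the training examples.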

#include "loss_machine.h"

LossMachine::LossMachine(int loss_type) {
  _loss_type = loss_type;
}

void LossMachine::get_loss_derivatives(SampleSet *samples,
                                       scalar_t *responses,
                                       scalar_t *derivatives) {

  switch(_loss_type) {

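  // Exponential loss: every sample contributes exp(-y_n * f_n), whose
  // derivative with respect to the response f_n is -y_n * exp(-y_n * f_n).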
  case LOSS_EXPONENTIAL:
    {
      for(int n = 0; n < samples->nb_samples(); n++) {
        derivatives[n] =
          - samples->label(n) * exp( - samples->label(n) * responses[n]);
      }
    }
    break;
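
  // LOSS_EV_REGULARIZED: m_pos and v_pos (resp. m_neg and v_neg) are the
  // empirical mean and unbiased variance of the responses of the positive
  // (resp. negative) samples, and the loss is
  // nb_pos * exp(v_pos/2 - m_pos) + nb_neg * exp(v_neg/2 + m_neg).
  // The derivatives below follow from d m_pos / d f_n = 1 / nb_pos and
  // d v_pos / d f_n = 2 (f_n - m_pos) / (nb_pos - 1), and symmetrically
  // for the negative samples.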
  case LOSS_EV_REGULARIZED:
    {
      scalar_t sum_pos = 0, sum_sq_pos = 0, nb_pos = 0, m_pos, v_pos;
      scalar_t sum_neg = 0, sum_sq_neg = 0, nb_neg = 0, m_neg, v_neg;

      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) > 0) {
          sum_pos += responses[n];
          sum_sq_pos += sq(responses[n]);
          nb_pos++;
        }
        else if(samples->label(n) < 0) {
          sum_neg += responses[n];
          sum_sq_neg += sq(responses[n]);
          nb_neg++;
        }
      }

      m_pos = sum_pos / nb_pos;
      v_pos = sum_sq_pos/(nb_pos - 1) - sq(sum_pos)/(nb_pos * (nb_pos - 1));

      scalar_t loss_pos = nb_pos * exp(v_pos/2 - m_pos);

      m_neg = sum_neg / nb_neg;
      v_neg = sum_sq_neg/(nb_neg - 1) - sq(sum_neg)/(nb_neg * (nb_neg - 1));

      scalar_t loss_neg = nb_neg * exp(v_neg/2 + m_neg);

      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) > 0) {
          derivatives[n] =
            ( - 1/nb_pos + (responses[n] - m_pos)/(nb_pos - 1)) * loss_pos;
        } else if(samples->label(n) < 0) {
          derivatives[n] =
            ( 1/nb_neg + (responses[n] - m_neg)/(nb_neg - 1)) * loss_neg;
        }
      }
    }
    break;
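
  // Hinge loss: every labeled sample contributes max(0, 1 - y_n * f_n),
  // so only samples inside the margin (y_n * f_n < 1) get a non-zero
  // derivative; samples with a zero label are ignored.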
  case LOSS_HINGE:
    {
      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) != 0 && samples->label(n) * responses[n] < 1)
          derivatives[n] = samples->label(n);
        else
          derivatives[n] = 0.0;
      }
    }
    break;
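
  // Logistic loss: every labeled sample contributes log(1 + exp(-y_n * f_n)),
  // and the stored term is y_n / (1 + exp(y_n * f_n)); samples with a zero
  // label are ignored.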
  case LOSS_LOGISTIC:
    {
      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) == 0)
          derivatives[n] = 0.0;
        else
          derivatives[n] = samples->label(n) * 1/(1 + exp(samples->label(n) * responses[n]));
      }
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::get_loss_derivatives."
         << endl;
    exit(1);
  }
}
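
// Returns the total loss of the given responses on the samples.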
scalar_t LossMachine::loss(SampleSet *samples, scalar_t *responses) {
  scalar_t l = 0;

  switch(_loss_type) {

  case LOSS_EXPONENTIAL:
    {
      for(int n = 0; n < samples->nb_samples(); n++) {
        l += exp( - samples->label(n) * responses[n]);
      }
    }
    break;
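
  // For each class, nb * exp(v/2 -/+ m) equals nb * E[exp(-y * f)] when
  // the responses f of that class are modeled as Gaussian with empirical
  // mean m and variance v, which is presumably the expectation-variance
  // regularization the loss type is named after.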
  case LOSS_EV_REGULARIZED:
    {
      scalar_t sum_pos = 0, sum_sq_pos = 0, nb_pos = 0, m_pos, v_pos;
      scalar_t sum_neg = 0, sum_sq_neg = 0, nb_neg = 0, m_neg, v_neg;

      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) > 0) {
          sum_pos += responses[n];
          sum_sq_pos += sq(responses[n]);
          nb_pos++;
        } else if(samples->label(n) < 0) {
          sum_neg += responses[n];
          sum_sq_neg += sq(responses[n]);
          nb_neg++;
        }
      }

      m_pos = sum_pos / nb_pos;
      v_pos = sum_sq_pos/(nb_pos - 1) - sq(sum_pos)/(nb_pos * (nb_pos - 1));
      l += nb_pos * exp(v_pos/2 - m_pos);

      m_neg = sum_neg / nb_neg;
      v_neg = sum_sq_neg/(nb_neg - 1) - sq(sum_neg)/(nb_neg * (nb_neg - 1));
      l += nb_neg * exp(v_neg/2 + m_neg);
    }
    break;

  case LOSS_HINGE:
    {
      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) != 0) {
          if(samples->label(n) * responses[n] < 1)
            l += (1 - samples->label(n) * responses[n]);
        }
      }
    }
    break;

  case LOSS_LOGISTIC:
    {
      for(int n = 0; n < samples->nb_samples(); n++) {
        if(samples->label(n) != 0) {
          scalar_t u = - samples->label(n) * responses[n];
          // For large u, exp(u) overflows, but log(1 + exp(u)) is then
          // numerically indistinguishable from u
          if(u > 20) {
            l += u;
          } else {
            l += log(1 + exp(u));
          }
        }
      }
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::loss." << endl;
    exit(1);
  }

  return l;
}
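
// Returns a multiplicative weight for the weak learner responses such that
// adding them, scaled by that weight, to the current responses minimizes
// (at least approximately) the loss.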
scalar_t LossMachine::optimal_weight(SampleSet *sample_set,
                                     scalar_t *weak_learner_responses,
                                     scalar_t *current_responses) {

  switch(_loss_type) {

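  // When the weak learner responses act only through their sign, the
  // exponential loss is minimized by the usual AdaBoost-like closed form
  // 0.5 * log(num / den), where num (resp. den) is the total weight
  // exp(-y_n * f_n) of the samples on which the weak learner agrees
  // (resp. disagrees) with the label.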
  case LOSS_EXPONENTIAL:
    {
      scalar_t num = 0, den = 0, z;

      for(int n = 0; n < sample_set->nb_samples(); n++) {
        z = sample_set->label(n) * weak_learner_responses[n];
        if(z > 0) {
          num += exp( - sample_set->label(n) * current_responses[n]);
        } else if(z < 0) {
          den += exp( - sample_set->label(n) * current_responses[n]);
        }
      }

      return 0.5 * log(num / den);
    }
    break;
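
  // No closed form for the regularized loss: perform a crude 1D
  // minimization of the loss as a function of the weight u, with a step du
  // whose sign flips and whose magnitude shrinks whenever the loss
  // increases. The constant shift subtracted inside the exponentials only
  // rescales the loss to avoid overflow and does not move the minimum.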
  case LOSS_EV_REGULARIZED:
    {
      scalar_t u = 0, du = -0.1;
      scalar_t *responses = new scalar_t[sample_set->nb_samples()];
      scalar_t l, prev_l = -1;
      const scalar_t minimum_delta_for_optimization = 1e-5;

      // Compute the shift from the current responses (u = 0)
      scalar_t shift = 0;
      scalar_t sum_pos = 0, sum_sq_pos = 0, nb_pos = 0, m_pos, v_pos;
      scalar_t sum_neg = 0, sum_sq_neg = 0, nb_neg = 0, m_neg, v_neg;

      for(int n = 0; n < sample_set->nb_samples(); n++) {
        if(sample_set->label(n) > 0) {
          sum_pos += current_responses[n];
          sum_sq_pos += sq(current_responses[n]);
          nb_pos++;
        } else if(sample_set->label(n) < 0) {
          sum_neg += current_responses[n];
          sum_sq_neg += sq(current_responses[n]);
          nb_neg++;
        }
      }

      m_pos = sum_pos / nb_pos;
      v_pos = sum_sq_pos/(nb_pos - 1) - sq(sum_pos)/(nb_pos * (nb_pos - 1));
      shift = max(shift, v_pos/2 - m_pos);

      m_neg = sum_neg / nb_neg;
      v_neg = sum_sq_neg/(nb_neg - 1) - sq(sum_neg)/(nb_neg * (nb_neg - 1));
      shift = max(shift, v_neg/2 + m_neg);

      // (*global.log_stream) << "nb_pos = " << nb_pos << " nb_neg = " << nb_neg << endl;

      int nb = 0;

      while(nb < 100 && abs(du) > minimum_delta_for_optimization) {
        nb++;

        // (*global.log_stream) << "l = " << l << " u = " << u << " du = " << du << endl;

        for(int s = 0; s < sample_set->nb_samples(); s++) {
          responses[s] = current_responses[s] + u * weak_learner_responses[s] ;
        }

        scalar_t sum_pos = 0, sum_sq_pos = 0, nb_pos = 0, m_pos, v_pos;
        scalar_t sum_neg = 0, sum_sq_neg = 0, nb_neg = 0, m_neg, v_neg;

        for(int n = 0; n < sample_set->nb_samples(); n++) {
          if(sample_set->label(n) > 0) {
            sum_pos += responses[n];
            sum_sq_pos += sq(responses[n]);
            nb_pos++;
          } else if(sample_set->label(n) < 0) {
            sum_neg += responses[n];
            sum_sq_neg += sq(responses[n]);
            nb_neg++;
          }
        }

        l = 0;

        m_pos = sum_pos / nb_pos;
        v_pos = sum_sq_pos/(nb_pos - 1) - sq(sum_pos)/(nb_pos * (nb_pos - 1));
        l += nb_pos * exp(v_pos/2 - m_pos - shift);

        m_neg = sum_neg / nb_neg;
        v_neg = sum_sq_neg/(nb_neg - 1) - sq(sum_neg)/(nb_neg * (nb_neg - 1));
        l += nb_neg * exp(v_neg/2 + m_neg - shift);

        if(l > prev_l) du = du * -0.25;
        prev_l = l;
        u += du;
      }

      delete[] responses;

      return u;
    }
    break;
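
  // No closed form for the remaining losses either: same crude 1D search
  // on the weight u, this time re-evaluating the loss with the generic
  // loss() method at every step.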
  case LOSS_HINGE:
  case LOSS_LOGISTIC:
    {
      scalar_t u = 0, du = -0.1;
      scalar_t *responses = new scalar_t[sample_set->nb_samples()];
      scalar_t l, prev_l = -1;
      const scalar_t minimum_delta_for_optimization = 1e-5;
      int n = 0;

      while(n < 100 && abs(du) > minimum_delta_for_optimization) {
        n++;
        for(int s = 0; s < sample_set->nb_samples(); s++) {
          responses[s] = current_responses[s] + u * weak_learner_responses[s] ;
        }
        l = loss(sample_set, responses);
        if(l > prev_l) du = du * -0.25;
        prev_l = l;
        u += du;
      }

      (*global.log_stream) << "END l = " << l << " du = " << du << endl;

      delete[] responses;

      return u;
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::optimal_weight." << endl;
    exit(1);
  }

  return 0.0;
}
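
// Samples nb_to_sample of the nb given samples with probabilities
// proportional to their loss weights, and fills sample_nb_occurences and
// sample_responses so that each kept sample carries, through
// exp(-label * response), the total weight of its picks.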
void LossMachine::subsample(int nb, scalar_t *labels, scalar_t *responses,
                            int nb_to_sample, int *sample_nb_occurences, scalar_t *sample_responses,
                            int allow_duplicates) {

  switch(_loss_type) {

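  // With the exponential loss, each sample is weighted by exp(-y_n * f_n)
  // and indexes are drawn with robust_sampling according to these weights,
  // until nb_to_sample distinct samples (or picks, if duplicates are
  // allowed) have been obtained.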
  case LOSS_EXPONENTIAL:
    {
      scalar_t *weights = new scalar_t[nb];

      for(int n = 0; n < nb; n++) {
        // Samples with a zero label get a zero weight and are never picked
        if(labels[n] == 0) {
          weights[n] = 0.0;
        } else {
          weights[n] = exp( - labels[n] * responses[n]);
        }
        sample_nb_occurences[n] = 0;
        sample_responses[n] = 0.0;
      }

      scalar_t total_weight;
      int nb_sampled = 0, sum_sample_nb_occurences = 0;

      int *sampled_indexes = new int[nb_to_sample];

      (*global.log_stream) << "Sampling " << nb_to_sample << " samples." << endl;

      do {
        total_weight = robust_sampling(nb, weights, nb_to_sample, sampled_indexes);

        for(int k = 0; nb_sampled < nb_to_sample && k < nb_to_sample; k++) {
          int i = sampled_indexes[k];
          if(allow_duplicates || sample_nb_occurences[i] == 0) nb_sampled++;
          sample_nb_occurences[i]++;
          sum_sample_nb_occurences++;
        }
      } while(nb_sampled < nb_to_sample);

      (*global.log_stream) << "nb_sampled = " << nb_sampled << " nb_to_sample = " << nb_to_sample << endl;
      (*global.log_stream) << "Done." << endl;

      delete[] sampled_indexes;

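      // Spread the total sampled weight uniformly over the picks: each pick
      // carries total_weight / sum_sample_nb_occurences, and the response
      // stored for a sample is chosen so that exp(-label * response) equals
      // the weight carried by its picks (duplicates being folded into the
      // response when they are not kept explicitly).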
      scalar_t unit_weight = log(total_weight / scalar_t(sum_sample_nb_occurences));

      for(int n = 0; n < nb; n++) {
        if(sample_nb_occurences[n] > 0) {
          if(allow_duplicates) {
            sample_responses[n] = - labels[n] * unit_weight;
          } else {
            sample_responses[n] = - labels[n] * (unit_weight + log(scalar_t(sample_nb_occurences[n])));
            sample_nb_occurences[n] = 1;
          }
        }
      }

      delete[] weights;
    }
    break;

  default:
    cerr << "Unknown loss type in LossMachine::subsample." << endl;
    exit(1);
  }
}