2 ////////////////////////////////////////////////////////////////////////////////
3 // This program is free software; you can redistribute it and/or //
4 // modify it under the terms of the GNU General Public License //
5 // version 2 as published by the Free Software Foundation. //
7 // This program is distributed in the hope that it will be useful, but //
8 // WITHOUT ANY WARRANTY; without even the implied warranty of //
9 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU //
10 // General Public License for more details. //
12 // Written and (C) by François Fleuret //
13 // Contact <francois.fleuret@epfl.ch> for comments & bug reports //
14 ////////////////////////////////////////////////////////////////////////////////
16 // $Id: approximer.cc,v 1.13 2006-12-22 23:13:46 fleuret Exp $
#include "approximer.h"

#include <cstdlib>
20 MappingApproximer::MappingApproximer(int max_nb_weak_learners) :
21 _max_nb_weak_learners(max_nb_weak_learners),
23 _indexes(new int[_max_nb_weak_learners]),
24 _thresholds(new scalar_t[_max_nb_weak_learners]),
25 _weights(new scalar_t[_max_nb_weak_learners]),
27 _input_sorted_index(0),
28 _outputs_on_samples(0) { }
30 MappingApproximer::~MappingApproximer() {
34 delete[] _outputs_on_samples;
37 void MappingApproximer::load(istream &is) {
38 is.read((char *) &_nb_weak_learners, sizeof(_nb_weak_learners));
39 if(_nb_weak_learners > _max_nb_weak_learners) {
40 cerr << "Number of weak learners missmatch." << endl;
43 is.read((char *) _indexes, _nb_weak_learners * sizeof(int));
44 is.read((char *) _thresholds, _nb_weak_learners * sizeof(scalar_t));
45 is.read((char *) _weights, _nb_weak_learners * sizeof(scalar_t));
48 void MappingApproximer::save(ostream &os) {
49 os.write((char *) &_nb_weak_learners, sizeof(_nb_weak_learners));
50 os.write((char *) _indexes, _nb_weak_learners * sizeof(int));
51 os.write((char *) _thresholds, _nb_weak_learners * sizeof(scalar_t));
52 os.write((char *) _weights, _nb_weak_learners * sizeof(scalar_t));
55 void MappingApproximer::set_learning_input(int input_size, int nb_samples,
56 scalar_t *input, scalar_t *sample_weights) {
58 _input_size = input_size;
60 _sample_weights = sample_weights;
61 if(_nb_samples != nb_samples ){
62 _nb_samples = nb_samples;
63 delete[] _outputs_on_samples;
64 delete[] _input_sorted_index;
65 _outputs_on_samples = new scalar_t[_nb_samples];
66 _input_sorted_index = new int[_input_size * _nb_samples];
68 for(int t = 0; t < _nb_samples; t++) _outputs_on_samples[t] = 0.0;
70 for(int n = 0; n < _input_size; n++) {
71 Couple couples[_nb_samples];
72 for(int s = 0; s < _nb_samples; s++) {
74 couples[s].value = _input[s * _input_size + n];
76 qsort(couples, _nb_samples, sizeof(Couple), compare_couple);
77 for(int s = 0; s < _nb_samples; s++)
78 _input_sorted_index[n * _nb_samples + s] = couples[s].index;
83 void MappingApproximer::learn_one_step(scalar_t *target) {
84 scalar_t delta[_nb_samples], s_delta = 0.0, s_weights = 0;
86 for(int s = 0; s < _nb_samples; s++) {
87 delta[s] = _outputs_on_samples[s] - target[s];
88 s_delta += _sample_weights[s] * delta[s];
89 s_weights += _sample_weights[s];
92 scalar_t best_z = 0, z, prev, val;
95 for(int n = 0; n < _input_size; n++) {
97 i = _input_sorted_index + n * _nb_samples;
98 prev = _input[(*i) * _input_size + n];
99 for(int s = 1; s < _nb_samples; s++) {
100 z -= 2 * _sample_weights[*i] * delta[*i];
102 val = _input[(*i) * _input_size + n];
103 if(val > prev && abs(z) > abs(best_z)) {
104 _thresholds[_nb_weak_learners] = (val + prev)/2;
105 _indexes[_nb_weak_learners] = n;
106 _weights[_nb_weak_learners] = - z / s_weights;
113 if(best_z == 0) return;
115 // Update the responses on the samples
116 for(int s = 0; s < _nb_samples; s++) {
117 if(_input[s * _input_size + _indexes[_nb_weak_learners]] >= _thresholds[_nb_weak_learners])
118 _outputs_on_samples[s] += _weights[_nb_weak_learners];
120 _outputs_on_samples[s] -= _weights[_nb_weak_learners];
126 scalar_t MappingApproximer::predict(scalar_t *input) {
128 for(int w = 0; w < _nb_weak_learners; w++)
129 if(input[_indexes[w]] >= _thresholds[w])
136 void test_approximer() {
137 // const int nb_samples = 1000, nb_weak_learners = 100;
138 // MappingApproximer approximer(nb_weak_learners);
139 // scalar_t input[nb_samples], output[nb_samples], weight[nb_samples];
140 // for(int n = 0; n < nb_samples; n++) {
141 // input[n] = scalar_t(n * 2 * M_PI)/scalar_t(nb_samples);
142 // output[n] = sin(input[n]);
143 // weight[n] = (drand48() < 0.5) ? 1.0 : 0.0;
145 // approximer.set_learning_input(1, nb_samples, input, weight);
146 // for(int w = 0; w < nb_weak_learners; w++) {
147 // approximer.learn_one_step(output);
149 // for(int n = 0; n < nb_samples; n++)
150 // e += weight[n] * sq(output[n] - approximer._outputs_on_samples[n]);
151 // cerr << w << " " << e << endl;
153 // for(int n = 0; n < nb_samples; n++) {
154 // cout << input[n] << " " << approximer._outputs_on_samples[n] << endl;
157 const int dim = 5, nb_samples = 1000, nb_weak_learners = 100;
158 MappingApproximer approximer(nb_weak_learners);
159 scalar_t input[nb_samples * dim], output[nb_samples], weight[nb_samples];
160 for(int n = 0; n < nb_samples; n++) {
162 for(int d = 0; d < dim; d++) {
163 input[n * dim + d] = drand48();
164 s += (d+1) * input[n * dim + d];
167 weight[n] = (drand48() < 0.5) ? 1.0 : 0.0;
169 approximer.set_learning_input(dim, nb_samples, input, weight);
170 for(int w = 0; w < nb_weak_learners; w++) {
171 approximer.learn_one_step(output);
173 for(int n = 0; n < nb_samples; n++)
174 e += weight[n] * sq(output[n] - approximer._outputs_on_samples[n]);
175 cerr << w << " " << e << endl;
177 for(int n = 0; n < nb_samples; n++) {
178 cout << output[n] << " " << approximer._outputs_on_samples[n] << endl;