2 ////////////////////////////////////////////////////////////////////////////////
3 // This program is free software; you can redistribute it and/or //
4 // modify it under the terms of the GNU General Public License //
5 // version 2 as published by the Free Software Foundation. //
7 // This program is distributed in the hope that it will be useful, but //
8 // WITHOUT ANY WARRANTY; without even the implied warranty of //
9 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU //
10 // General Public License for more details. //
12 // Written and (C) by François Fleuret //
13 // Contact <francois.fleuret@epfl.ch> for comments & bug reports //
14 ////////////////////////////////////////////////////////////////////////////////
#include "approximer.h"

#include <cmath>
#include <cstdlib>
18 MappingApproximer::MappingApproximer(int max_nb_weak_learners) :
19 _max_nb_weak_learners(max_nb_weak_learners),
21 _indexes(new int[_max_nb_weak_learners]),
22 _thresholds(new scalar_t[_max_nb_weak_learners]),
23 _weights(new scalar_t[_max_nb_weak_learners]),
25 _input_sorted_index(0),
26 _outputs_on_samples(0) { }
28 MappingApproximer::~MappingApproximer() {
32 delete[] _outputs_on_samples;
35 void MappingApproximer::load(istream &is) {
36 is.read((char *) &_nb_weak_learners, sizeof(_nb_weak_learners));
37 if(_nb_weak_learners > _max_nb_weak_learners) {
38 cerr << "Number of weak learners missmatch." << endl;
41 is.read((char *) _indexes, _nb_weak_learners * sizeof(int));
42 is.read((char *) _thresholds, _nb_weak_learners * sizeof(scalar_t));
43 is.read((char *) _weights, _nb_weak_learners * sizeof(scalar_t));
46 void MappingApproximer::save(ostream &os) {
47 os.write((char *) &_nb_weak_learners, sizeof(_nb_weak_learners));
48 os.write((char *) _indexes, _nb_weak_learners * sizeof(int));
49 os.write((char *) _thresholds, _nb_weak_learners * sizeof(scalar_t));
50 os.write((char *) _weights, _nb_weak_learners * sizeof(scalar_t));
53 void MappingApproximer::set_learning_input(int input_size, int nb_samples,
54 scalar_t *input, scalar_t *sample_weights) {
56 _input_size = input_size;
58 _sample_weights = sample_weights;
59 if(_nb_samples != nb_samples ){
60 _nb_samples = nb_samples;
61 delete[] _outputs_on_samples;
62 delete[] _input_sorted_index;
63 _outputs_on_samples = new scalar_t[_nb_samples];
64 _input_sorted_index = new int[_input_size * _nb_samples];
66 for(int t = 0; t < _nb_samples; t++) _outputs_on_samples[t] = 0.0;
68 for(int n = 0; n < _input_size; n++) {
69 Couple couples[_nb_samples];
70 for(int s = 0; s < _nb_samples; s++) {
72 couples[s].value = _input[s * _input_size + n];
74 qsort(couples, _nb_samples, sizeof(Couple), compare_couple);
75 for(int s = 0; s < _nb_samples; s++)
76 _input_sorted_index[n * _nb_samples + s] = couples[s].index;
81 void MappingApproximer::learn_one_step(scalar_t *target) {
82 scalar_t delta[_nb_samples], s_delta = 0.0, s_weights = 0;
84 for(int s = 0; s < _nb_samples; s++) {
85 delta[s] = _outputs_on_samples[s] - target[s];
86 s_delta += _sample_weights[s] * delta[s];
87 s_weights += _sample_weights[s];
90 scalar_t best_z = 0, z, prev, val;
93 for(int n = 0; n < _input_size; n++) {
95 i = _input_sorted_index + n * _nb_samples;
96 prev = _input[(*i) * _input_size + n];
97 for(int s = 1; s < _nb_samples; s++) {
98 z -= 2 * _sample_weights[*i] * delta[*i];
100 val = _input[(*i) * _input_size + n];
101 if(val > prev && abs(z) > abs(best_z)) {
102 _thresholds[_nb_weak_learners] = (val + prev)/2;
103 _indexes[_nb_weak_learners] = n;
104 _weights[_nb_weak_learners] = - z / s_weights;
111 if(best_z == 0) return;
113 // Update the responses on the samples
114 for(int s = 0; s < _nb_samples; s++) {
115 if(_input[s * _input_size + _indexes[_nb_weak_learners]] >= _thresholds[_nb_weak_learners])
116 _outputs_on_samples[s] += _weights[_nb_weak_learners];
118 _outputs_on_samples[s] -= _weights[_nb_weak_learners];
124 scalar_t MappingApproximer::predict(scalar_t *input) {
126 for(int w = 0; w < _nb_weak_learners; w++)
127 if(input[_indexes[w]] >= _thresholds[w])
134 void test_approximer() {
135 // const int nb_samples = 1000, nb_weak_learners = 100;
136 // MappingApproximer approximer(nb_weak_learners);
137 // scalar_t input[nb_samples], output[nb_samples], weight[nb_samples];
138 // for(int n = 0; n < nb_samples; n++) {
139 // input[n] = scalar_t(n * 2 * M_PI)/scalar_t(nb_samples);
140 // output[n] = sin(input[n]);
141 // weight[n] = (drand48() < 0.5) ? 1.0 : 0.0;
143 // approximer.set_learning_input(1, nb_samples, input, weight);
144 // for(int w = 0; w < nb_weak_learners; w++) {
145 // approximer.learn_one_step(output);
147 // for(int n = 0; n < nb_samples; n++)
148 // e += weight[n] * sq(output[n] - approximer._outputs_on_samples[n]);
149 // cerr << w << " " << e << endl;
151 // for(int n = 0; n < nb_samples; n++) {
152 // cout << input[n] << " " << approximer._outputs_on_samples[n] << endl;
155 const int dim = 5, nb_samples = 1000, nb_weak_learners = 100;
156 MappingApproximer approximer(nb_weak_learners);
157 scalar_t input[nb_samples * dim], output[nb_samples], weight[nb_samples];
158 for(int n = 0; n < nb_samples; n++) {
160 for(int d = 0; d < dim; d++) {
161 input[n * dim + d] = drand48();
162 s += (d+1) * input[n * dim + d];
165 weight[n] = (drand48() < 0.5) ? 1.0 : 0.0;
167 approximer.set_learning_input(dim, nb_samples, input, weight);
168 for(int w = 0; w < nb_weak_learners; w++) {
169 approximer.learn_one_step(output);
171 for(int n = 0; n < nb_samples; n++)
172 e += weight[n] * sq(output[n] - approximer._outputs_on_samples[n]);
173 cerr << w << " " << e << endl;
175 for(int n = 0; n < nb_samples; n++) {
176 cout << output[n] << " " << approximer._outputs_on_samples[n] << endl;