KJB
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
sample_adaptive_mh.h
Go to the documentation of this file.
1 /* $Id: sample_adaptive_mh.h 12839 2012-08-08 23:51:05Z ksimek $ */
2 /* {{{=========================================================================== *
3  |
4  | Copyright (c) 1994-2012 by Kobus Barnard (author)
5  |
6  | Personal and educational use of this code is granted, provided that this
7  | header is kept intact, and that the authorship is not misrepresented, that
8  | its use is acknowledged in publications, and relevant papers are cited.
9  |
10  | For other use contact the author (kobus AT cs DOT arizona DOT edu).
11  |
12  | Please note that the code in this file has not necessarily been adequately
13  | tested. Naturally, there is no guarantee of performance, support, or fitness
14  | for any particular task. Nonetheless, I am interested in hearing about
15  | problems that you encounter.
16  |
17  | Author: Kyle Simek
18  * =========================================================================== }}}*/
19 
20 // vim: tabstop=4 shiftwidth=4 foldmethod=marker
21 
23 #include <sample_cpp/sample_base.h>
24 
// NOTE(review): this is a doxygen-rendered listing; lines that were
// hyperlinks (e.g. the class declaration on source line 26 and the
// constructor name on line 35) are missing from this view.
25 template <class SimpleVector>
27 {
28 private:
31 public:
34 
// Constructor (name/signature line missing above): takes the target
// distribution, the initial proposal covariance, and the acceptance
// rate the adaptive scheme should steer toward.
36  const Target_distribution& target,
37  const kjb::Matrix& initial_covariance,
38  double goal_accept_rate) :
39  goal_accept_prob_(goal_accept_rate),
40  i_(2), // first i_ comes from initial step
41  gamma_(),
42  mu_(),
// proposer_ is shared with the functor bound into step_ below, so both
// observe the same covariance/scale adaptations.
43  proposer_(new Proposer(initial_covariance)),
44  post_callback_(),
45  step_(target, boost::bind<Mh_proposal_result>(&Proposer::operator(), proposer_, _1, _2))
46  {
47  // a relatively conservative choice
// (line 48 is missing from this rendering -- presumably it installs a
// default learning-rate schedule; TODO confirm against the real header)
49  }
50 
51  void set_temperature(double t)
52  {
53  proposer_->set_temperature(t);
54  step_.set_temperature(t);
55  }
56 
71  void set_inverse_learning_rate(double C, double alpha)
72  {
73  gamma_ = boost::bind(Self::inverse_i_, log(C), alpha, _1);
74  }
75 
86  void set_constant_learning_rate(double gamma, size_t change_point = 0)
87  {
88  if(change_point == 0)
89  gamma_ = boost::bind(Self::constant_function_, gamma, _1);
90  else
91  gamma_ = boost::bind(Self::step_function_, gamma, change_point, _1);
92  }
93 
94  Step_log<SimpleVector> operator()(SimpleVector& in, double lt_m)
95  {
96  SimpleVector previous_state = in;
97 
98  double accept_prob;
99  SimpleVector proposed_state;
100 
101  Step_log<SimpleVector> step_log = step_.run_step(in, lt_m, accept_prob, proposed_state);
102  accept_prob = std::min(0.0, accept_prob);
103  accept_prob = exp(accept_prob);
104  adapt(accept_prob, previous_state, proposed_state, in);
105 
106  if(post_callback_) post_callback_(*this);
107  return step_log;
108  }
109 
// Accessor whose signature line (113) is missing from this rendering;
// the generated index below lists it as:
//     const kjb::Matrix& get_cholesky_covariance() const
// Returns the proposer's unscaled Cholesky factor of the covariance.
114  {
115  return proposer_->get_unscaled_cholesky_covariance();
116  }
117 
121  double get_global_scale() const
122  {
123  return proposer_->get_global_scale();
124  }
125 
// Adaptation hook run after every step: Robbins-Monro-style updates of
// the global proposal scale, the running mean mu_, and the proposal
// covariance. Statement order matters: delta must be formed against the
// OLD mean (and handed to adapt_covariance) before mu_ is moved.
// proposed_state is unused by this default implementation; presumably
// it exists for subclass overrides -- confirm against derived classes.
126  virtual void adapt(double accept_prob, const SimpleVector& previous_state, const SimpleVector& proposed_state, const SimpleVector& accepted_state)
127  {
128 
// learning rate for this iteration (installed via set_*_learning_rate)
130  double gamma = gamma_(i_);
// nudge the global scale by how far we are from the goal accept rate
131  proposer_->adapt_global_scale(gamma * (accept_prob - goal_accept_prob_));
132 
133  // update mean
134  // mu_ += gamma * delta;
// lazily seed the running mean on the first call
135  if(mu_.size() == 0)
136  mu_ = previous_state;
137 
// delta = accepted_state - mu_ (element-wise, computed in place)
138  SimpleVector delta = accepted_state;
139  std::transform(delta.begin(), delta.end(), mu_.begin(), delta.begin(), std::minus<double>());
140 
141  // update covariance
142  proposer_->adapt_covariance(delta, gamma);
143 
// mu_ += gamma * delta, done as delta *= gamma then mu_ += delta.
// NOTE(review): std::bind2nd is deprecated in C++11 and removed in
// C++17; replace with a lambda or boost::bind if the toolchain moves on.
144  std::transform(delta.begin(), delta.end(), delta.begin(), std::bind2nd(std::multiplies<double>(), gamma));
145  std::transform(mu_.begin(), mu_.end(), delta.begin(), mu_.begin(), std::plus<double>());
146 
147  ++i_;
148  }
149 
154  void set_post_callback(const boost::function1<void, const Self&>& cb)
155  {
156  post_callback_ = cb;
157  }
158 
160  double get_current_gamma() const
161  {
162  return gamma_(i_);
163  }
164 
166  double get_log_lambda() const
167  {
168  return log_lambda_;
169  }
170 protected:
// Re-targets the underlying MH step. The signature line (171) is
// missing from this rendering; the generated index lists it as:
//     void set_target(const Target_distribution& t)
172  {
173  step_.set_target(t);
174  }
175 private:
/**
 * Learning-rate schedule gamma(i) = C / i^alpha, evaluated in log space.
 *
 * @param log_C  natural log of the numerator constant C
 * @param alpha  decay exponent
 * @param i      iteration number (must be positive)
 */
static double inverse_i_(double log_C, double alpha, double i)
{
    const double log_gamma = log_C - alpha * std::log(i);
    return std::exp(log_gamma);
}
185 
/**
 * Piecewise-constant learning rate: v until change_point, zero from
 * change_point onward (adaptation stops).
 */
static double step_function_(double v, size_t change_point, size_t i)
{
    return (i < change_point) ? v : 0.0;
}
197 
/** Constant learning rate: always returns f, ignoring the iteration. */
static double constant_function_(double f, size_t /* i */)
{
    return f;
}
205 
206 
// acceptance rate the scale adaptation steers toward
207  double goal_accept_prob_;
// log of the scaling lambda (NOTE(review): no assignment visible here)
208  double log_lambda_;
// iteration counter; starts at 2 (the initial step counts as the first)
209  size_t i_;
// learning-rate schedule gamma(i), installed by set_*_learning_rate
210  boost::function1<double, size_t> gamma_;
211 
// running mean of accepted states; empty until the first adapt() call
212  SimpleVector mu_;
// shared with the proposal functor bound into step_
213  boost::shared_ptr<Mv_gaussian_proposer<SimpleVector> > proposer_;
214 
// optional observer invoked after each operator() call
215  boost::function1<void, const Self&> post_callback_;
// mutable because run_step() is declared const on the step (see index)
216  mutable Base_step step_;
217 };
218 
219 
// Adaptive MH step over an arbitrary Model type, implemented by mapping
// the model to and from a kjb::Vector and delegating to the
// vector-space step. (The class declaration on source line 221 is
// missing from this doxygen rendering.)
220 template <class Model>
222 {
223 public:
225 private:
// serializes a Model into its vector representation
226  typedef boost::function2<void, const Model&, kjb::Vector&> To_vector;
// reconstructs a Model from a vector representation
227  typedef boost::function2<void, const kjb::Vector&, Model&> From_vector;
228 
230 
231  struct target_wrapper
232  {
233  target_wrapper(
234  const Target_distribution& target_,
235  const From_vector& from_vector_) :
236  target(target_)
237  ,from_vector(from_vector_)
238  ,model()
239 #ifdef TEST
240  ,called(false)
241 #endif
242  {}
243 
244  double operator()(const kjb::Vector& v) const
245  {
246  from_vector(v, model);
247 #ifdef TEST
248  called = true;
249 #endif
250  return target(model);
251  }
252 
253  Target_distribution target;
254  From_vector from_vector;
255  mutable Model model;
256 #ifdef TEST
257  mutable bool called;
258 #endif
259  };
260 
261 
262 
263 public:
265 
// Constructor (name line 266 missing here; the index below lists it as
// Generic_adaptive_mh_step(to_vector, from_vector, target,
// initial_covariance, goal_accept_rate)). The base step is pointed at
// target_wrapper_ by reference so vector-space evaluations go through
// the model-space target.
// NOTE(review): boost::ref(target_wrapper_) is taken before
// target_wrapper_ is constructed -- legal only because just the address
// is captured; confirm the base does not call it during construction.
267  const To_vector& to_vector,
268  const From_vector& from_vector,
269  const Target_distribution& target,
270  const kjb::Matrix& initial_covariance,
271  double goal_accept_rate) :
272  Base_step(boost::ref(target_wrapper_), initial_covariance, goal_accept_rate),
273  target_wrapper_(target, from_vector),
274  to_vector_(to_vector)
275  {}
276 
// Copy constructor (signature line 282 missing here). After copying,
// the base step must be re-pointed at THIS object's wrapper; otherwise
// it would keep referencing the source object's wrapper.
283  Base_step(other),
284  target_wrapper_(other.target_wrapper_),
285  to_vector_(other.to_vector_)
286  {
287  Base_step::set_target(boost::ref(target_wrapper_));
288  }
289 
290 
291  Step_log<Model> operator()(Model& in, double lt_m)
292  {
293  target_wrapper_.model = in;
294 #ifdef TEST
295  target_wrapper_.called = false;
296 #endif
297 
298  kjb::Vector v;
299  to_vector_(in, v);
300  Step_log<Model> log = Base_step::operator()(v, lt_m);
301 
302  assert(log.size() == 1);
303 #ifdef TEST
304  assert(target_wrapper_.called == true);
305 #endif
306 
307  if(log.back().accept)
308  {
309  in = target_wrapper_.model;
310  }
311 
312  return log;
313  }
314 
315 private:
// wraps the model-space target for evaluation in vector space
316  target_wrapper target_wrapper_;
// serializes a Model into a kjb::Vector before each step
317  To_vector to_vector_;
318 };
319 
void set_temperature(double t)
Definition: sample_adaptive_mh.h:51
Generic_adaptive_mh_step(const To_vector &to_vector, const From_vector &from_vector, const Target_distribution &target, const kjb::Matrix &initial_covariance, double goal_accept_rate)
Definition: sample_adaptive_mh.h:266
void set_target(const Target_distribution &target)
Definition: sample_step.h:314
Indicates the result of an MH proposal. It is simply a pair of probabilities, forward and backward...
Definition: sample_base.h:334
Definition: sample_vector.h:251
Step_log< SimpleVector > operator()(SimpleVector &in, double lt_m)
Definition: sample_adaptive_mh.h:94
void set_constant_learning_rate(double gamma, size_t change_point=0)
Definition: sample_adaptive_mh.h:86
Definition: sample_base.h:160
This class implements vectors, in the linear-algebra sense, with real-valued elements.
Definition: m_vector.h:87
Model_evaluator< Model >::Type Target_distribution
Definition: sample_adaptive_mh.h:224
Base_step::Proposer Proposer
Definition: sample_adaptive_mh.h:264
void set_temperature(double T)
Definition: sample_step.h:333
void set_post_callback(const boost::function1< void, const Self & > &cb)
Definition: sample_adaptive_mh.h:154
Definition: sample_adaptive_mh.h:221
void set_target(const Target_distribution &t)
Definition: sample_adaptive_mh.h:171
Step_log< Model > operator()(Model &in, double lt_m)
Definition: sample_adaptive_mh.h:291
Base_step::Target_distribution Target_distribution
Definition: sample_adaptive_mh.h:32
Definition: sample_adaptive_mh.h:26
boost::function1< double, const Model & > Type
Definition: sample_base.h:214
double get_log_lambda() const
returns the log of the current scaling lambda
Definition: sample_adaptive_mh.h:166
double get_global_scale() const
Definition: sample_adaptive_mh.h:121
Int_matrix::Value_type min(const Int_matrix &mat)
Return the minimum value in this matrix.
Definition: l_int_matrix.h:1385
virtual void adapt(double accept_prob, const SimpleVector &previous_state, const SimpleVector &proposed_state, const SimpleVector &accepted_state)
Definition: sample_adaptive_mh.h:126
Parent::Target_distribution Target_distribution
Definition: sample_step.h:224
Step_log< Model > run_step(Model &m, double lt_m, double &accept_prob, boost::optional< Model & > proposed_state_out=boost::none) const
Runs a step of Metropolis-Hastings on a model m.
Definition: sample_step.h:120
void set_inverse_learning_rate(double C, double alpha)
Definition: sample_adaptive_mh.h:71
get the indices of edges in each direction for i
Definition: APPgetLargeConnectedEdges.m:48
Simple_adaptive_mh_step(const Target_distribution &target, const kjb::Matrix &initial_covariance, double goal_accept_rate)
Definition: sample_adaptive_mh.h:35
This class implements matrices, in the linear-algebra sense, with real-valued elements.
Definition: m_matrix.h:94
Generic_adaptive_mh_step(const Generic_adaptive_mh_step &other)
Definition: sample_adaptive_mh.h:282
double get_current_gamma() const
returns the current learning rate (for debugging)
Definition: sample_adaptive_mh.h:160
Mv_gaussian_proposer< SimpleVector > Proposer
Definition: sample_adaptive_mh.h:33
const kjb::Matrix & get_cholesky_covariance() const
Definition: sample_adaptive_mh.h:113