sampling/entropic_sampling/entropic_adaptive.rs

use{
    crate::{*, traits::*},
    rand::{Rng, seq::*},
    std::{
        marker::PhantomData,
        io::Write,
        iter::*,
        collections::*,
        convert::*,
        num::*
    }
};

#[cfg(feature = "serde_support")]
use serde::{Serialize, Deserialize};

#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde_support", derive(Serialize, Deserialize))]
/// Errors that entropic sampling, or the creation of `EntropicSamplingAdaptive`,
/// can encounter
pub enum EntropicErrors {
    /// # source (`WangLandauAdaptive`) was in an invalid state
    /// * did you forget to use one of the `init*` methods to initialize a valid
    ///   WangLandau state?
    InvalidWangLandau,

    /// Still in the process of gathering statistics
    /// Not enough to make an estimate
    NotEnoughStatistics,

    /// Still Gathering Statistics, this is only an estimate!
    EstimatedStatistic(Vec<f64>),

    /// Invalid trial step. Is your max_step smaller than your min_step?
    InvalidMinMaxTrialSteps,

    /// # Possible reasons
    /// * `log_density.len()` and `histogram.bin_count()` are not equal
    /// * not all values of `log_density` are finite
    InvalidLogDensity,

    /// You are trying to have a `min_best_of_count` that is
    /// larger than the total steps you try!
    InvalidBestof,
}

/// # Entropic sampling made easy
/// > J. Lee,
/// > “New Monte Carlo algorithm: Entropic sampling,”
/// > Phys. Rev. Lett. 71, 211–214 (1993),
/// > DOI: [10.1103/PhysRevLett.71.211](https://doi.org/10.1103/PhysRevLett.71.211)
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde_support", derive(Serialize, Deserialize))]
pub struct EntropicSamplingAdaptive<Hist, R, E, S, Res, T>
{
    rng: R,
    trial_list: Vec<usize>,
    best_of_steps: Vec<usize>,
    min_best_of_count: usize,
    best_of_threshold: f64,
    ensemble: E,
    steps: Vec<S>,
    step_res_marker: PhantomData<Res>,
    accepted_step_hist: Vec<usize>,
    rejected_step_hist: Vec<usize>,
    total_steps_rejected: usize,
    total_steps_accepted: usize,
    wl_steps_accepted: usize,
    wl_steps_rejected: usize,
    min_step: usize,
    counter: usize,
    step_count: usize,
    step_goal: usize,
    histogram: Hist,
    log_density: Vec<f64>,
    old_energy: T,
    old_bin: usize,
    adjust_bestof_every: usize,
}

impl<Hist, R, E, S, Res, Energy> GlueAble<Hist> for EntropicSamplingAdaptive<Hist, R, E, S, Res, Energy>
    where Hist: Clone + Histogram
{
    fn push_glue_entry_ignoring(
            &self, 
            job: &mut GlueJob<Hist>,
            ignore_idx: &[usize]
        ) {
        if !ignore_idx.contains(&0)
        {
            let mut missing_steps = 0;
            if self.step_count() < self.step_goal()
            {
                missing_steps = (self.step_goal() - self.step_count()) as u64;
            }
            let rejected = self.total_entr_steps_rejected() as u64;
            let accepted = self.total_entr_steps_accepted() as u64;

            let stats = IntervalSimStats{
                sim_progress: SimProgress::MissingSteps(missing_steps),
                interval_sim_type: SimulationType::Entropic,
                rejected_steps: rejected,
                accepted_steps: accepted,
                replica_exchanges: None,
                proposed_replica_exchanges: None,
                merged_over_walkers: NonZeroUsize::new(1).unwrap()
            };

            let glue_entry = GlueEntry{
                hist: self.hist().clone(),
                prob: self.log_density_refined(),
                log_base: LogBase::BaseE,
                interval_stats: stats
            };
            job.collection.push(glue_entry);
        }
    }
}

impl<Hist, R, E, S, Res, T> TryFrom<WangLandauAdaptive<Hist, R, E, S, Res, T>>
    for EntropicSamplingAdaptive<Hist, R, E, S, Res, T>
    where 
        Hist: Histogram,
        R: Rng
{
    type Error = EntropicErrors;
    fn try_from(mut wl: WangLandauAdaptive<Hist, R, E, S, Res, T>) -> Result<Self, Self::Error> {
        if wl.old_bin.is_none() || wl.old_energy.is_none() {
            return Err(EntropicErrors::InvalidWangLandau);
        }
        let wl_steps_rejected = wl.total_steps_rejected();
        let wl_steps_accepted = wl.total_steps_accepted();
        wl.accepted_step_hist
            .iter_mut()
            .for_each(|v| *v = 0);
        wl.rejected_step_hist
            .iter_mut()
            .for_each(|v| *v = 0);
        wl.best_of_steps.clear();
        wl.histogram.reset();
        wl.trial_list.shuffle(&mut wl.rng);

        Ok(
            Self{
                wl_steps_rejected,
                wl_steps_accepted,
                counter: 0,
                steps: wl.steps,
                step_res_marker: wl.step_res_marker,
                log_density: wl.log_density,
                old_energy: wl.old_energy.unwrap(),
                old_bin: wl.old_bin.unwrap(),
                accepted_step_hist: wl.accepted_step_hist,
                rejected_step_hist: wl.rejected_step_hist,
                total_steps_accepted: 0,
                total_steps_rejected: 0,
                min_step: wl.min_step,
                min_best_of_count: wl.min_best_of_count,
                best_of_steps: wl.best_of_steps,
                best_of_threshold: wl.best_of_threshold,
                rng: wl.rng,
                trial_list: wl.trial_list,
                ensemble: wl.ensemble,
                step_count: 0,
                step_goal: wl.step_count,
                histogram: wl.histogram,
                adjust_bestof_every: 10usize.max(4 * wl.check_refine_every),
            }
        )
    }
}

impl<Hist, R, E, S, Res, T> EntropicSamplingAdaptive<Hist, R, E, S, Res, T>
{

    /// # Current state of the Ensemble
    #[inline]
    pub fn ensemble(&self) -> &E
    {
        &self.ensemble
    }

    /// # Energy of ensemble
    /// * assuming `energy_fn` (see `self.entropic_step` etc.)
    ///   is deterministic and will always give the same result for the same ensemble,
    ///   this returns the energy of the current ensemble
    #[inline]
    pub fn energy(&self) -> &T
    {
        &self.old_energy
    }

    /// # Number of entropic steps done until now
    /// * will be reset by [`self.refine_estimate`](#method.refine_estimate)
    #[inline]
    pub fn step_count(&self) -> usize
    {
        self.step_count
    }

    /// # Number of entropic steps to be performed
    /// * if `self` was created from `WangLandauAdaptive`,
    ///   `step_goal` will be equal to the number of Wang Landau steps that were performed
    #[inline]
    pub fn step_goal(&self) -> usize
    {
        self.step_goal
    }

    /// # Number of entropic steps to be performed
    /// * set the number of steps to be performed by entropic sampling
    #[inline]
    pub fn set_step_goal(&mut self, step_goal: usize){
        self.step_goal = step_goal;
    }

    /// # Smallest possible Markov step size (`m_steps` of the MarkovChain trait) used by an entropic step
    #[inline]
    pub fn min_step_size(&self) -> usize
    {
        self.min_step
    }

    /// # Largest possible Markov step size (`m_steps` of the MarkovChain trait) used by an entropic step
    #[inline]
    pub fn max_step_size(&self) -> usize
    {
        self.min_step + self.accepted_step_hist.len() - 1
    }

    /// # Currently used best of
    /// * might have length 0, if statistics are still being gathered
    /// * otherwise this contains the step sizes, from which the next step size
    ///   is drawn uniformly
    #[inline]
    pub fn best_of_steps(&self) -> &Vec<usize>
    {
        &self.best_of_steps
    }

    /// # Fraction of steps accepted since the statistics were reset the last time
    /// * (steps accepted since last reset) / (steps since last reset)
    /// * `NaN` if no steps were performed yet
    pub fn fraction_accepted_current(&self) -> f64 {
        let accepted: usize = self.accepted_step_hist.iter().sum();
        let total = accepted + self.rejected_step_hist.iter().sum::<usize>();
        if total == 0 {
            f64::NAN
        } else {
            accepted as f64 / total as f64
        }
    }

    /// # Total number of entropic steps that were accepted
    pub fn total_entr_steps_accepted(&self) -> usize
    {
        self.total_steps_accepted
            + self.accepted_step_hist
                .iter()
                .sum::<usize>()
    }

    /// # Total number of entropic steps that were rejected
    pub fn total_entr_steps_rejected(&self) -> usize
    {
        self.total_steps_rejected
            + self.rejected_step_hist
                .iter()
                .sum::<usize>()
    }

    /// # Fraction of steps accepted since the creation of `self`
    /// * `NaN` if no steps were performed yet
    pub fn fraction_accepted_total_entropic(&self) -> f64 {
        let total_acc = self.total_entr_steps_accepted();
        let total_steps = total_acc + self.total_entr_steps_rejected();

        if total_steps == 0 {
            f64::NAN
        } else {
            total_acc as f64 / total_steps as f64
        }
    }

    /// * returns the (non normalized) log_density estimate log(P(E)), with which the simulation was started
    /// * if you created this from a WangLandau simulation, this is the result of the WangLandau Simulation
    pub fn log_density_estimate(&self) -> &Vec<f64>
    {
        &self.log_density
    }

    /// calculates the (non normalized) log_density estimate log(P(E)) according to the [paper](#entropic-sampling-made-easy)
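    /// * i.e., for each bin: `refined[i] = log_density[i] + ln(hits[i])` if the histogram
    ///   counted at least one hit for that bin, and `refined[i] = log_density[i]` otherwise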
    pub fn log_density_refined(&self) -> Vec<f64>
    where Hist: Histogram{
        let mut log_density = Vec::with_capacity(self.log_density.len());
        log_density.extend(
            self.log_density
                .iter()
                .zip(self.histogram.hist().iter())
                .map(
                    |(&log_p, &h)|
                    {
                        if h == 0 {
                            log_p
                        } else {
                            log_p + (h as f64).ln()
                        }
                    }
                )
        );
        log_density
    }

    /// # Return current state of histogram
    pub fn hist(&self) -> &Hist
    {
        &self.histogram
    }
}

impl<Hist, R, E, S, Res, T> EntropicSamplingAdaptive<Hist, R, E, S, Res, T>
where Hist: Histogram,
    R: Rng
{

    /// # Creates EntropicSamplingAdaptive from a `WangLandauAdaptive` state
    /// * the `WangLandauAdaptive` state needs to be valid, i.e., you must have called one of the `init*` methods
    /// * this ensures that the members `old_energy` and `old_bin` are not `None`
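    /// # Example (sketch)
    /// A minimal sketch of the conversion; `wl` is assumed to be a hypothetical, already
    /// initialized `WangLandauAdaptive` simulation (i.e., one of its `init*` methods succeeded):
    /// ```ignore
    /// let entropic = EntropicSamplingAdaptive::from_wl_adaptive(wl)
    ///     .expect("WangLandauAdaptive was not in a valid state");
    /// ```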
    pub fn from_wl_adaptive(wl: WangLandauAdaptive<Hist, R, E, S, Res, T>) -> Result<Self, EntropicErrors>
    {
        wl.try_into()
    }

    /// # Calculates `self.log_density_refined` and uses that as the estimate for the next entropic sampling simulation
    /// * returns the old estimate
    /// # Prepares `self` for a new entropic simulation
    /// * sets the new estimate for log(P(E))
    /// * resets statistic gathering
    /// * resets step_count
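    /// # Example (sketch)
    /// A hypothetical refinement loop: sample, refine the estimate, then sample again with
    /// the refined log density (`entropic` and `calc_energy` are assumed to exist):
    /// ```ignore
    /// for _ in 0..3 {
    ///     entropic.entropic_sampling(&mut calc_energy, |_| {});
    ///     // swap in the refined estimate and reset statistics and step count
    ///     let _old_estimate = entropic.refine_estimate();
    /// }
    /// ```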
    pub fn refine_estimate(&mut self) -> Vec<f64>
    {
        let mut estimate = self.log_density_refined();
        std::mem::swap(&mut estimate, &mut self.log_density);
        self.counter = 0;
        self.step_count = 0;
        self.best_of_steps.clear();
        self.histogram.reset();
        self.trial_list.shuffle(&mut self.rng);

        self.total_steps_accepted += self.accepted_step_hist.iter().sum::<usize>();
        self.accepted_step_hist
            .iter_mut()
            .for_each(|entry| *entry = 0);

        self.total_steps_rejected += self.rejected_step_hist.iter().sum::<usize>();
        self.rejected_step_hist
            .iter_mut()
            .for_each(|entry| *entry = 0);

        estimate
    }

    /// # How often to adjust `bestof_steps`?
    /// * if you try to set a value smaller than 10, it will be set to 10
    /// * will re-evaluate the statistics every `adjust_bestof_every` steps
    /// * this will not start new statistics gathering but just trigger a reevaluation of
    ///   the gathered statistics (should be O(max_stepsize - min_stepsize))
    #[inline]
    pub fn set_adjust_bestof_every(&mut self, adjust_bestof_every: usize)
    {
        self.adjust_bestof_every = adjust_bestof_every.max(10);
    }

    /// Is the simulation in the process of rebuilding the statistics,
    /// i.e., is it currently trying many different step sizes?
    #[inline]
    pub fn is_rebuilding_statistics(&self) -> bool
    {
        self.counter < self.trial_list.len()
    }

    fn statistic_bin_not_hit(&self) -> bool
    {
        self.accepted_step_hist
            .iter()
            .zip(self.rejected_step_hist.iter())
            .any(|(a, r)| a + r == 0)
    }

    /// # Estimate accept/reject statistics
    /// * contains list of estimated probabilities for accepting a step of corresponding step size
    /// * list\[i\] corresponds to step size `i + self.min_step`
    /// * O(trial_step_max - trial_step_min)
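    /// # Example (sketch)
    /// A hypothetical way to inspect the estimate (`entropic` is assumed to exist):
    /// ```ignore
    /// if let Ok(estimate) = entropic.estimate_statistics() {
    ///     for (i, prob) in estimate.iter().enumerate() {
    ///         println!("step size {}: estimated acceptance {}", i + entropic.min_step_size(), prob);
    ///     }
    /// }
    /// ```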
    pub fn estimate_statistics(&self) -> Result<Vec<f64>, WangLandauErrors>
    {
        let calc_estimate = || {
            let mut estimate = Vec::with_capacity(self.accepted_step_hist.len());
            estimate.extend(
                self.accepted_step_hist
                    .iter()
                    .zip(
                        self.rejected_step_hist.iter()
                    ).map(|(&a, &r)|
                        {
                            a as f64 / (a + r) as f64
                        }
                    )
            );
            estimate
        };
        if self.is_rebuilding_statistics() {
            if self.statistic_bin_not_hit()
            {
                Err(WangLandauErrors::NotEnoughStatistics)
            } else {
                Err(WangLandauErrors::EstimatedStatistic(calc_estimate()))
            }
        } else {
            Ok(
                calc_estimate()
            )
        }
    }

    fn generate_bestof(&mut self)
    {
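        // Rank the candidate step sizes by their estimated acceptance statistics:
        // `ProbIndex` entries are popped from the `BinaryHeap` in order of preference and
        // kept while `is_best_of` holds; at least `min_best_of_count` entries are always kept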
        let statistics = self.estimate_statistics().unwrap();
        let mut heap = BinaryHeap::with_capacity(statistics.len());
        heap.extend(statistics.into_iter()
            .enumerate()
            .map(|(index, prob)|
                {
                    ProbIndex::new(prob, index)
                }
            )
        );
        while let Some(p_i) = heap.pop() {
            if p_i.is_best_of(self.best_of_threshold)
                || self.best_of_steps.len() < self.min_best_of_count
            {
                let step_size = p_i.index + self.min_step;
                self.best_of_steps.push(step_size);
            } else {
                break;
            }
        }
    }

    fn adjust_bestof(&mut self){
        self.best_of_steps.clear();
        self.generate_bestof();
    }

    fn get_stepsize(&mut self) -> usize {
        match self.trial_list.get(self.counter) {
            None => {
                if self.best_of_steps.is_empty() {
                    self.generate_bestof();
                }
                else if self.counter % self.adjust_bestof_every == 0 {
                    self.adjust_bestof();
                }
                *self.best_of_steps.choose(&mut self.rng).unwrap()
            },
            Some(&step_size) => {
                step_size
            },
        }
    }

    #[inline]
    fn count_accepted(&mut self, size: usize){
        self.accepted_step_hist[size - self.min_step] += 1;
        self.counter += 1;
    }

    #[inline]
    fn count_rejected(&mut self, size: usize){
        self.rejected_step_hist[size - self.min_step] += 1;
        self.counter += 1;
    }

    /// **panics** if index is invalid
    #[inline(always)]
    fn metropolis_acception_prob(&self, new_bin: usize) -> f64
    {
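        // Metropolis criterion with the current log density estimate:
        // accept with probability min(1, exp(ln P(E_old) - ln P(E_new))),
        // so bins with a lower estimated density are entered more readily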
        (self.log_density[self.old_bin] - self.log_density[new_bin])
            .exp()
    }
}

impl<Hist, R, E, S, Res, T> EntropicSamplingAdaptive<Hist, R, E, S, Res, T>
where Hist: Histogram + HistogramVal<T>,
    R: Rng,
    E: MarkovChain<S, Res>,
    T: Clone,
{

    /// # Entropic sampling
    /// * performs `self.entropic_step(energy_fn)` until `condition` is false
    /// * **Note**: you have access to the current step_count (`self.step_count()`)
    /// # Parameter
    /// * `energy_fn` function calculating `Some(energy)` of the system
    ///   or rather the Parameter of which you wish to obtain the probability distribution.
    ///   If there are any states, for which the calculation is invalid, `None` should be returned
    /// * steps resulting in ensembles for which `energy_fn(&mut ensemble)` is `None`
    ///   will always be rejected
    /// * **Important** `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    /// * `print_fn`: see below
    /// # Correlations
    /// * if you want to measure correlations between "energy" and other measurable quantities,
    ///   use `print_fn`, which will be called after each step - use this function to write to 
    ///   a file or whatever you desire
    /// * Note: You do not have to recalculate the energy, if you need it in `print_fn`:
    ///   just call `self.energy()` 
    /// * you have access to your ensemble with `self.ensemble()`
    /// * if you do not need it, you can use `|_|{}` as `print_fn`
    /// ## Safety
    /// * While you do have mutable access to the ensemble, the energy function should not change the 
    ///   ensemble in a way, which affects the next calculation of the energy
    /// * This is intended for usecases, where the energy calculation is more efficient with mutable access, e.g., through using a 
    ///   buffer stored in the ensemble
    /// * Note: I chose to make this function unsafe to force users to acknowledge the (purely logical) limitations 
    ///   regarding the usage of the mutable ensemble. From a programming point of view this will not lead to 
    ///   any undefined behavior or such regardless of if the user fulfills the requirements
    pub unsafe fn entropic_sampling_while_unsafe<F, G, W>(
        &mut self,
        mut energy_fn: F,
        mut print_fn: G,
        mut condition: W
    ) where F: FnMut(&mut E) -> Option<T>,
        G: FnMut(&Self),
        W: FnMut(&Self) -> bool
    {
        while condition(self) {
            self.entropic_step_unsafe(&mut energy_fn);
            print_fn(self);
        }
    }

    /// # Entropic sampling
    /// * performs `self.entropic_step(energy_fn)` until `condition` is false
    /// * **Note**: you have access to the current step_count (`self.step_count()`)
    /// # Parameter
    /// * `energy_fn` function calculating `Some(energy)` of the system
    ///   or rather the Parameter of which you wish to obtain the probability distribution.
    ///   If there are any states, for which the calculation is invalid, `None` should be returned
    /// * steps resulting in ensembles for which `energy_fn(&mut ensemble)` is `None`
    ///   will always be rejected
    /// * **Important** `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    /// * `print_fn`: see below
    /// # Correlations
    /// * if you want to measure correlations between "energy" and other measurable quantities,
    ///   use `print_fn`, which will be called after each step - use this function to write to 
    ///   a file or whatever you desire
    /// * Note: You do not have to recalculate the energy, if you need it in `print_fn`:
    ///   just call `self.energy()` 
    /// * you have access to your ensemble with `self.ensemble()`
    /// * if you do not need it, you can use `|_|{}` as `print_fn`
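    /// # Example (sketch)
    /// A hypothetical call; `entropic` and `calc_energy` are assumed to exist and
    /// `calc_energy` has to match the energy function used for Wang Landau:
    /// ```ignore
    /// entropic.entropic_sampling_while(
    ///     |ensemble| calc_energy(ensemble),              // energy_fn
    ///     |state| println!("{:?}", state.energy()),      // print_fn (assumes the energy type is Debug)
    ///     |state| state.step_count() < state.step_goal() // condition
    /// );
    /// ```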
    pub fn entropic_sampling_while<F, G, W>(
        &mut self,
        mut energy_fn: F,
        mut print_fn: G,
        mut condition: W
    ) where F: FnMut(&E) -> Option<T>,
        G: FnMut(&Self),
        W: FnMut(&Self) -> bool
    {
        while condition(self) {
            self.entropic_step(&mut energy_fn);
            print_fn(self);
        }
    }

    /// # Entropic sampling using an accumulating markov step
    /// * performs `self.entropic_step_acc(&mut energy_fn)` until `condition(self) == false`
    /// # Parameter
    /// * `energy_fn` function calculating the energy `E` of the system
    ///   (or rather the Parameter of which you wish to obtain the probability distribution)
    ///   during the markov steps, which can be more efficient.
    /// * **Important** `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    /// * `print_fn`: see below
    /// # Correlations
    /// * if you want to measure correlations between "energy" and other measurable quantities,
    ///   use `print_fn`, which will be called after each step - use this function to write to 
    ///   a file or whatever you desire
    /// * Note: You do not have to recalculate the energy, if you need it in `print_fn`:
    ///   just call `self.energy()` 
    /// * you have access to your ensemble with `self.ensemble()`
    /// * if you do not need it, you can use `|_|{}` as `print_fn`
    pub fn entropic_sampling_while_acc<F, G, W>(
        &mut self,
        mut energy_fn: F,
        mut print_fn: G,
        mut condition: W
    ) where F: FnMut(&E, &S, &mut T),
        G: FnMut(&Self),
        W: FnMut(&Self) -> bool
    {
        while condition(self) {
            self.entropic_step_acc(&mut energy_fn);
            print_fn(self);
        }
    }

    /// # Entropic sampling
    /// * if possible, use `self.entropic_sampling()` instead!
    /// * More powerful version of `self.entropic_sampling()`, since you now have mutable access
    /// * to access the ensemble mutably, use `self.ensemble_mut()`
    /// * Note: Whatever you do with the ensemble (or self) should not change the result of the energy function, if performed again.
    ///   Otherwise the results will be wrong!
    /// * performs `self.entropic_step_unsafe(energy_fn)` until `self.step_count == self.step_goal`
    /// # Parameter
    /// * `energy_fn` function calculating `Some(energy)` of the system
    ///   or rather the Parameter of which you wish to obtain the probability distribution.
    ///   If there are any states, for which the calculation is invalid, `None` should be returned
    /// * steps resulting in ensembles for which `energy_fn(&mut ensemble)` is `None`
    ///   will always be rejected
    /// * **Important** `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    /// * `print_fn`: see below
    /// # Correlations
    /// * if you want to measure correlations between "energy" and other measurable quantities,
    ///   use `print_fn`, which will be called after each step - use this function to write to 
    ///   a file or whatever you desire
    /// * Note: You do not have to recalculate the energy, if you need it in `print_fn`:
    ///   just call `self.energy()` 
    /// * you have access to your ensemble with `self.ensemble()`
    /// * if you do not need it, you can use `|_|{}` as `print_fn`
    /// ## Safety
    /// * While you do have mutable access to the ensemble, the energy function should not change the 
    ///   ensemble in a way, which affects the next calculation of the energy
    /// * This is intended for usecases, where the energy calculation is more efficient with mutable access, e.g., through using a 
    ///   buffer stored in the ensemble
    /// * Note: I chose to make this function unsafe to force users to acknowledge the (purely logical) limitations 
    ///   regarding the usage of the mutable ensemble. From a programming point of view this will not lead to 
    ///   any undefined behavior or such regardless of if the user fulfills the requirements
    pub unsafe fn entropic_sampling_unsafe<F, G>(
        &mut self,
        mut energy_fn: F,
        mut print_fn: G,
    ) where F: FnMut(&mut E) -> Option<T>,
        G: FnMut(&mut Self)
    {
        while self.step_count < self.step_goal {
            self.entropic_step_unsafe(&mut energy_fn);
            print_fn(self);
        }
    }

    /// # Entropic sampling
    /// * performs `self.entropic_step(energy_fn)` until `self.step_count == self.step_goal`
    /// # Parameter
    /// * `energy_fn` function calculating `Some(energy)` of the system
    ///   or rather the Parameter of which you wish to obtain the probability distribution.
    ///   If there are any states, for which the calculation is invalid, `None` should be returned
    /// * steps resulting in ensembles for which `energy_fn(&mut ensemble)` is `None`
    ///   will always be rejected
    /// * **Important** `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    /// * `print_fn`: see below
    /// # Correlations
    /// * if you want to measure correlations between "energy" and other measurable quantities,
    ///   use `print_fn`, which will be called after each step - use this function to write to 
    ///   a file or whatever you desire
    /// * Note: You do not have to recalculate the energy, if you need it in `print_fn`:
    ///   just call `self.energy()` 
    /// * you have access to your ensemble with `self.ensemble()`
    /// * if you do not need it, you can use `|_|{}` as `print_fn`
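    /// # Example (sketch)
    /// A hypothetical sketch of measuring a correlation with `print_fn`; `entropic`,
    /// `calc_energy` and `other_observable` are assumptions, not part of this crate:
    /// ```ignore
    /// use std::io::Write;
    /// let mut file = std::io::BufWriter::new(std::fs::File::create("correlations.dat").unwrap());
    /// entropic.entropic_sampling(
    ///     |ensemble| calc_energy(ensemble), // same energy_fn as used for Wang Landau
    ///     |state| {
    ///         // the energy does not have to be recalculated here
    ///         writeln!(file, "{:?} {:?}", state.energy(), other_observable(state.ensemble())).unwrap();
    ///     }
    /// );
    /// ```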
    pub fn entropic_sampling<F, G>(
        &mut self,
        mut energy_fn: F,
        mut print_fn: G,
    ) where F: FnMut(&E) -> Option<T>,
        G: FnMut(&Self)
    {
        while self.step_count < self.step_goal {
            self.entropic_step(&mut energy_fn);
            print_fn(self);
        }
    }

    /// # Entropic sampling using an accumulating markov step
    /// * performs `self.entropic_step_acc(&mut energy_fn)` until `self.step_count == self.step_goal`
    /// # Parameter
    /// * `energy_fn` function calculating the energy `E` of the system
    ///   (or rather the Parameter of which you wish to obtain the probability distribution)
    ///   during the markov steps, which can be more efficient.
    /// * **Important** `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    /// * `print_fn`: see below
    /// # Correlations
    /// * if you want to measure correlations between "energy" and other measurable quantities,
    ///   use `print_fn`, which will be called after each step - use this function to write to 
    ///   a file or whatever you desire
    /// * Note: You do not have to recalculate the energy, if you need it in `print_fn`:
    ///   just call `self.energy()` 
    /// * you have access to your ensemble with `self.ensemble()`
    /// * if you do not need it, you can use `|_|{}` as `print_fn`
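    /// # Example (sketch)
    /// A hypothetical call of the accumulating variant; `update_energy` is an assumed helper
    /// that adjusts the current energy for a single markov step `step`:
    /// ```ignore
    /// entropic.entropic_sampling_acc(
    ///     |ensemble, step, energy| update_energy(ensemble, step, energy), // energy_fn
    ///     |_state| {}                                                     // print_fn is not needed here
    /// );
    /// ```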
    pub fn entropic_sampling_acc<F, G>(
        &mut self,
        mut energy_fn: F,
        mut print_fn: G,
    ) where F: FnMut(&E, &S, &mut T),
        G: FnMut(&Self)
    {
        while self.step_count < self.step_goal {
            self.entropic_step_acc(&mut energy_fn);
            print_fn(self);
        }
    }

    /// # Entropic step
    /// * performs a single step
    /// # Parameter
    /// * `energy_fn` function calculating `Some(energy)` of the system
    ///   or rather the Parameter of which you wish to obtain the probability distribution.
    ///   If there are any states, for which the calculation is invalid, `None` should be returned
    /// * steps resulting in ensembles for which `energy_fn(&mut ensemble)` is `None`
    ///   will always be rejected
    /// # Important
    /// * `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    /// ## Safety
    /// * While you do have mutable access to the ensemble, the energy function should not change the 
    ///   ensemble in a way, which affects the next calculation of the energy
    /// * This is intended for usecases, where the energy calculation is more efficient with mutable access, e.g., through using a 
    ///   buffer stored in the ensemble
    /// * Note: I chose to make this function unsafe to force users to acknowledge the (purely logical) limitations 
    ///   regarding the usage of the mutable ensemble. From a programming point of view this will not lead to 
    ///   any undefined behavior or such regardless of if the user fulfills the requirements
    pub unsafe fn entropic_step_unsafe<F>(
        &mut self,
        mut energy_fn: F,
    ) where F: FnMut(&mut E) -> Option<T>
    {
        self.step_count += 1;
        let step_size = self.get_stepsize();

        self.ensemble.m_steps(step_size, &mut self.steps);

        let current_energy = match energy_fn(&mut self.ensemble)
        {
            Some(energy) => energy,
            None => {
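                // energy_fn could not compute an energy for this state:
                // reject the step, count the old bin again and undo the markov steps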
                self.ensemble.steps_rejected(&self.steps);
                self.count_rejected(step_size);
                self.histogram.increment_index(self.old_bin).unwrap();
                self.ensemble.undo_steps_quiet(&self.steps);
                return;
            }
        };

        self.entropic_step_helper(current_energy);
    }

    /// # Entropic step
    /// * performs a single step
    /// # Parameter
    /// * `energy_fn` function calculating `Some(energy)` of the system
    ///   or rather the Parameter of which you wish to obtain the probability distribution.
    ///   If there are any states, for which the calculation is invalid, `None` should be returned
    /// * steps resulting in ensembles for which `energy_fn(&mut ensemble)` is `None`
    ///   will always be rejected
    /// # Important
    /// * `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    pub fn entropic_step<F>(
        &mut self,
        mut energy_fn: F,
    ) where F: FnMut(&E) -> Option<T>
    {
        unsafe {
            self.entropic_step_unsafe(|e| energy_fn(e))
        }
    }

    /// # Accumulating entropic step
    /// * performs a single step
    /// # Parameter
    /// * `energy_fn` function calculating the energy `E` of the system
    ///   (or rather the Parameter of which you wish to obtain the probability distribution)
    ///   during the markov steps, which can be more efficient.
    /// # Important
    /// * `energy_fn`: should be the same as used for Wang Landau, otherwise the results will be wrong!
    pub fn entropic_step_acc<F>(
        &mut self,
        energy_fn: F,
    )
        where F: FnMut(&E, &S, &mut T)
    {
        self.step_count += 1;
        let step_size = self.get_stepsize();

        let mut current_energy = self.old_energy.clone();
        self.ensemble.m_steps_acc(
            step_size,
            &mut self.steps,
            &mut current_energy,
            energy_fn
        );
        self.entropic_step_helper(current_energy);
    }

    fn entropic_step_helper(&mut self, current_energy: T)
    {
        let step_size = self.steps.len();

        match self.histogram.get_bin_index(&current_energy)
        {
            Ok(current_bin) => {
                let accept_prob = self.metropolis_acception_prob(current_bin);

                if self.rng.random::<f64>() > accept_prob {
                    // reject step
                    self.ensemble.steps_rejected(&self.steps);
                    self.count_rejected(step_size);
                    self.ensemble.undo_steps_quiet(&self.steps);
                } else {
                    // accept step
                    self.ensemble.steps_accepted(&self.steps);
                    self.count_accepted(step_size);

                    self.old_energy = current_energy;
                    self.old_bin = current_bin;
                }
            },
            _ => {
                // invalid step -> reject
                self.ensemble.steps_rejected(&self.steps);
                self.count_rejected(step_size);
                self.ensemble.undo_steps_quiet(&self.steps);
            }
        };

        self.histogram.increment_index(self.old_bin).unwrap();
    }
}

impl<Hist, R, E, S, Res, T> Entropic for EntropicSamplingAdaptive<Hist, R, E, S, Res, T>
where Hist: Histogram,
    R: Rng
{
    /// # Number of entropic steps done until now
    /// * will be reset by [`self.refine_estimate`](#method.refine_estimate)
    #[inline]
    fn step_counter(&self) -> usize
    {
        self.step_count
    }

    fn total_steps_accepted(&self) -> usize {
        self.total_entr_steps_accepted() + self.wl_steps_accepted
    }

    fn total_steps_rejected(&self) -> usize {
        self.total_entr_steps_rejected() + self.wl_steps_rejected
    }

    /// # Number of entropic steps to be performed
    /// * if `self` was created from `WangLandauAdaptive`,
    ///   `step_goal` will be equal to the number of Wang Landau steps that were performed
    #[inline]
    fn step_goal(&self) -> usize
    {
        self.step_goal
    }

    fn log_density(&self) -> Vec<f64> {
        self.log_density_refined()
    }

    fn write_log<W: Write>(&self, mut w: W) -> Result<(), std::io::Error> {
        writeln!(w,
            "#Acceptance prob_total: {}\n#Acceptance prob current: {}\n#total_steps: {}",
            self.fraction_accepted_total(),
            self.fraction_accepted_current(),
            self.step_counter(),
        )?;

        writeln!(w, "#min_step_size {}", self.min_step_size())?;
        writeln!(w, "#max_step_size {}", self.max_step_size())?;

        write!(w, "#Current acceptance histogram:")?;
        for val in self.accepted_step_hist.iter()
        {
            write!(w, " {}", val)?;
        }

        write!(w, "\n#Current rejection histogram:")?;
        for val in self.rejected_step_hist.iter()
        {
            write!(w, " {}", val)?;
        }

        writeln!(w, "\n#bestof threshold: {}", self.best_of_threshold)?;
        writeln!(w, "#min_bestof_count: {}", self.min_best_of_count)?;
        write!(w, "\n#Current_Bestof:")?;

        for val in self.best_of_steps.iter()
        {
            write!(w, " {}", val)?;
        }

        write!(w, "\n#current_statistics_estimate:")?;
        let estimate = self.estimate_statistics();
        match estimate {
            Ok(estimate) => {
                for val in estimate
                {
                    write!(w, " {}", val)?;
                }
                writeln!(w)
            },
            _ => {
                writeln!(w, " None")
            }
        }
    }
}

impl<Hist, R, E, S, Res, Energy> EntropicEnergy<Energy> for EntropicSamplingAdaptive<Hist, R, E, S, Res, Energy>
where Hist: Histogram,
    R: Rng,
{
    /// # Energy of ensemble
    /// * assuming `energy_fn` (see `self.entropic_step` etc.)
    ///   is deterministic and will always give the same result for the same ensemble,
    ///   this returns the energy of the current ensemble
    #[inline]
    fn energy(&self) -> &Energy
    {
        &self.old_energy
    }
}

impl<Hist, R, E, S, Res, Energy> EntropicHist<Hist> for EntropicSamplingAdaptive<Hist, R, E, S, Res, Energy>
where Hist: Histogram,
    R: Rng,
{
    #[inline]
    fn hist(&self) -> &Hist
    {
        &self.histogram
    }
}

impl<Hist, R, E, S, Res, Energy> EntropicEnsemble<E> for EntropicSamplingAdaptive<Hist, R, E, S, Res, Energy>
where Hist: Histogram,
    R: Rng,
{
    fn ensemble(&self) -> &E {
        &self.ensemble
    }

    unsafe fn ensemble_mut(&mut self) -> &mut E {
        &mut self.ensemble
    }
}

impl<Hist, R, E, S, Res, Energy> HasRng<R> for EntropicSamplingAdaptive<Hist, R, E, S, Res, Energy>
    where R: Rng,
{
    fn rng(&mut self) -> &mut R {
        &mut self.rng
    }

    fn swap_rng(&mut self, rng: &mut R) {
        std::mem::swap(&mut self.rng, rng);
    }
}