1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
use{
    crate::{*, glue_helper::*},
    std::{
        io::Write,
        fmt::Display,
        cmp::*,
    }
};

/// # Glued together probability
/// * you can [write it to a file](#method.write), maybe the file makes more sense
/// * If you have problems understanding the fields,
/// please, first look at the [documentation of the current master branch](https://pardoxa.github.io/net_ensembles/master/doc/net_ensembles/)
/// to see, if the documentation there makes more sense. If it doesn't:
///  **open an issue on the github repository**
#[derive(Debug, Clone)]
pub struct GlueResult<T>{
    /// # probably the result you want, i.e., what you were simulating for
    /// * log10 of the probability of each bin
    /// * the probability of bin `i` is `10_f64.powf(glued_log10_probability[i])`, i.e., 10^bin_value
    /// * **normed** so that the probabilities sum to 1.0 (up to numerical precision errors)
    pub glued_log10_probability: Vec<f64>,
    /// # These are the bin borders
    /// * bin `i` is the half-open interval [`borders[i]`, `borders[i + 1]`[
    /// * i.e., the right border is exclusive
    pub borders: Vec<T>,
    /// # log10 of "probabilities" of the curves that were glued together
    /// * height adjusted so the intervals fit together, but **not** normalized — the sum can be anything
    pub log10_vec: Vec<Vec<f64>>,
    /// # Index map
    /// * `self.left_list[i]` is the index into `self.borders` at which the interval
    /// `self.log10_vec[i]` has its first entry
    pub left_list: Vec<usize>,

    /// # How many markov steps were performed in total?
    /// * includes steps used to find an initial valid ensemble
    /// * for entropic sampling, this includes the steps of the wang landau simulation
    pub total_steps: usize,
    /// # How many markov steps were accepted in total?
    /// * includes steps used to find an initial valid ensemble
    /// * for entropic sampling, this includes the steps of the wang landau simulation
    pub total_steps_accepted: usize,
    /// # How many markov steps were rejected in total?
    /// * includes steps used to find an initial valid ensemble
    /// * for entropic sampling, this includes the steps of the wang landau simulation
    pub total_steps_rejected: usize,
}

impl<T> GlueResult<T>
where T: Display
{
    /// # Write the result to a file
    /// * intended for plotting with gnuplot etc.
    /// * writes a commented header (column names + step statistics),
    ///   then one line per bin: left border, right border, glued log density,
    ///   followed by one column per original curve (`NONE` where a curve has no data)
    pub fn write<W: Write>(&self, mut w: W) -> std::io::Result<()>
    {
        // header: column names, one extra column per glued curve
        write!(w, "#bin_left bin_right glued_log_density")?;
        for (index, _) in self.log10_vec.iter().enumerate() {
            write!(w, " curve_{}", index)?;
        }
        writeln!(w)?;

        // step statistics as comment lines
        writeln!(w, "#total_steps {}", self.total_steps)?;
        writeln!(w, "#total_steps_accepted {}", self.total_steps_accepted)?;
        writeln!(w, "#total_steps_rejected {}", self.total_steps_rejected)?;
        let acceptance = self.total_steps_accepted as f64 / self.total_steps as f64;
        writeln!(w, "#total_acception_fraction {:e}", acceptance)?;
        let rejection = self.total_steps_rejected as f64 / self.total_steps as f64;
        writeln!(w, "#total_rejection_fraction {:e}", rejection)?;

        // one data row per bin
        for (bin, glued_val) in self.glued_log10_probability.iter().enumerate() {
            write!(w, "{} {} {:e}", self.borders[bin], self.borders[bin + 1], glued_val)?;
            // append each curve's value at this bin, or NONE if the curve
            // does not cover the bin (bin lies left of or past the curve)
            for (curve, &left) in self.log10_vec.iter().zip(self.left_list.iter()) {
                match bin.checked_sub(left).and_then(|pos| curve.get(pos)) {
                    Some(v) => write!(w, " {:e}", v)?,
                    None => write!(w, " NONE")?,
                }
            }
            writeln!(w)?;
        }
        Ok(())
    }
}

/// # Combine multiple WangLandau intervals to get the probability distribution of the whole interval
/// * `list`: slice of Wang landau distributions
/// # Restrictions
/// * `original_hist` has to contain all the borders of the histograms.
/// Meaning: the size of a bin has to be constant among all histograms of the `list`.
/// If it is not, you might get an error, or you might get wrong results.
/// **This is not checked exhaustively**.
/// * There is an **easy way** to make sure you don't run into problems here:
/// create the `original_hist` first, then create the other histograms using the `HistogramPartition` trait.
/// This is the intended way. But as long as the borders and bin sizes match, this function will work properly
/// # Understanding returned Parameters (OK(..)):
pub fn glue_wl<WL, HIST, T>(list: &[WL], original_hist: &HIST) -> Result<GlueResult<T>, GlueErrors>
where WL: WangLandauHist<HIST>,
    HIST: Histogram + HistogramVal<T>,
    T: PartialOrd
{
    if list.is_empty(){
        return Err(GlueErrors::EmptyList);
    }

    // accumulate the step statistics over all simulations
    let total_steps: usize = list.iter()
        .map(|wl| wl.steps_total())
        .sum();
    let total_steps_accepted: usize = list.iter()
        .map(|wl| wl.total_steps_accepted())
        .sum();
    let total_steps_rejected: usize = list.iter()
        .map(|wl| wl.total_steps_rejected())
        .sum();

    // sort the simulations by the left border of their interval;
    // incomparable borders (e.g. NaN) deliberately fall back to Less
    let mut sorted: Vec<_> = list.iter().collect();
    sorted.sort_unstable_by(|a, b| {
        a.hist()
            .first_border()
            .partial_cmp(&b.hist().first_border())
            .unwrap_or(Ordering::Less)
    });

    let borders = original_hist.borders_clone()
        .map_err(GlueErrors::BorderCreation)?;

    // map each interval onto the index range it occupies within `borders`;
    // short-circuits on the first interval whose border is not found
    let pairs: Vec<(usize, usize)> = sorted.iter()
        .map(|wl| {
            let hist = wl.hist();
            Ok((
                get_index(&hist.first_border(), &borders)?,
                get_index(&hist.second_last_border(), &borders)?,
            ))
        })
        .collect::<Result<_, GlueErrors>>()?;
    let (left_list, right_list): (Vec<usize>, Vec<usize>) = pairs.into_iter().unzip();

    // log10 of the estimated densities, one Vec per simulation
    let mut log10_vec: Vec<_> = sorted.iter()
        .map(|wl| wl.log_density_base10())
        .collect();

    // re-normalize to prevent later precision problems
    inner_subtract_max(&mut log10_vec);

    // normalization constants used to align neighboring curves
    let z_vec = calc_z(&log10_vec, &left_list, &right_list)?;

    // shift the curves so their overlapping regions agree in height
    height_correction(&mut log10_vec, &z_vec);

    // merge the height-corrected curves into one distribution
    let mut glued = glue(original_hist.bin_count(), &log10_vec, &left_list, &right_list)?;

    // norm the result so the probabilities sum to 1
    norm_log10_sum_to_1(&mut glued);

    Ok(
        GlueResult{
            glued_log10_probability: glued,
            log10_vec,
            left_list,
            borders,
            total_steps,
            total_steps_accepted,
            total_steps_rejected,
        }
    )
}