diff --git a/reCTBN/src/lib.rs b/reCTBN/src/lib.rs index 8feddfb..1997fa6 100644 --- a/reCTBN/src/lib.rs +++ b/reCTBN/src/lib.rs @@ -6,7 +6,7 @@ extern crate approx; pub mod parameter_learning; pub mod params; pub mod process; -pub mod reward_function; +pub mod reward; pub mod sampling; pub mod structure_learning; pub mod tools; diff --git a/reCTBN/src/parameter_learning.rs b/reCTBN/src/parameter_learning.rs index 536a9d5..3c34d06 100644 --- a/reCTBN/src/parameter_learning.rs +++ b/reCTBN/src/parameter_learning.rs @@ -144,6 +144,10 @@ impl ParameterLearning for BayesianApproach { .zip(M.mapv(|x| x as f64).axis_iter(Axis(2))) .for_each(|(mut C, m)| C.assign(&(&m.mapv(|y| y + alpha) / &T.mapv(|y| y + tau)))); + CIM.outer_iter_mut().for_each(|mut C| { + C.diag_mut().fill(0.0); + }); + //Set the diagonal of the inner matrices to the the row sum multiplied by -1 let tmp_diag_sum: Array2 = CIM.sum_axis(Axis(2)).mapv(|x| x * -1.0); CIM.outer_iter_mut() diff --git a/reCTBN/src/params.rs b/reCTBN/src/params.rs index dc941e5..3d08273 100644 --- a/reCTBN/src/params.rs +++ b/reCTBN/src/params.rs @@ -20,7 +20,7 @@ pub enum ParamsError { } /// Allowed type of states -#[derive(Clone)] +#[derive(Clone, Hash, PartialEq, Eq, Debug)] pub enum StateType { Discrete(usize), } @@ -267,11 +267,13 @@ impl ParamsTrait for DiscreteStatesContinousTimeParams { ))); } + let domain_size = domain_size as f64; + // Check if each row sum up to 0 if cim .sum_axis(Axis(2)) .iter() - .any(|x| f64::abs(x.clone()) > f64::EPSILON.sqrt()) + .any(|x| f64::abs(x.clone()) > f64::EPSILON * domain_size) { return Err(ParamsError::InvalidCIM(String::from( "The sum of each row must be 0", diff --git a/reCTBN/src/reward.rs b/reCTBN/src/reward.rs new file mode 100644 index 0000000..910954c --- /dev/null +++ b/reCTBN/src/reward.rs @@ -0,0 +1,59 @@ +pub mod reward_evaluation; +pub mod reward_function; + +use std::collections::HashMap; + +use crate::process; + +/// Instantiation of reward function and instantaneous 
reward +/// +/// +/// # Arguments +/// +/// * `transition_reward`: reward obtained transitioning from one state to another +/// * `instantaneous_reward`: reward per unit of time obtained staying in a specific state + +#[derive(Debug, PartialEq)] +pub struct Reward { + pub transition_reward: f64, + pub instantaneous_reward: f64, +} + +/// The trait RewardFunction describe the methods that all the reward functions must satisfy + +pub trait RewardFunction: Sync { + /// Given the current state and the previous state, it compute the reward. + /// + /// # Arguments + /// + /// * `current_state`: the current state of the network represented as a `process::NetworkProcessState` + /// * `previous_state`: an optional argument representing the previous state of the network + + fn call( + &self, + current_state: &process::NetworkProcessState, + previous_state: Option<&process::NetworkProcessState>, + ) -> Reward; + + /// Initialize the RewardFunction internal accordingly to the structure of a NetworkProcess + /// + /// # Arguments + /// + /// * `p`: any structure that implements the trait `process::NetworkProcess` + fn initialize_from_network_process(p: &T) -> Self; +} + +pub trait RewardEvaluation { + fn evaluate_state_space( + &self, + network_process: &N, + reward_function: &R, + ) -> HashMap; + + fn evaluate_state( + &self, + network_process: &N, + reward_function: &R, + state: &process::NetworkProcessState, + ) -> f64; +} diff --git a/reCTBN/src/reward/reward_evaluation.rs b/reCTBN/src/reward/reward_evaluation.rs new file mode 100644 index 0000000..3802489 --- /dev/null +++ b/reCTBN/src/reward/reward_evaluation.rs @@ -0,0 +1,205 @@ +use std::collections::HashMap; + +use rayon::prelude::{IntoParallelIterator, ParallelIterator}; +use statrs::distribution::ContinuousCDF; + +use crate::params::{self, ParamsTrait}; +use crate::process; + +use crate::{ + process::NetworkProcessState, + reward::RewardEvaluation, + sampling::{ForwardSampler, Sampler}, +}; + +pub enum 
RewardCriteria { + FiniteHorizon, + InfiniteHorizon { discount_factor: f64 }, +} + +pub struct MonteCarloReward { + max_iterations: usize, + max_err_stop: f64, + alpha_stop: f64, + end_time: f64, + reward_criteria: RewardCriteria, + seed: Option, +} + +impl MonteCarloReward { + pub fn new( + max_iterations: usize, + max_err_stop: f64, + alpha_stop: f64, + end_time: f64, + reward_criteria: RewardCriteria, + seed: Option, + ) -> MonteCarloReward { + MonteCarloReward { + max_iterations, + max_err_stop, + alpha_stop, + end_time, + reward_criteria, + seed, + } + } +} + +impl RewardEvaluation for MonteCarloReward { + fn evaluate_state_space( + &self, + network_process: &N, + reward_function: &R, + ) -> HashMap { + let variables_domain: Vec> = network_process + .get_node_indices() + .map(|x| match network_process.get_node(x) { + params::Params::DiscreteStatesContinousTime(x) => (0..x + .get_reserved_space_as_parent()) + .map(|s| params::StateType::Discrete(s)) + .collect(), + }) + .collect(); + + let n_states: usize = variables_domain.iter().map(|x| x.len()).product(); + + (0..n_states) + .into_par_iter() + .map(|s| { + let state: process::NetworkProcessState = variables_domain + .iter() + .fold((s, vec![]), |acc, x| { + let mut acc = acc; + let idx_s = acc.0 % x.len(); + acc.1.push(x[idx_s].clone()); + acc.0 = acc.0 / x.len(); + acc + }) + .1; + + let r = self.evaluate_state(network_process, reward_function, &state); + (state, r) + }) + .collect() + } + + fn evaluate_state( + &self, + network_process: &N, + reward_function: &R, + state: &NetworkProcessState, + ) -> f64 { + let mut sampler = + ForwardSampler::new(network_process, self.seed.clone(), Some(state.clone())); + let mut expected_value = 0.0; + let mut squared_expected_value = 0.0; + let normal = statrs::distribution::Normal::new(0.0, 1.0).unwrap(); + + for i in 0..self.max_iterations { + sampler.reset(); + let mut ret = 0.0; + let mut previous = sampler.next().unwrap(); + while previous.t < self.end_time { + let 
current = sampler.next().unwrap(); + if current.t > self.end_time { + let r = reward_function.call(&previous.state, None); + let discount = match self.reward_criteria { + RewardCriteria::FiniteHorizon => self.end_time - previous.t, + RewardCriteria::InfiniteHorizon { discount_factor } => { + std::f64::consts::E.powf(-discount_factor * previous.t) + - std::f64::consts::E.powf(-discount_factor * self.end_time) + } + }; + ret += discount * r.instantaneous_reward; + } else { + let r = reward_function.call(&previous.state, Some(&current.state)); + let discount = match self.reward_criteria { + RewardCriteria::FiniteHorizon => current.t - previous.t, + RewardCriteria::InfiniteHorizon { discount_factor } => { + std::f64::consts::E.powf(-discount_factor * previous.t) + - std::f64::consts::E.powf(-discount_factor * current.t) + } + }; + ret += discount * r.instantaneous_reward; + ret += match self.reward_criteria { + RewardCriteria::FiniteHorizon => 1.0, + RewardCriteria::InfiniteHorizon { discount_factor } => { + std::f64::consts::E.powf(-discount_factor * current.t) + } + } * r.transition_reward; + } + previous = current; + } + + let float_i = i as f64; + expected_value = + expected_value * float_i as f64 / (float_i + 1.0) + ret / (float_i + 1.0); + squared_expected_value = squared_expected_value * float_i as f64 / (float_i + 1.0) + + ret.powi(2) / (float_i + 1.0); + + if i > 2 { + let var = + (float_i + 1.0) / float_i * (squared_expected_value - expected_value.powi(2)); + if self.alpha_stop + - 2.0 * normal.cdf(-(float_i + 1.0).sqrt() * self.max_err_stop / var.sqrt()) + > 0.0 + { + return expected_value; + } + } + } + + expected_value + } +} + +pub struct NeighborhoodRelativeReward<RE: RewardEvaluation> { + inner_reward: RE, +} + +impl<RE: RewardEvaluation> NeighborhoodRelativeReward<RE> { + pub fn new(inner_reward: RE) -> NeighborhoodRelativeReward<RE> { + NeighborhoodRelativeReward { inner_reward } + } +} + +impl<RE: RewardEvaluation> RewardEvaluation for NeighborhoodRelativeReward<RE> { + fn evaluate_state_space<N: process::NetworkProcess, R: RewardFunction>( + &self, + network_process: &N, + 
reward_function: &R, + ) -> HashMap { + let absolute_reward = self + .inner_reward + .evaluate_state_space(network_process, reward_function); + + //This approach optimize memory. Maybe optimizing execution time can be better. + absolute_reward + .iter() + .map(|(k1, v1)| { + let mut max_val: f64 = 1.0; + absolute_reward.iter().for_each(|(k2, v2)| { + let count_diff: usize = k1 + .iter() + .zip(k2.iter()) + .map(|(s1, s2)| if s1 == s2 { 0 } else { 1 }) + .sum(); + if count_diff < 2 { + max_val = max_val.max(v1 / v2); + } + }); + (k1.clone(), max_val) + }) + .collect() + } + + fn evaluate_state( + &self, + _network_process: &N, + _reward_function: &R, + _state: &process::NetworkProcessState, + ) -> f64 { + unimplemented!(); + } +} diff --git a/reCTBN/src/reward_function.rs b/reCTBN/src/reward/reward_function.rs similarity index 70% rename from reCTBN/src/reward_function.rs rename to reCTBN/src/reward/reward_function.rs index 35e15c8..216df6a 100644 --- a/reCTBN/src/reward_function.rs +++ b/reCTBN/src/reward/reward_function.rs @@ -3,46 +3,10 @@ use crate::{ params::{self, ParamsTrait}, process, + reward::{Reward, RewardFunction}, }; -use ndarray; - -/// Instantiation of reward function and instantaneous reward -/// -/// -/// # Arguments -/// -/// * `transition_reward`: reward obtained transitioning from one state to another -/// * `instantaneous_reward`: reward per unit of time obtained staying in a specific state - -#[derive(Debug, PartialEq)] -pub struct Reward { - pub transition_reward: f64, - pub instantaneous_reward: f64, -} -/// The trait RewardFunction describe the methods that all the reward functions must satisfy - -pub trait RewardFunction { - /// Given the current state and the previous state, it compute the reward. 
- /// - /// # Arguments - /// - /// * `current_state`: the current state of the network represented as a `process::NetworkProcessState` - /// * `previous_state`: an optional argument representing the previous state of the network - - fn call( - &self, - current_state: process::NetworkProcessState, - previous_state: Option, - ) -> Reward; - - /// Initialize the RewardFunction internal accordingly to the structure of a NetworkProcess - /// - /// # Arguments - /// - /// * `p`: any structure that implements the trait `process::NetworkProcess` - fn initialize_from_network_process(p: &T) -> Self; -} +use ndarray; /// Reward function over a factored state space /// @@ -80,8 +44,8 @@ impl FactoredRewardFunction { impl RewardFunction for FactoredRewardFunction { fn call( &self, - current_state: process::NetworkProcessState, - previous_state: Option, + current_state: &process::NetworkProcessState, + previous_state: Option<&process::NetworkProcessState>, ) -> Reward { let instantaneous_reward: f64 = current_state .iter() diff --git a/reCTBN/src/sampling.rs b/reCTBN/src/sampling.rs index 1384872..73c6d78 100644 --- a/reCTBN/src/sampling.rs +++ b/reCTBN/src/sampling.rs @@ -26,10 +26,15 @@ where current_time: f64, current_state: NetworkProcessState, next_transitions: Vec>, + initial_state: Option, } impl<'a, T: NetworkProcess> ForwardSampler<'a, T> { - pub fn new(net: &'a T, seed: Option) -> ForwardSampler<'a, T> { + pub fn new( + net: &'a T, + seed: Option, + initial_state: Option, + ) -> ForwardSampler<'a, T> { let rng: ChaCha8Rng = match seed { //If a seed is present use it to initialize the random generator. 
Some(seed) => SeedableRng::seed_from_u64(seed), @@ -37,11 +42,12 @@ impl<'a, T: NetworkProcess> ForwardSampler<'a, T> { None => SeedableRng::from_entropy(), }; let mut fs = ForwardSampler { - net: net, - rng: rng, + net, + rng, current_time: 0.0, current_state: vec![], next_transitions: vec![], + initial_state, }; fs.reset(); return fs; @@ -112,11 +118,16 @@ impl<'a, T: NetworkProcess> Iterator for ForwardSampler<'a, T> { impl<'a, T: NetworkProcess> Sampler for ForwardSampler<'a, T> { fn reset(&mut self) { self.current_time = 0.0; - self.current_state = self - .net - .get_node_indices() - .map(|x| self.net.get_node(x).get_random_state_uniform(&mut self.rng)) - .collect(); + match &self.initial_state { + None => { + self.current_state = self + .net + .get_node_indices() + .map(|x| self.net.get_node(x).get_random_state_uniform(&mut self.rng)) + .collect() + } + Some(is) => self.current_state = is.clone(), + }; self.next_transitions = self.net.get_node_indices().map(|_| Option::None).collect(); } } diff --git a/reCTBN/src/structure_learning/constraint_based_algorithm.rs b/reCTBN/src/structure_learning/constraint_based_algorithm.rs index f9cd820..f49b194 100644 --- a/reCTBN/src/structure_learning/constraint_based_algorithm.rs +++ b/reCTBN/src/structure_learning/constraint_based_algorithm.rs @@ -79,190 +79,6 @@ impl<'a, P: ParameterLearning> Cache<'a, P> { } } -/// Continuous-Time Peter Clark algorithm. -/// -/// A method to learn the structure of the network. -/// -/// # Arguments -/// -/// * [`parameter_learning`](crate::parameter_learning) - is the method used to learn the parameters. -/// * [`Ftest`](crate::structure_learning::hypothesis_test::F) - is the F-test hyppothesis test. -/// * [`Chi2test`](crate::structure_learning::hypothesis_test::ChiSquare) - is the chi-squared test (χ2 test) hypothesis test. 
-/// # Example -/// -/// ```rust -/// # use std::collections::BTreeSet; -/// # use ndarray::{arr1, arr2, arr3}; -/// # use reCTBN::params; -/// # use reCTBN::tools::trajectory_generator; -/// # use reCTBN::process::NetworkProcess; -/// # use reCTBN::process::ctbn::CtbnNetwork; -/// use reCTBN::parameter_learning::BayesianApproach; -/// use reCTBN::structure_learning::StructureLearningAlgorithm; -/// use reCTBN::structure_learning::hypothesis_test::{F, ChiSquare}; -/// use reCTBN::structure_learning::constraint_based_algorithm::CTPC; -/// # -/// # // Create the domain for a discrete node -/// # let mut domain = BTreeSet::new(); -/// # domain.insert(String::from("A")); -/// # domain.insert(String::from("B")); -/// # domain.insert(String::from("C")); -/// # // Create the parameters for a discrete node using the domain -/// # let param = params::DiscreteStatesContinousTimeParams::new("n1".to_string(), domain); -/// # //Create the node n1 using the parameters -/// # let n1 = params::Params::DiscreteStatesContinousTime(param); -/// # -/// # let mut domain = BTreeSet::new(); -/// # domain.insert(String::from("D")); -/// # domain.insert(String::from("E")); -/// # domain.insert(String::from("F")); -/// # let param = params::DiscreteStatesContinousTimeParams::new("n2".to_string(), domain); -/// # let n2 = params::Params::DiscreteStatesContinousTime(param); -/// # -/// # let mut domain = BTreeSet::new(); -/// # domain.insert(String::from("G")); -/// # domain.insert(String::from("H")); -/// # domain.insert(String::from("I")); -/// # domain.insert(String::from("F")); -/// # let param = params::DiscreteStatesContinousTimeParams::new("n3".to_string(), domain); -/// # let n3 = params::Params::DiscreteStatesContinousTime(param); -/// # -/// # // Initialize a ctbn -/// # let mut net = CtbnNetwork::new(); -/// # -/// # // Add the nodes and their edges -/// # let n1 = net.add_node(n1).unwrap(); -/// # let n2 = net.add_node(n2).unwrap(); -/// # let n3 = net.add_node(n3).unwrap(); -/// 
# net.add_edge(n1, n2); -/// # net.add_edge(n1, n3); -/// # net.add_edge(n2, n3); -/// # -/// # match &mut net.get_node_mut(n1) { -/// # params::Params::DiscreteStatesContinousTime(param) => { -/// # assert_eq!( -/// # Ok(()), -/// # param.set_cim(arr3(&[ -/// # [ -/// # [-3.0, 2.0, 1.0], -/// # [1.5, -2.0, 0.5], -/// # [0.4, 0.6, -1.0] -/// # ], -/// # ])) -/// # ); -/// # } -/// # } -/// # -/// # match &mut net.get_node_mut(n2) { -/// # params::Params::DiscreteStatesContinousTime(param) => { -/// # assert_eq!( -/// # Ok(()), -/// # param.set_cim(arr3(&[ -/// # [ -/// # [-1.0, 0.5, 0.5], -/// # [3.0, -4.0, 1.0], -/// # [0.9, 0.1, -1.0] -/// # ], -/// # [ -/// # [-6.0, 2.0, 4.0], -/// # [1.5, -2.0, 0.5], -/// # [3.0, 1.0, -4.0] -/// # ], -/// # [ -/// # [-1.0, 0.1, 0.9], -/// # [2.0, -2.5, 0.5], -/// # [0.9, 0.1, -1.0] -/// # ], -/// # ])) -/// # ); -/// # } -/// # } -/// # -/// # match &mut net.get_node_mut(n3) { -/// # params::Params::DiscreteStatesContinousTime(param) => { -/// # assert_eq!( -/// # Ok(()), -/// # param.set_cim(arr3(&[ -/// # [ -/// # [-1.0, 0.5, 0.3, 0.2], -/// # [0.5, -4.0, 2.5, 1.0], -/// # [2.5, 0.5, -4.0, 1.0], -/// # [0.7, 0.2, 0.1, -1.0] -/// # ], -/// # [ -/// # [-6.0, 2.0, 3.0, 1.0], -/// # [1.5, -3.0, 0.5, 1.0], -/// # [2.0, 1.3, -5.0, 1.7], -/// # [2.5, 0.5, 1.0, -4.0] -/// # ], -/// # [ -/// # [-1.3, 0.3, 0.1, 0.9], -/// # [1.4, -4.0, 0.5, 2.1], -/// # [1.0, 1.5, -3.0, 0.5], -/// # [0.4, 0.3, 0.1, -0.8] -/// # ], -/// # [ -/// # [-2.0, 1.0, 0.7, 0.3], -/// # [1.3, -5.9, 2.7, 1.9], -/// # [2.0, 1.5, -4.0, 0.5], -/// # [0.2, 0.7, 0.1, -1.0] -/// # ], -/// # [ -/// # [-6.0, 1.0, 2.0, 3.0], -/// # [0.5, -3.0, 1.0, 1.5], -/// # [1.4, 2.1, -4.3, 0.8], -/// # [0.5, 1.0, 2.5, -4.0] -/// # ], -/// # [ -/// # [-1.3, 0.9, 0.3, 0.1], -/// # [0.1, -1.3, 0.2, 1.0], -/// # [0.5, 1.0, -3.0, 1.5], -/// # [0.1, 0.4, 0.3, -0.8] -/// # ], -/// # [ -/// # [-2.0, 1.0, 0.6, 0.4], -/// # [2.6, -7.1, 1.4, 3.1], -/// # [5.0, 1.0, -8.0, 2.0], -/// # [1.4, 0.4, 
0.2, -2.0] -/// # ], -/// # [ -/// # [-3.0, 1.0, 1.5, 0.5], -/// # [3.0, -6.0, 1.0, 2.0], -/// # [0.3, 0.5, -1.9, 1.1], -/// # [5.0, 1.0, 2.0, -8.0] -/// # ], -/// # [ -/// # [-2.6, 0.6, 0.2, 1.8], -/// # [2.0, -6.0, 3.0, 1.0], -/// # [0.1, 0.5, -1.3, 0.7], -/// # [0.8, 0.6, 0.2, -1.6] -/// # ], -/// # ])) -/// # ); -/// # } -/// # } -/// # -/// # // Generate the trajectory -/// # let data = trajectory_generator(&net, 300, 30.0, Some(4164901764658873)); -/// -/// // Initialize the hypothesis tests to pass to the CTPC with their -/// // respective significance level `alpha` -/// let f = F::new(1e-6); -/// let chi_sq = ChiSquare::new(1e-4); -/// // Use the bayesian approach to learn the parameters -/// let parameter_learning = BayesianApproach { alpha: 1, tau:1.0 }; -/// -/// //Initialize CTPC -/// let ctpc = CTPC::new(parameter_learning, f, chi_sq); -/// -/// // Learn the structure of the network from the generated trajectory -/// let net = ctpc.fit_transform(net, &data); -/// # -/// # // Compare the generated network with the original one -/// # assert_eq!(BTreeSet::new(), net.get_parent_set(0)); -/// # assert_eq!(BTreeSet::from_iter(vec![0]), net.get_parent_set(1)); -/// # assert_eq!(BTreeSet::from_iter(vec![0, 1]), net.get_parent_set(2)); -/// ``` pub struct CTPC { parameter_learning: P, Ftest: F, diff --git a/reCTBN/src/structure_learning/hypothesis_test.rs b/reCTBN/src/structure_learning/hypothesis_test.rs index 311ec47..4c02929 100644 --- a/reCTBN/src/structure_learning/hypothesis_test.rs +++ b/reCTBN/src/structure_learning/hypothesis_test.rs @@ -39,17 +39,6 @@ pub struct ChiSquare { alpha: f64, } -/// Does the F-test. -/// -/// Used to determine if a difference between two sets of data is due to chance, or if it is due to -/// a relationship (dependence) between the variables. 
-/// -/// # Arguments -/// -/// * `alpha` - is the significance level, the probability to reject a true null hypothesis; -/// in other words is the risk of concluding that an association between the variables exists -/// when there is no actual association. - pub struct F { alpha: f64, } @@ -59,20 +48,6 @@ impl F { F { alpha } } - /// Compare two matrices extracted from two 3rd-orer tensors. - /// - /// # Arguments - /// - /// * `i` - Position of the matrix of `M1` to compare with `M2`. - /// * `M1` - 3rd-order tensor 1. - /// * `j` - Position of the matrix of `M2` to compare with `M1`. - /// * `M2` - 3rd-order tensor 2. - /// - /// # Returns - /// - /// * `true` - when the matrices `M1` and `M2` are very similar, then **independendent**. - /// * `false` - when the matrices `M1` and `M2` are too different, then **dependent**. - pub fn compare_matrices( &self, i: usize, @@ -189,8 +164,26 @@ impl ChiSquare { // continuous-time Bayesian networks. // International Journal of Approximate Reasoning, 138, pp.105-122. // Also: https://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/chi2samp.htm + // + // M = M M = M + // 1 xx'|s 2 xx'|y,s let M1 = M1.index_axis(Axis(0), i).mapv(|x| x as f64); let M2 = M2.index_axis(Axis(0), j).mapv(|x| x as f64); + // __________________ + // / === + // / \ M + // / / xx'|s + // / === + // / x'ϵVal /X \ + // / \ i/ 1 + //K = / ------------------ L = - + // / === K + // / \ M + // / / xx'|y,s + // / === + // / x'ϵVal /X \ + // \ / \ i/ + // \/ let K = M1.sum_axis(Axis(1)) / M2.sum_axis(Axis(1)); let K = K.mapv(f64::sqrt); // Reshape to column vector. @@ -198,16 +191,34 @@ impl ChiSquare { let n = K.len(); K.into_shape((n, 1)).unwrap() }; + //println!("K: {:?}", K); let L = 1.0 / &K; + // ===== 2 + // \ (K . M - L . 
M) + // \ 2 1 + // / --------------- + // / M + M + // ===== 2 1 + // x'ϵVal /X \ + // \ i/ let mut X_2 = (&K * &M2 - &L * &M1).mapv(|a| a.powi(2)) / (&M2 + &M1); + //println!("M1: {:?}", M1); + //println!("M2: {:?}", M2); + //println!("L*M1: {:?}", (L * &M1)); + //println!("K*M2: {:?}", (K * &M2)); + //println!("X_2: {:?}", X_2); X_2.diag_mut().fill(0.0); let X_2 = X_2.sum_axis(Axis(1)); let n = ChiSquared::new((X_2.dim() - 1) as f64).unwrap(); + //println!("CHI^2: {:?}", n); + //println!("CHI^2 CDF: {:?}", X_2.mapv(|x| n.cdf(x))); let ret = X_2.into_iter().all(|x| n.cdf(x) < (1.0 - self.alpha)); + //println!("test: {:?}", ret); ret } } +// Returns `true` when the variables are independent, `false` when they are dependent impl HypothesisTest for ChiSquare { fn call( &self, @@ -222,9 +233,13 @@ impl HypothesisTest for ChiSquare { T: process::NetworkProcess, P: parameter_learning::ParameterLearning, { + // Fetch the parameter-learning result from the cache: the CIM + // (one n x n matrix per parent configuration) + // (CIM, M, T) let P_small = match cache.fit(net, &dataset, child_node, Some(separation_set.clone())) { Params::DiscreteStatesContinousTime(node) => node, }; + // let mut extended_separation_set = separation_set.clone(); extended_separation_set.insert(parent_node); @@ -236,6 +251,7 @@ impl HypothesisTest for ChiSquare { ) { Params::DiscreteStatesContinousTime(node) => node, }; + // TODO: document this computation let partial_cardinality_product: usize = extended_separation_set .iter() .take_while(|x| **x != parent_node) diff --git a/reCTBN/src/structure_learning/score_based_algorithm.rs b/reCTBN/src/structure_learning/score_based_algorithm.rs index d65ea88..9173b86 100644 --- a/reCTBN/src/structure_learning/score_based_algorithm.rs +++ b/reCTBN/src/structure_learning/score_based_algorithm.rs @@ -6,6 +6,9 @@ use crate::structure_learning::score_function::ScoreFunction; use crate::structure_learning::StructureLearningAlgorithm; use crate::{process, tools::Dataset}; +use 
rayon::iter::{IntoParallelIterator, ParallelIterator}; +use rayon::prelude::ParallelExtend; + pub struct HillClimbing { score_function: S, max_parent_set: Option, @@ -36,8 +39,9 @@ impl StructureLearningAlgorithm for HillClimbing { let max_parent_set = self.max_parent_set.unwrap_or(net.get_number_of_nodes()); //Reset the adj matrix net.initialize_adj_matrix(); + let mut learned_parent_sets: Vec<(usize, BTreeSet)> = vec![]; //Iterate over each node to learn their parent set. - for node in net.get_node_indices() { + learned_parent_sets.par_extend(net.get_node_indices().into_par_iter().map(|node| { //Initialize an empty parent set. let mut parent_set: BTreeSet = BTreeSet::new(); //Compute the score for the empty parent set @@ -76,10 +80,14 @@ impl StructureLearningAlgorithm for HillClimbing { } } } - //Apply the learned parent_set to the network struct. - parent_set.iter().for_each(|p| net.add_edge(*p, node)); - } + (node, parent_set) + })); + for (child_node, candidate_parent_set) in learned_parent_sets { + for parent_node in candidate_parent_set.iter() { + net.add_edge(*parent_node, child_node); + } + } return net; } } diff --git a/reCTBN/src/structure_learning/score_function.rs b/reCTBN/src/structure_learning/score_function.rs index f8b38b5..5a56594 100644 --- a/reCTBN/src/structure_learning/score_function.rs +++ b/reCTBN/src/structure_learning/score_function.rs @@ -7,7 +7,7 @@ use statrs::function::gamma; use crate::{parameter_learning, params, process, tools}; -pub trait ScoreFunction { +pub trait ScoreFunction: Sync { fn call( &self, net: &T, diff --git a/reCTBN/src/tools.rs b/reCTBN/src/tools.rs index 0a48410..1fdb661 100644 --- a/reCTBN/src/tools.rs +++ b/reCTBN/src/tools.rs @@ -1,13 +1,7 @@ //! Contains commonly used methods used across the crate. 
-use std::ops::{DivAssign, MulAssign, Range}; +use ndarray::prelude::*; -use ndarray::{Array, Array1, Array2, Array3, Axis}; -use rand::{Rng, SeedableRng}; -use rand_chacha::ChaCha8Rng; - -use crate::params::ParamsTrait; -use crate::process::NetworkProcess; use crate::sampling::{ForwardSampler, Sampler}; use crate::{params, process}; @@ -69,8 +63,7 @@ pub fn trajectory_generator( let mut trajectories: Vec = Vec::new(); //Random Generator object - - let mut sampler = ForwardSampler::new(net, seed); + let mut sampler = ForwardSampler::new(net, seed, None); //Each iteration generate one trajectory for _ in 0..n_trajectories { //History of all the moments in which something changed @@ -114,243 +107,3 @@ pub fn trajectory_generator( //Return a dataset object with the sampled trajectories. Dataset::new(trajectories) } - -pub trait RandomGraphGenerator { - fn new(density: f64, seed: Option) -> Self; - fn generate_graph(&mut self, net: &mut T); -} - -/// Graph Generator using an uniform distribution. -/// -/// A method to generate a random graph with edges uniformly distributed. -/// -/// # Arguments -/// -/// * `density` - is the density of the graph in terms of edges; domain: `0.0 ≤ density ≤ 1.0`. -/// * `rng` - is the random numbers generator. 
-/// -/// # Example -/// -/// ```rust -/// # use std::collections::BTreeSet; -/// # use ndarray::{arr1, arr2, arr3}; -/// # use reCTBN::params; -/// # use reCTBN::params::Params::DiscreteStatesContinousTime; -/// # use reCTBN::tools::trajectory_generator; -/// # use reCTBN::process::NetworkProcess; -/// # use reCTBN::process::ctbn::CtbnNetwork; -/// use reCTBN::tools::UniformGraphGenerator; -/// use reCTBN::tools::RandomGraphGenerator; -/// # let mut net = CtbnNetwork::new(); -/// # let nodes_cardinality = 8; -/// # let domain_cardinality = 4; -/// # for node in 0..nodes_cardinality { -/// # // Create the domain for a discrete node -/// # let mut domain = BTreeSet::new(); -/// # for dvalue in 0..domain_cardinality { -/// # domain.insert(dvalue.to_string()); -/// # } -/// # // Create the parameters for a discrete node using the domain -/// # let param = params::DiscreteStatesContinousTimeParams::new( -/// # node.to_string(), -/// # domain -/// # ); -/// # //Create the node using the parameters -/// # let node = DiscreteStatesContinousTime(param); -/// # // Add the node to the network -/// # net.add_node(node).unwrap(); -/// # } -/// -/// // Initialize the Graph Generator using the one with an -/// // uniform distribution -/// let density = 1.0/3.0; -/// let seed = Some(7641630759785120); -/// let mut structure_generator = UniformGraphGenerator::new( -/// density, -/// seed -/// ); -/// -/// // Generate the graph directly on the network -/// structure_generator.generate_graph(&mut net); -/// # // Count all the edges generated in the network -/// # let mut edges = 0; -/// # for node in net.get_node_indices(){ -/// # edges += net.get_children_set(node).len() -/// # } -/// # // Number of all the nodes in the network -/// # let nodes = net.get_node_indices().len() as f64; -/// # let expected_edges = (density * nodes * (nodes - 1.0)).round() as usize; -/// # // ±10% of tolerance -/// # let tolerance = ((expected_edges as f64)*0.10) as usize; -/// # // As the way 
`generate_graph()` is implemented we can only reasonably -/// # // expect the number of edges to be somewhere around the expected value. -/// # assert!((expected_edges - tolerance) <= edges && edges <= (expected_edges + tolerance)); -/// ``` -pub struct UniformGraphGenerator { - density: f64, - rng: ChaCha8Rng, -} - -impl RandomGraphGenerator for UniformGraphGenerator { - fn new(density: f64, seed: Option) -> UniformGraphGenerator { - if density < 0.0 || density > 1.0 { - panic!( - "Density value must be between 1.0 and 0.0, got {}.", - density - ); - } - let rng: ChaCha8Rng = match seed { - Some(seed) => SeedableRng::seed_from_u64(seed), - None => SeedableRng::from_entropy(), - }; - UniformGraphGenerator { density, rng } - } - - /// Generate an uniformly distributed graph. - fn generate_graph(&mut self, net: &mut T) { - net.initialize_adj_matrix(); - let last_node_idx = net.get_node_indices().len(); - for parent in 0..last_node_idx { - for child in 0..last_node_idx { - if parent != child { - if self.rng.gen_bool(self.density) { - net.add_edge(parent, child); - } - } - } - } - } -} - -pub trait RandomParametersGenerator { - fn new(interval: Range, seed: Option) -> Self; - fn generate_parameters(&mut self, net: &mut T); -} - -/// Parameters Generator using an uniform distribution. -/// -/// A method to generate random parameters uniformly distributed. -/// -/// # Arguments -/// -/// * `interval` - is the interval of the random values oh the CIM's diagonal; domain: `≥ 0.0`. -/// * `rng` - is the random numbers generator. 
-/// -/// # Example -/// -/// ```rust -/// # use std::collections::BTreeSet; -/// # use ndarray::{arr1, arr2, arr3}; -/// # use reCTBN::params; -/// # use reCTBN::params::ParamsTrait; -/// # use reCTBN::params::Params::DiscreteStatesContinousTime; -/// # use reCTBN::process::NetworkProcess; -/// # use reCTBN::process::ctbn::CtbnNetwork; -/// # use reCTBN::tools::trajectory_generator; -/// # use reCTBN::tools::RandomGraphGenerator; -/// # use reCTBN::tools::UniformGraphGenerator; -/// use reCTBN::tools::RandomParametersGenerator; -/// use reCTBN::tools::UniformParametersGenerator; -/// # let mut net = CtbnNetwork::new(); -/// # let nodes_cardinality = 8; -/// # let domain_cardinality = 4; -/// # for node in 0..nodes_cardinality { -/// # // Create the domain for a discrete node -/// # let mut domain = BTreeSet::new(); -/// # for dvalue in 0..domain_cardinality { -/// # domain.insert(dvalue.to_string()); -/// # } -/// # // Create the parameters for a discrete node using the domain -/// # let param = params::DiscreteStatesContinousTimeParams::new( -/// # node.to_string(), -/// # domain -/// # ); -/// # //Create the node using the parameters -/// # let node = DiscreteStatesContinousTime(param); -/// # // Add the node to the network -/// # net.add_node(node).unwrap(); -/// # } -/// # -/// # // Initialize the Graph Generator using the one with an -/// # // uniform distribution -/// # let mut structure_generator = UniformGraphGenerator::new( -/// # 1.0/3.0, -/// # Some(7641630759785120) -/// # ); -/// # -/// # // Generate the graph directly on the network -/// # structure_generator.generate_graph(&mut net); -/// -/// // Initialize the parameters generator with uniform distributin -/// let mut cim_generator = UniformParametersGenerator::new( -/// 0.0..7.0, -/// Some(7641630759785120) -/// ); -/// -/// // Generate CIMs with uniformly distributed parameters. 
-/// cim_generator.generate_parameters(&mut net); -/// # -/// # for node in net.get_node_indices() { -/// # assert_eq!( -/// # Ok(()), -/// # net.get_node(node).validate_params() -/// # ); -/// } -/// ``` -pub struct UniformParametersGenerator { - interval: Range, - rng: ChaCha8Rng, -} - -impl RandomParametersGenerator for UniformParametersGenerator { - fn new(interval: Range, seed: Option) -> UniformParametersGenerator { - if interval.start < 0.0 || interval.end < 0.0 { - panic!( - "Interval must be entirely less or equal than 0, got {}..{}.", - interval.start, interval.end - ); - } - let rng: ChaCha8Rng = match seed { - Some(seed) => SeedableRng::seed_from_u64(seed), - None => SeedableRng::from_entropy(), - }; - UniformParametersGenerator { interval, rng } - } - - /// Generate CIMs with uniformly distributed parameters. - fn generate_parameters(&mut self, net: &mut T) { - for node in net.get_node_indices() { - let parent_set_state_space_cardinality: usize = net - .get_parent_set(node) - .iter() - .map(|x| net.get_node(*x).get_reserved_space_as_parent()) - .product(); - match &mut net.get_node_mut(node) { - params::Params::DiscreteStatesContinousTime(param) => { - let node_domain_cardinality = param.get_reserved_space_as_parent(); - let mut cim = Array3::::from_shape_fn( - ( - parent_set_state_space_cardinality, - node_domain_cardinality, - node_domain_cardinality, - ), - |_| self.rng.gen(), - ); - cim.axis_iter_mut(Axis(0)).for_each(|mut x| { - x.diag_mut().fill(0.0); - x.div_assign(&x.sum_axis(Axis(1)).insert_axis(Axis(1))); - let diag = Array1::::from_shape_fn(node_domain_cardinality, |_| { - self.rng.gen_range(self.interval.clone()) - }); - x.mul_assign(&diag.clone().insert_axis(Axis(1))); - // Recomputing the diagonal in order to reduce the issues caused by the - // loss of precision when validating the parameters. 
- let diag_sum = -x.sum_axis(Axis(1)); - x.diag_mut().assign(&diag_sum) - }); - param.set_cim_unchecked(cim); - } - } - } - } -} diff --git a/reCTBN/tests/parameter_learning.rs b/reCTBN/tests/parameter_learning.rs index 0a09a2a..2cbc185 100644 --- a/reCTBN/tests/parameter_learning.rs +++ b/reCTBN/tests/parameter_learning.rs @@ -6,7 +6,6 @@ use reCTBN::process::ctbn::*; use reCTBN::process::NetworkProcess; use reCTBN::parameter_learning::*; use reCTBN::params; -use reCTBN::params::Params::DiscreteStatesContinousTime; use reCTBN::tools::*; use utils::*; @@ -67,78 +66,18 @@ fn learn_binary_cim(pl: T) { )); } -fn generate_nodes( - net: &mut CtbnNetwork, - nodes_cardinality: usize, - nodes_domain_cardinality: usize -) { - for node_label in 0..nodes_cardinality { - net.add_node( - generate_discrete_time_continous_node( - node_label.to_string(), - nodes_domain_cardinality, - ) - ).unwrap(); - } -} - -fn learn_binary_cim_gen(pl: T) { - let mut net = CtbnNetwork::new(); - generate_nodes(&mut net, 2, 2); - - net.add_edge(0, 1); - - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - 1.0..6.0, - Some(6813071588535822) - ); - cim_generator.generate_parameters(&mut net); - - let p_gen = match net.get_node(1) { - DiscreteStatesContinousTime(p_gen) => p_gen, - }; - - let data = trajectory_generator(&net, 100, 100.0, Some(6347747169756259)); - let p_tj = match pl.fit(&net, &data, 1, None) { - DiscreteStatesContinousTime(p_tj) => p_tj, - }; - - assert_eq!( - p_tj.get_cim().as_ref().unwrap().shape(), - p_gen.get_cim().as_ref().unwrap().shape() - ); - assert!( - p_tj.get_cim().as_ref().unwrap().abs_diff_eq( - &p_gen.get_cim().as_ref().unwrap(), - 0.1 - ) - ); -} - #[test] fn learn_binary_cim_MLE() { let mle = MLE {}; learn_binary_cim(mle); } -#[test] -fn learn_binary_cim_MLE_gen() { - let mle = MLE {}; - learn_binary_cim_gen(mle); -} - #[test] fn learn_binary_cim_BA() { let ba = BayesianApproach { alpha: 1, tau: 1.0 }; learn_binary_cim(ba); } 
-#[test] -fn learn_binary_cim_BA_gen() { - let ba = BayesianApproach { alpha: 1, tau: 1.0 }; - learn_binary_cim_gen(ba); -} - fn learn_ternary_cim(pl: T) { let mut net = CtbnNetwork::new(); let n1 = net @@ -216,63 +155,18 @@ fn learn_ternary_cim(pl: T) { )); } -fn learn_ternary_cim_gen(pl: T) { - let mut net = CtbnNetwork::new(); - generate_nodes(&mut net, 2, 3); - - net.add_edge(0, 1); - - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - 4.0..6.0, - Some(6813071588535822) - ); - cim_generator.generate_parameters(&mut net); - - let p_gen = match net.get_node(1) { - DiscreteStatesContinousTime(p_gen) => p_gen, - }; - - let data = trajectory_generator(&net, 100, 200.0, Some(6347747169756259)); - let p_tj = match pl.fit(&net, &data, 1, None) { - DiscreteStatesContinousTime(p_tj) => p_tj, - }; - - assert_eq!( - p_tj.get_cim().as_ref().unwrap().shape(), - p_gen.get_cim().as_ref().unwrap().shape() - ); - assert!( - p_tj.get_cim().as_ref().unwrap().abs_diff_eq( - &p_gen.get_cim().as_ref().unwrap(), - 0.1 - ) - ); -} - #[test] fn learn_ternary_cim_MLE() { let mle = MLE {}; learn_ternary_cim(mle); } -#[test] -fn learn_ternary_cim_MLE_gen() { - let mle = MLE {}; - learn_ternary_cim_gen(mle); -} - #[test] fn learn_ternary_cim_BA() { let ba = BayesianApproach { alpha: 1, tau: 1.0 }; learn_ternary_cim(ba); } -#[test] -fn learn_ternary_cim_BA_gen() { - let ba = BayesianApproach { alpha: 1, tau: 1.0 }; - learn_ternary_cim_gen(ba); -} - fn learn_ternary_cim_no_parents(pl: T) { let mut net = CtbnNetwork::new(); let n1 = net @@ -340,63 +234,18 @@ fn learn_ternary_cim_no_parents(pl: T) { )); } -fn learn_ternary_cim_no_parents_gen(pl: T) { - let mut net = CtbnNetwork::new(); - generate_nodes(&mut net, 2, 3); - - net.add_edge(0, 1); - - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - 1.0..6.0, - Some(6813071588535822) - ); - cim_generator.generate_parameters(&mut net); - - let p_gen = match net.get_node(0) { - 
DiscreteStatesContinousTime(p_gen) => p_gen, - }; - - let data = trajectory_generator(&net, 100, 200.0, Some(6347747169756259)); - let p_tj = match pl.fit(&net, &data, 0, None) { - DiscreteStatesContinousTime(p_tj) => p_tj, - }; - - assert_eq!( - p_tj.get_cim().as_ref().unwrap().shape(), - p_gen.get_cim().as_ref().unwrap().shape() - ); - assert!( - p_tj.get_cim().as_ref().unwrap().abs_diff_eq( - &p_gen.get_cim().as_ref().unwrap(), - 0.1 - ) - ); -} - #[test] fn learn_ternary_cim_no_parents_MLE() { let mle = MLE {}; learn_ternary_cim_no_parents(mle); } -#[test] -fn learn_ternary_cim_no_parents_MLE_gen() { - let mle = MLE {}; - learn_ternary_cim_no_parents_gen(mle); -} - #[test] fn learn_ternary_cim_no_parents_BA() { let ba = BayesianApproach { alpha: 1, tau: 1.0 }; learn_ternary_cim_no_parents(ba); } -#[test] -fn learn_ternary_cim_no_parents_BA_gen() { - let ba = BayesianApproach { alpha: 1, tau: 1.0 }; - learn_ternary_cim_no_parents_gen(ba); -} - fn learn_mixed_discrete_cim(pl: T) { let mut net = CtbnNetwork::new(); let n1 = net @@ -583,66 +432,14 @@ fn learn_mixed_discrete_cim(pl: T) { )); } -fn learn_mixed_discrete_cim_gen(pl: T) { - let mut net = CtbnNetwork::new(); - generate_nodes(&mut net, 2, 3); - net.add_node( - generate_discrete_time_continous_node( - String::from("3"), - 4 - ) - ).unwrap(); - net.add_edge(0, 1); - net.add_edge(0, 2); - net.add_edge(1, 2); - - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - 1.0..8.0, - Some(6813071588535822) - ); - cim_generator.generate_parameters(&mut net); - - let p_gen = match net.get_node(2) { - DiscreteStatesContinousTime(p_gen) => p_gen, - }; - - let data = trajectory_generator(&net, 300, 300.0, Some(6347747169756259)); - let p_tj = match pl.fit(&net, &data, 2, None) { - DiscreteStatesContinousTime(p_tj) => p_tj, - }; - - assert_eq!( - p_tj.get_cim().as_ref().unwrap().shape(), - p_gen.get_cim().as_ref().unwrap().shape() - ); - assert!( - 
p_tj.get_cim().as_ref().unwrap().abs_diff_eq( - &p_gen.get_cim().as_ref().unwrap(), - 0.2 - ) - ); -} - #[test] fn learn_mixed_discrete_cim_MLE() { let mle = MLE {}; learn_mixed_discrete_cim(mle); } -#[test] -fn learn_mixed_discrete_cim_MLE_gen() { - let mle = MLE {}; - learn_mixed_discrete_cim_gen(mle); -} - #[test] fn learn_mixed_discrete_cim_BA() { let ba = BayesianApproach { alpha: 1, tau: 1.0 }; learn_mixed_discrete_cim(ba); } - -#[test] -fn learn_mixed_discrete_cim_BA_gen() { - let ba = BayesianApproach { alpha: 1, tau: 1.0 }; - learn_mixed_discrete_cim_gen(ba); -} diff --git a/reCTBN/tests/reward_evaluation.rs b/reCTBN/tests/reward_evaluation.rs new file mode 100644 index 0000000..355341c --- /dev/null +++ b/reCTBN/tests/reward_evaluation.rs @@ -0,0 +1,122 @@ +mod utils; + +use approx::assert_abs_diff_eq; +use ndarray::*; +use reCTBN::{ + params, + process::{ctbn::*, NetworkProcess, NetworkProcessState}, + reward::{reward_evaluation::*, reward_function::*, *}, +}; +use utils::generate_discrete_time_continous_node; + +#[test] +fn simple_factored_reward_function_binary_node_mc() { + let mut net = CtbnNetwork::new(); + let n1 = net + .add_node(generate_discrete_time_continous_node(String::from("n1"), 2)) + .unwrap(); + + let mut rf = FactoredRewardFunction::initialize_from_network_process(&net); + rf.get_transition_reward_mut(n1) + .assign(&arr2(&[[0.0, 0.0], [0.0, 0.0]])); + rf.get_instantaneous_reward_mut(n1) + .assign(&arr1(&[3.0, 3.0])); + + match &mut net.get_node_mut(n1) { + params::Params::DiscreteStatesContinousTime(param) => { + param.set_cim(arr3(&[[[-3.0, 3.0], [2.0, -2.0]]])).unwrap(); + } + } + + net.initialize_adj_matrix(); + + let s0: NetworkProcessState = vec![params::StateType::Discrete(0)]; + let s1: NetworkProcessState = vec![params::StateType::Discrete(1)]; + + let mc = MonteCarloReward::new(10000, 1e-1, 1e-1, 10.0, RewardCriteria::InfiniteHorizon { discount_factor: 1.0 }, Some(215)); + assert_abs_diff_eq!(3.0, mc.evaluate_state(&net, &rf, 
&s0), epsilon = 1e-2); + assert_abs_diff_eq!(3.0, mc.evaluate_state(&net, &rf, &s1), epsilon = 1e-2); + + let rst = mc.evaluate_state_space(&net, &rf); + assert_abs_diff_eq!(3.0, rst[&s0], epsilon = 1e-2); + assert_abs_diff_eq!(3.0, rst[&s1], epsilon = 1e-2); + + + let mc = MonteCarloReward::new(10000, 1e-1, 1e-1, 10.0, RewardCriteria::FiniteHorizon, Some(215)); + assert_abs_diff_eq!(30.0, mc.evaluate_state(&net, &rf, &s0), epsilon = 1e-2); + assert_abs_diff_eq!(30.0, mc.evaluate_state(&net, &rf, &s1), epsilon = 1e-2); + + +} + +#[test] +fn simple_factored_reward_function_chain_mc() { + let mut net = CtbnNetwork::new(); + let n1 = net + .add_node(generate_discrete_time_continous_node(String::from("n1"), 2)) + .unwrap(); + + let n2 = net + .add_node(generate_discrete_time_continous_node(String::from("n2"), 2)) + .unwrap(); + + let n3 = net + .add_node(generate_discrete_time_continous_node(String::from("n3"), 2)) + .unwrap(); + + net.add_edge(n1, n2); + net.add_edge(n2, n3); + + match &mut net.get_node_mut(n1) { + params::Params::DiscreteStatesContinousTime(param) => { + param.set_cim(arr3(&[[[-0.1, 0.1], [1.0, -1.0]]])).unwrap(); + } + } + + match &mut net.get_node_mut(n2) { + params::Params::DiscreteStatesContinousTime(param) => { + param + .set_cim(arr3(&[ + [[-0.01, 0.01], [5.0, -5.0]], + [[-5.0, 5.0], [0.01, -0.01]], + ])) + .unwrap(); + } + } + + + match &mut net.get_node_mut(n3) { + params::Params::DiscreteStatesContinousTime(param) => { + param + .set_cim(arr3(&[ + [[-0.01, 0.01], [5.0, -5.0]], + [[-5.0, 5.0], [0.01, -0.01]], + ])) + .unwrap(); + } + } + + + let mut rf = FactoredRewardFunction::initialize_from_network_process(&net); + rf.get_transition_reward_mut(n1) + .assign(&arr2(&[[0.0, 1.0], [1.0, 0.0]])); + + rf.get_transition_reward_mut(n2) + .assign(&arr2(&[[0.0, 1.0], [1.0, 0.0]])); + + rf.get_transition_reward_mut(n3) + .assign(&arr2(&[[0.0, 1.0], [1.0, 0.0]])); + + let s000: NetworkProcessState = vec![ + params::StateType::Discrete(1), + 
params::StateType::Discrete(0), + params::StateType::Discrete(0), + ]; + + let mc = MonteCarloReward::new(10000, 1e-1, 1e-1, 10.0, RewardCriteria::InfiniteHorizon { discount_factor: 1.0 }, Some(215)); + assert_abs_diff_eq!(2.447, mc.evaluate_state(&net, &rf, &s000), epsilon = 1e-1); + + let rst = mc.evaluate_state_space(&net, &rf); + assert_abs_diff_eq!(2.447, rst[&s000], epsilon = 1e-1); + +} diff --git a/reCTBN/tests/reward_function.rs b/reCTBN/tests/reward_function.rs index dcc5e69..853efc9 100644 --- a/reCTBN/tests/reward_function.rs +++ b/reCTBN/tests/reward_function.rs @@ -2,7 +2,7 @@ mod utils; use ndarray::*; use utils::generate_discrete_time_continous_node; -use reCTBN::{process::{NetworkProcess, ctbn::*, NetworkProcessState}, reward_function::*, params}; +use reCTBN::{process::{NetworkProcess, ctbn::*, NetworkProcessState}, reward::{*, reward_function::*}, params}; #[test] @@ -18,15 +18,15 @@ fn simple_factored_reward_function_binary_node() { let s0: NetworkProcessState = vec![params::StateType::Discrete(0)]; let s1: NetworkProcessState = vec![params::StateType::Discrete(1)]; - assert_eq!(rf.call(s0.clone(), None), Reward{transition_reward: 0.0, instantaneous_reward: 3.0}); - assert_eq!(rf.call(s1.clone(), None), Reward{transition_reward: 0.0, instantaneous_reward: 5.0}); + assert_eq!(rf.call(&s0, None), Reward{transition_reward: 0.0, instantaneous_reward: 3.0}); + assert_eq!(rf.call(&s1, None), Reward{transition_reward: 0.0, instantaneous_reward: 5.0}); - assert_eq!(rf.call(s0.clone(), Some(s1.clone())), Reward{transition_reward: 2.0, instantaneous_reward: 3.0}); - assert_eq!(rf.call(s1.clone(), Some(s0.clone())), Reward{transition_reward: 1.0, instantaneous_reward: 5.0}); + assert_eq!(rf.call(&s0, Some(&s1)), Reward{transition_reward: 2.0, instantaneous_reward: 3.0}); + assert_eq!(rf.call(&s1, Some(&s0)), Reward{transition_reward: 1.0, instantaneous_reward: 5.0}); - assert_eq!(rf.call(s0.clone(), Some(s0.clone())), Reward{transition_reward: 0.0, 
instantaneous_reward: 3.0}); - assert_eq!(rf.call(s1.clone(), Some(s1.clone())), Reward{transition_reward: 0.0, instantaneous_reward: 5.0}); + assert_eq!(rf.call(&s0, Some(&s0)), Reward{transition_reward: 0.0, instantaneous_reward: 3.0}); + assert_eq!(rf.call(&s1, Some(&s1)), Reward{transition_reward: 0.0, instantaneous_reward: 5.0}); } @@ -46,16 +46,16 @@ fn simple_factored_reward_function_ternary_node() { let s2: NetworkProcessState = vec![params::StateType::Discrete(2)]; - assert_eq!(rf.call(s0.clone(), Some(s1.clone())), Reward{transition_reward: 2.0, instantaneous_reward: 3.0}); - assert_eq!(rf.call(s0.clone(), Some(s2.clone())), Reward{transition_reward: 5.0, instantaneous_reward: 3.0}); + assert_eq!(rf.call(&s0, Some(&s1)), Reward{transition_reward: 2.0, instantaneous_reward: 3.0}); + assert_eq!(rf.call(&s0, Some(&s2)), Reward{transition_reward: 5.0, instantaneous_reward: 3.0}); - assert_eq!(rf.call(s1.clone(), Some(s0.clone())), Reward{transition_reward: 1.0, instantaneous_reward: 5.0}); - assert_eq!(rf.call(s1.clone(), Some(s2.clone())), Reward{transition_reward: 6.0, instantaneous_reward: 5.0}); + assert_eq!(rf.call(&s1, Some(&s0)), Reward{transition_reward: 1.0, instantaneous_reward: 5.0}); + assert_eq!(rf.call(&s1, Some(&s2)), Reward{transition_reward: 6.0, instantaneous_reward: 5.0}); - assert_eq!(rf.call(s2.clone(), Some(s0.clone())), Reward{transition_reward: 3.0, instantaneous_reward: 9.0}); - assert_eq!(rf.call(s2.clone(), Some(s1.clone())), Reward{transition_reward: 4.0, instantaneous_reward: 9.0}); + assert_eq!(rf.call(&s2, Some(&s0)), Reward{transition_reward: 3.0, instantaneous_reward: 9.0}); + assert_eq!(rf.call(&s2, Some(&s1)), Reward{transition_reward: 4.0, instantaneous_reward: 9.0}); } #[test] @@ -77,7 +77,6 @@ fn factored_reward_function_two_nodes() { rf.get_transition_reward_mut(n2).assign(&arr2(&[[12.0, 1.0],[2.0,12.0]])); rf.get_instantaneous_reward_mut(n2).assign(&arr1(&[3.0,5.0])); - let s00: NetworkProcessState = 
vec![params::StateType::Discrete(0), params::StateType::Discrete(0)]; let s01: NetworkProcessState = vec![params::StateType::Discrete(1), params::StateType::Discrete(0)]; let s02: NetworkProcessState = vec![params::StateType::Discrete(2), params::StateType::Discrete(0)]; @@ -87,32 +86,32 @@ fn factored_reward_function_two_nodes() { let s11: NetworkProcessState = vec![params::StateType::Discrete(1), params::StateType::Discrete(1)]; let s12: NetworkProcessState = vec![params::StateType::Discrete(2), params::StateType::Discrete(1)]; - assert_eq!(rf.call(s00.clone(), Some(s01.clone())), Reward{transition_reward: 2.0, instantaneous_reward: 6.0}); - assert_eq!(rf.call(s00.clone(), Some(s02.clone())), Reward{transition_reward: 5.0, instantaneous_reward: 6.0}); - assert_eq!(rf.call(s00.clone(), Some(s10.clone())), Reward{transition_reward: 2.0, instantaneous_reward: 6.0}); + assert_eq!(rf.call(&s00, Some(&s01)), Reward{transition_reward: 2.0, instantaneous_reward: 6.0}); + assert_eq!(rf.call(&s00, Some(&s02)), Reward{transition_reward: 5.0, instantaneous_reward: 6.0}); + assert_eq!(rf.call(&s00, Some(&s10)), Reward{transition_reward: 2.0, instantaneous_reward: 6.0}); - assert_eq!(rf.call(s01.clone(), Some(s00.clone())), Reward{transition_reward: 1.0, instantaneous_reward: 8.0}); - assert_eq!(rf.call(s01.clone(), Some(s02.clone())), Reward{transition_reward: 6.0, instantaneous_reward: 8.0}); - assert_eq!(rf.call(s01.clone(), Some(s11.clone())), Reward{transition_reward: 2.0, instantaneous_reward: 8.0}); + assert_eq!(rf.call(&s01, Some(&s00)), Reward{transition_reward: 1.0, instantaneous_reward: 8.0}); + assert_eq!(rf.call(&s01, Some(&s02)), Reward{transition_reward: 6.0, instantaneous_reward: 8.0}); + assert_eq!(rf.call(&s01, Some(&s11)), Reward{transition_reward: 2.0, instantaneous_reward: 8.0}); - assert_eq!(rf.call(s02.clone(), Some(s00.clone())), Reward{transition_reward: 3.0, instantaneous_reward: 12.0}); - assert_eq!(rf.call(s02.clone(), Some(s01.clone())), 
Reward{transition_reward: 4.0, instantaneous_reward: 12.0}); - assert_eq!(rf.call(s02.clone(), Some(s12.clone())), Reward{transition_reward: 2.0, instantaneous_reward: 12.0}); + assert_eq!(rf.call(&s02, Some(&s00)), Reward{transition_reward: 3.0, instantaneous_reward: 12.0}); + assert_eq!(rf.call(&s02, Some(&s01)), Reward{transition_reward: 4.0, instantaneous_reward: 12.0}); + assert_eq!(rf.call(&s02, Some(&s12)), Reward{transition_reward: 2.0, instantaneous_reward: 12.0}); - assert_eq!(rf.call(s10.clone(), Some(s11.clone())), Reward{transition_reward: 2.0, instantaneous_reward: 8.0}); - assert_eq!(rf.call(s10.clone(), Some(s12.clone())), Reward{transition_reward: 5.0, instantaneous_reward: 8.0}); - assert_eq!(rf.call(s10.clone(), Some(s00.clone())), Reward{transition_reward: 1.0, instantaneous_reward: 8.0}); + assert_eq!(rf.call(&s10, Some(&s11)), Reward{transition_reward: 2.0, instantaneous_reward: 8.0}); + assert_eq!(rf.call(&s10, Some(&s12)), Reward{transition_reward: 5.0, instantaneous_reward: 8.0}); + assert_eq!(rf.call(&s10, Some(&s00)), Reward{transition_reward: 1.0, instantaneous_reward: 8.0}); - assert_eq!(rf.call(s11.clone(), Some(s10.clone())), Reward{transition_reward: 1.0, instantaneous_reward: 10.0}); - assert_eq!(rf.call(s11.clone(), Some(s12.clone())), Reward{transition_reward: 6.0, instantaneous_reward: 10.0}); - assert_eq!(rf.call(s11.clone(), Some(s01.clone())), Reward{transition_reward: 1.0, instantaneous_reward: 10.0}); + assert_eq!(rf.call(&s11, Some(&s10)), Reward{transition_reward: 1.0, instantaneous_reward: 10.0}); + assert_eq!(rf.call(&s11, Some(&s12)), Reward{transition_reward: 6.0, instantaneous_reward: 10.0}); + assert_eq!(rf.call(&s11, Some(&s01)), Reward{transition_reward: 1.0, instantaneous_reward: 10.0}); - assert_eq!(rf.call(s12.clone(), Some(s10.clone())), Reward{transition_reward: 3.0, instantaneous_reward: 14.0}); - assert_eq!(rf.call(s12.clone(), Some(s11.clone())), Reward{transition_reward: 4.0, instantaneous_reward: 14.0}); 
- assert_eq!(rf.call(s12.clone(), Some(s02.clone())), Reward{transition_reward: 1.0, instantaneous_reward: 14.0}); + assert_eq!(rf.call(&s12, Some(&s10)), Reward{transition_reward: 3.0, instantaneous_reward: 14.0}); + assert_eq!(rf.call(&s12, Some(&s11)), Reward{transition_reward: 4.0, instantaneous_reward: 14.0}); + assert_eq!(rf.call(&s12, Some(&s02)), Reward{transition_reward: 1.0, instantaneous_reward: 14.0}); } diff --git a/reCTBN/tests/structure_learning.rs b/reCTBN/tests/structure_learning.rs index 3d7e230..9a69b45 100644 --- a/reCTBN/tests/structure_learning.rs +++ b/reCTBN/tests/structure_learning.rs @@ -117,50 +117,6 @@ fn check_compatibility_between_dataset_and_network(sl: T) { - let mut net = CtbnNetwork::new(); - generate_nodes(&mut net, 2, 3); - net.add_node( - generate_discrete_time_continous_node( - String::from("3"), - 4 - ) - ).unwrap(); - - net.add_edge(0, 1); - - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - 0.0..7.0, - Some(6813071588535822) - ); - cim_generator.generate_parameters(&mut net); - - let data = trajectory_generator(&net, 100, 30.0, Some(6347747169756259)); - - let mut net = CtbnNetwork::new(); - let _n1 = net - .add_node( - generate_discrete_time_continous_node(String::from("0"), - 3) - ).unwrap(); - let _net = sl.fit_transform(net, &data); -} - #[test] #[should_panic] pub fn check_compatibility_between_dataset_and_network_hill_climbing() { @@ -169,14 +125,6 @@ pub fn check_compatibility_between_dataset_and_network_hill_climbing() { check_compatibility_between_dataset_and_network(hl); } -#[test] -#[should_panic] -pub fn check_compatibility_between_dataset_and_network_hill_climbing_gen() { - let ll = LogLikelihood::new(1, 1.0); - let hl = HillClimbing::new(ll, None); - check_compatibility_between_dataset_and_network_gen(hl); -} - fn learn_ternary_net_2_nodes(sl: T) { let mut net = CtbnNetwork::new(); let n1 = net @@ -234,25 +182,6 @@ fn learn_ternary_net_2_nodes(sl: T) { 
assert_eq!(BTreeSet::new(), net.get_parent_set(n1)); } -fn learn_ternary_net_2_nodes_gen(sl: T) { - let mut net = CtbnNetwork::new(); - generate_nodes(&mut net, 2, 3); - - net.add_edge(0, 1); - - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - 0.0..7.0, - Some(6813071588535822) - ); - cim_generator.generate_parameters(&mut net); - - let data = trajectory_generator(&net, 100, 20.0, Some(6347747169756259)); - - let net = sl.fit_transform(net, &data); - assert_eq!(BTreeSet::from_iter(vec![0]), net.get_parent_set(1)); - assert_eq!(BTreeSet::new(), net.get_parent_set(0)); -} - #[test] pub fn learn_ternary_net_2_nodes_hill_climbing_ll() { let ll = LogLikelihood::new(1, 1.0); @@ -260,13 +189,6 @@ pub fn learn_ternary_net_2_nodes_hill_climbing_ll() { learn_ternary_net_2_nodes(hl); } -#[test] -pub fn learn_ternary_net_2_nodes_hill_climbing_ll_gen() { - let ll = LogLikelihood::new(1, 1.0); - let hl = HillClimbing::new(ll, None); - learn_ternary_net_2_nodes_gen(hl); -} - #[test] pub fn learn_ternary_net_2_nodes_hill_climbing_bic() { let bic = BIC::new(1, 1.0); @@ -274,13 +196,6 @@ pub fn learn_ternary_net_2_nodes_hill_climbing_bic() { learn_ternary_net_2_nodes(hl); } -#[test] -pub fn learn_ternary_net_2_nodes_hill_climbing_bic_gen() { - let bic = BIC::new(1, 1.0); - let hl = HillClimbing::new(bic, None); - learn_ternary_net_2_nodes_gen(hl); -} - fn get_mixed_discrete_net_3_nodes_with_data() -> (CtbnNetwork, Dataset) { let mut net = CtbnNetwork::new(); let n1 = net @@ -405,30 +320,6 @@ fn get_mixed_discrete_net_3_nodes_with_data() -> (CtbnNetwork, Dataset) { return (net, data); } -fn get_mixed_discrete_net_3_nodes_with_data_gen() -> (CtbnNetwork, Dataset) { - let mut net = CtbnNetwork::new(); - generate_nodes(&mut net, 2, 3); - net.add_node( - generate_discrete_time_continous_node( - String::from("3"), - 4 - ) - ).unwrap(); - - net.add_edge(0, 1); - net.add_edge(0, 2); - net.add_edge(1, 2); - - let mut cim_generator: 
UniformParametersGenerator = RandomParametersGenerator::new( - 0.0..7.0, - Some(6813071588535822) - ); - cim_generator.generate_parameters(&mut net); - - let data = trajectory_generator(&net, 300, 30.0, Some(6347747169756259)); - return (net, data); -} - fn learn_mixed_discrete_net_3_nodes(sl: T) { let (net, data) = get_mixed_discrete_net_3_nodes_with_data(); let net = sl.fit_transform(net, &data); @@ -437,14 +328,6 @@ fn learn_mixed_discrete_net_3_nodes(sl: T) { assert_eq!(BTreeSet::from_iter(vec![0, 1]), net.get_parent_set(2)); } -fn learn_mixed_discrete_net_3_nodes_gen(sl: T) { - let (net, data) = get_mixed_discrete_net_3_nodes_with_data_gen(); - let net = sl.fit_transform(net, &data); - assert_eq!(BTreeSet::new(), net.get_parent_set(0)); - assert_eq!(BTreeSet::from_iter(vec![0]), net.get_parent_set(1)); - assert_eq!(BTreeSet::from_iter(vec![0, 1]), net.get_parent_set(2)); -} - #[test] pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_ll() { let ll = LogLikelihood::new(1, 1.0); @@ -452,13 +335,6 @@ pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_ll() { learn_mixed_discrete_net_3_nodes(hl); } -#[test] -pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_ll_gen() { - let ll = LogLikelihood::new(1, 1.0); - let hl = HillClimbing::new(ll, None); - learn_mixed_discrete_net_3_nodes_gen(hl); -} - #[test] pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_bic() { let bic = BIC::new(1, 1.0); @@ -466,13 +342,6 @@ pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_bic() { learn_mixed_discrete_net_3_nodes(hl); } -#[test] -pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_bic_gen() { - let bic = BIC::new(1, 1.0); - let hl = HillClimbing::new(bic, None); - learn_mixed_discrete_net_3_nodes_gen(hl); -} - fn learn_mixed_discrete_net_3_nodes_1_parent_constraint(sl: T) { let (net, data) = get_mixed_discrete_net_3_nodes_with_data(); let net = sl.fit_transform(net, &data); @@ -481,14 +350,6 @@ fn learn_mixed_discrete_net_3_nodes_1_parent_constraint(sl: T) { - 
let (net, data) = get_mixed_discrete_net_3_nodes_with_data_gen(); - let net = sl.fit_transform(net, &data); - assert_eq!(BTreeSet::new(), net.get_parent_set(0)); - assert_eq!(BTreeSet::from_iter(vec![0]), net.get_parent_set(1)); - assert_eq!(BTreeSet::from_iter(vec![0]), net.get_parent_set(2)); -} - #[test] pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_ll_1_parent_constraint() { let ll = LogLikelihood::new(1, 1.0); @@ -496,13 +357,6 @@ pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_ll_1_parent_constraint() { learn_mixed_discrete_net_3_nodes_1_parent_constraint(hl); } -#[test] -pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_ll_1_parent_constraint_gen() { - let ll = LogLikelihood::new(1, 1.0); - let hl = HillClimbing::new(ll, Some(1)); - learn_mixed_discrete_net_3_nodes_1_parent_constraint_gen(hl); -} - #[test] pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_bic_1_parent_constraint() { let bic = BIC::new(1, 1.0); @@ -510,13 +364,6 @@ pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_bic_1_parent_constraint() learn_mixed_discrete_net_3_nodes_1_parent_constraint(hl); } -#[test] -pub fn learn_mixed_discrete_net_3_nodes_hill_climbing_bic_1_parent_constraint_gen() { - let bic = BIC::new(1, 1.0); - let hl = HillClimbing::new(bic, Some(1)); - learn_mixed_discrete_net_3_nodes_1_parent_constraint_gen(hl); -} - #[test] pub fn chi_square_compare_matrices() { let i: usize = 1; @@ -664,15 +511,6 @@ pub fn learn_ternary_net_2_nodes_ctpc() { learn_ternary_net_2_nodes(ctpc); } -#[test] -pub fn learn_ternary_net_2_nodes_ctpc_gen() { - let f = F::new(1e-6); - let chi_sq = ChiSquare::new(1e-4); - let parameter_learning = BayesianApproach { alpha: 1, tau:1.0 }; - let ctpc = CTPC::new(parameter_learning, f, chi_sq); - learn_ternary_net_2_nodes_gen(ctpc); -} - #[test] fn learn_mixed_discrete_net_3_nodes_ctpc() { let f = F::new(1e-6); @@ -681,12 +519,3 @@ fn learn_mixed_discrete_net_3_nodes_ctpc() { let ctpc = CTPC::new(parameter_learning, f, chi_sq); 
learn_mixed_discrete_net_3_nodes(ctpc); } - -#[test] -fn learn_mixed_discrete_net_3_nodes_ctpc_gen() { - let f = F::new(1e-6); - let chi_sq = ChiSquare::new(1e-4); - let parameter_learning = BayesianApproach { alpha: 1, tau:1.0 }; - let ctpc = CTPC::new(parameter_learning, f, chi_sq); - learn_mixed_discrete_net_3_nodes_gen(ctpc); -} diff --git a/reCTBN/tests/tools.rs b/reCTBN/tests/tools.rs index 59d8f27..806faef 100644 --- a/reCTBN/tests/tools.rs +++ b/reCTBN/tests/tools.rs @@ -1,15 +1,9 @@ -use std::ops::Range; - use ndarray::{arr1, arr2, arr3}; -use reCTBN::params::ParamsTrait; use reCTBN::process::ctbn::*; -use reCTBN::process::ctmp::*; use reCTBN::process::NetworkProcess; use reCTBN::params; use reCTBN::tools::*; -use utils::*; - #[macro_use] extern crate approx; @@ -88,164 +82,3 @@ fn dataset_wrong_shape() { let t2 = Trajectory::new(time, events); Dataset::new(vec![t1, t2]); } - -#[test] -#[should_panic] -fn uniform_graph_generator_wrong_density_1() { - let density = 2.1; - let _structure_generator: UniformGraphGenerator = RandomGraphGenerator::new( - density, - None - ); -} - -#[test] -#[should_panic] -fn uniform_graph_generator_wrong_density_2() { - let density = -0.5; - let _structure_generator: UniformGraphGenerator = RandomGraphGenerator::new( - density, - None - ); -} - -#[test] -fn uniform_graph_generator_right_densities() { - for density in [1.0, 0.75, 0.5, 0.25, 0.0] { - let _structure_generator: UniformGraphGenerator = RandomGraphGenerator::new( - density, - None - ); - } -} - -#[test] -fn uniform_graph_generator_generate_graph_ctbn() { - let mut net = CtbnNetwork::new(); - let nodes_cardinality = 0..=100; - let nodes_domain_cardinality = 2; - for node_label in nodes_cardinality { - net.add_node( - utils::generate_discrete_time_continous_node( - node_label.to_string(), - nodes_domain_cardinality, - ) - ).unwrap(); - } - let density = 1.0/3.0; - let mut structure_generator: UniformGraphGenerator = RandomGraphGenerator::new( - density, - 
Some(7641630759785120) - ); - structure_generator.generate_graph(&mut net); - let mut edges = 0; - for node in net.get_node_indices(){ - edges += net.get_children_set(node).len() - } - let nodes = net.get_node_indices().len() as f64; - let expected_edges = (density * nodes * (nodes - 1.0)).round() as usize; - let tolerance = ((expected_edges as f64)*0.05) as usize; // ±5% of tolerance - // As the way `generate_graph()` is implemented we can only reasonably - // expect the number of edges to be somewhere around the expected value. - assert!((expected_edges - tolerance) <= edges && edges <= (expected_edges + tolerance)); -} - -#[test] -#[should_panic] -fn uniform_graph_generator_generate_graph_ctmp() { - let mut net = CtmpProcess::new(); - let node_label = String::from("0"); - let node_domain_cardinality = 4; - net.add_node( - generate_discrete_time_continous_node( - node_label, - node_domain_cardinality - ) - ).unwrap(); - let density = 1.0/3.0; - let mut structure_generator: UniformGraphGenerator = RandomGraphGenerator::new( - density, - Some(7641630759785120) - ); - structure_generator.generate_graph(&mut net); -} - -#[test] -#[should_panic] -fn uniform_parameters_generator_wrong_density_1() { - let interval: Range = -2.0..-5.0; - let _cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - interval, - None - ); -} - -#[test] -#[should_panic] -fn uniform_parameters_generator_wrong_density_2() { - let interval: Range = -1.0..0.0; - let _cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - interval, - None - ); -} - -#[test] -fn uniform_parameters_generator_right_densities_ctbn() { - let mut net = CtbnNetwork::new(); - let nodes_cardinality = 0..=3; - let nodes_domain_cardinality = 9; - for node_label in nodes_cardinality { - net.add_node( - generate_discrete_time_continous_node( - node_label.to_string(), - nodes_domain_cardinality, - ) - ).unwrap(); - } - let density = 1.0/3.0; - let seed = Some(7641630759785120); - 
let interval = 0.0..7.0; - let mut structure_generator: UniformGraphGenerator = RandomGraphGenerator::new( - density, - seed - ); - structure_generator.generate_graph(&mut net); - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - interval, - seed - ); - cim_generator.generate_parameters(&mut net); - for node in net.get_node_indices() { - assert_eq!( - Ok(()), - net.get_node(node).validate_params() - ); - } -} - -#[test] -fn uniform_parameters_generator_right_densities_ctmp() { - let mut net = CtmpProcess::new(); - let node_label = String::from("0"); - let node_domain_cardinality = 4; - net.add_node( - generate_discrete_time_continous_node( - node_label, - node_domain_cardinality - ) - ).unwrap(); - let seed = Some(7641630759785120); - let interval = 0.0..7.0; - let mut cim_generator: UniformParametersGenerator = RandomParametersGenerator::new( - interval, - seed - ); - cim_generator.generate_parameters(&mut net); - for node in net.get_node_indices() { - assert_eq!( - Ok(()), - net.get_node(node).validate_params() - ); - } -}