diff --git a/reCTBN/src/reward.rs b/reCTBN/src/reward.rs
index b34db7f..f0edf2f 100644
--- a/reCTBN/src/reward.rs
+++ b/reCTBN/src/reward.rs
@@ -4,7 +4,6 @@ pub mod reward_evaluation;
 use std::collections::HashMap;
 
 use crate::process;
-use ndarray;
 
 /// Instantiation of reward function and instantaneous reward
 ///
@@ -22,7 +21,7 @@ pub struct Reward {
 
 /// The trait RewardFunction describe the methods that all the reward functions must satisfy
 
-pub trait RewardFunction {
+pub trait RewardFunction: Sync {
     /// Given the current state and the previous state, it compute the reward.
     ///
     /// # Arguments
diff --git a/reCTBN/src/reward/reward_evaluation.rs b/reCTBN/src/reward/reward_evaluation.rs
index cb7b8f1..431efde 100644
--- a/reCTBN/src/reward/reward_evaluation.rs
+++ b/reCTBN/src/reward/reward_evaluation.rs
@@ -1,8 +1,11 @@
 use std::collections::HashMap;
 
+use rayon::prelude::{IntoParallelIterator, ParallelIterator};
+
 use crate::params::{self, ParamsTrait};
 use crate::process;
+
 
 use crate::{
     process::NetworkProcessState,
     reward::RewardEvaluation,
@@ -55,7 +58,7 @@ impl RewardEvaluation for MonteCarloReward {
 
         let n_states: usize = variables_domain.iter().map(|x| x.len()).product();
 
-        (0..n_states)
+        (0..n_states).into_par_iter()
             .map(|s| {
                 let state: process::NetworkProcessState = variables_domain
                     .iter()
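
For context, the two hunks are connected: once the state-space loop in `MonteCarloReward` runs on a rayon parallel iterator, anything borrowed inside the `.map()` closure must be shareable across worker threads, which is what the new `Sync` supertrait on `RewardFunction` guarantees. Below is a minimal, self-contained sketch of that interaction; the `call` method, `ConstantReward` type, and `evaluate_all_states` function are hypothetical stand-ins for illustration, not the crate's actual API.

```rust
use rayon::prelude::{IntoParallelIterator, ParallelIterator};

// Hypothetical stand-in for the crate's `RewardFunction` trait; the `Sync`
// supertrait mirrors the change in the diff.
trait RewardFunction: Sync {
    fn call(&self, state: usize) -> f64;
}

// Toy implementation used only to exercise the parallel loop below.
struct ConstantReward(f64);

impl RewardFunction for ConstantReward {
    fn call(&self, _state: usize) -> f64 {
        self.0
    }
}

fn evaluate_all_states(reward_function: &dyn RewardFunction, n_states: usize) -> Vec<f64> {
    // The closure captures `&dyn RewardFunction`. rayon requires the closure to be
    // `Send + Sync`, which holds only because the trait object is `Sync`; without
    // the `Sync` supertrait this parallel map would not compile.
    (0..n_states)
        .into_par_iter()
        .map(|s| reward_function.call(s))
        .collect()
}

fn main() {
    let r = ConstantReward(1.0);
    println!("{:?}", evaluate_all_states(&r, 8));
}
```

Declaring `Sync` on the trait itself keeps the bound in one place, rather than repeating something like `R: RewardFunction + Sync` at every call site that hands a reward function to parallel code.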