From 5d676be18033aee322142acef9e7d439e499071f Mon Sep 17 00:00:00 2001
From: Alessandro Bregoli
Date: Mon, 16 Jan 2023 06:50:24 +0100
Subject: [PATCH] parallelized re

---
 reCTBN/src/reward.rs                   | 3 +--
 reCTBN/src/reward/reward_evaluation.rs | 5 ++++-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/reCTBN/src/reward.rs b/reCTBN/src/reward.rs
index b34db7f..f0edf2f 100644
--- a/reCTBN/src/reward.rs
+++ b/reCTBN/src/reward.rs
@@ -4,7 +4,6 @@ pub mod reward_evaluation;
 use std::collections::HashMap;
 
 use crate::process;
-use ndarray;
 
 /// Instantiation of reward function and instantaneous reward
 ///
@@ -22,7 +21,7 @@ pub struct Reward {
 
 /// The trait RewardFunction describe the methods that all the reward functions must satisfy
 
-pub trait RewardFunction {
+pub trait RewardFunction: Sync {
     /// Given the current state and the previous state, it compute the reward.
     ///
     /// # Arguments
diff --git a/reCTBN/src/reward/reward_evaluation.rs b/reCTBN/src/reward/reward_evaluation.rs
index cb7b8f1..431efde 100644
--- a/reCTBN/src/reward/reward_evaluation.rs
+++ b/reCTBN/src/reward/reward_evaluation.rs
@@ -1,8 +1,11 @@
 use std::collections::HashMap;
 
+use rayon::prelude::{IntoParallelIterator, ParallelIterator};
+
 use crate::params::{self, ParamsTrait};
 use crate::process;
+
 use crate::{
     process::NetworkProcessState,
     reward::RewardEvaluation,
@@ -55,7 +58,7 @@ impl RewardEvaluation for MonteCarloReward {
         let n_states: usize = variables_domain.iter().map(|x| x.len()).product();
 
-        (0..n_states)
+        (0..n_states).into_par_iter()
             .map(|s| {
                 let state: process::NetworkProcessState = variables_domain
                     .iter()
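
Note: the patch swaps the sequential loop over the state space for a rayon
parallel iterator, which is why `RewardFunction` gains the `Sync` supertrait:
rayon may invoke the mapping closure from several worker threads at once, so
anything the closure borrows (here a reward function) must be safely
shareable between threads. Below is a minimal, self-contained sketch of that
same pattern, not code from reCTBN; `ConstReward`, `call`, and `evaluate_all`
are hypothetical names used only for illustration.

    use rayon::prelude::{IntoParallelIterator, ParallelIterator};

    // Stand-in for a reward-function trait (hypothetical; not reCTBN's).
    // The `Sync` supertrait is what lets a `&dyn RewardFunction` be
    // captured by rayon's worker-thread closures.
    trait RewardFunction: Sync {
        fn call(&self, state: usize) -> f64;
    }

    struct ConstReward(f64);

    impl RewardFunction for ConstReward {
        fn call(&self, _state: usize) -> f64 {
            self.0
        }
    }

    fn evaluate_all(rf: &dyn RewardFunction, n_states: usize) -> Vec<f64> {
        // Without `RewardFunction: Sync` this would not compile:
        // `ParallelIterator::map` requires its closure, and everything the
        // closure captures (here `rf`), to be `Send + Sync`.
        (0..n_states)
            .into_par_iter()
            .map(|s| rf.call(s))
            .collect()
    }

    fn main() {
        let rf = ConstReward(1.0);
        println!("{:?}", evaluate_all(&rf, 8));
    }

One convenient property of this pattern: a range is an indexed parallel
iterator, so collecting into a `Vec` preserves the original state order even
though the per-state work runs concurrently.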