@@ -40,14 +40,14 @@ fn learn_binary_cim<T: ParameterLearning> (pl: T) {
         }
     }
 
-    let data = trajectory_generator(&net, 100, 100.0);
+    let data = trajectory_generator(&net, 100, 100.0, Some(6347747169756259),);
     let (CIM, M, T) = pl.fit(&net, &data, 1, None);
     print!("CIM: {:?}\nM: {:?}\nT: {:?}\n", CIM, M, T);
     assert_eq!(CIM.shape(), [2, 2, 2]);
     assert!(CIM.abs_diff_eq(&arr3(&[
         [[-1.0, 1.0], [4.0, -4.0]],
         [[-6.0, 6.0], [2.0, -2.0]],
-    ]), 0.2));
+    ]), 0.1));
 }
 
 #[test]
@@ -60,8 +60,8 @@ fn learn_binary_cim_MLE() {
 
 #[test]
 fn learn_binary_cim_BA() {
     let ba = BayesianApproach{
-        default_alpha: 1,
-        default_tau: 1.0};
+        alpha: 1,
+        tau: 1.0};
     learn_binary_cim(ba);
 }
@@ -93,7 +93,7 @@ fn learn_ternary_cim<T: ParameterLearning> (pl: T) {
         }
     }
 
-    let data = trajectory_generator(&net, 100, 200.0);
+    let data = trajectory_generator(&net, 100, 200.0, Some(6347747169756259),);
     let (CIM, M, T) = pl.fit(&net, &data, 1, None);
     print!("CIM: {:?}\nM: {:?}\nT: {:?}\n", CIM, M, T);
     assert_eq!(CIM.shape(), [3, 3, 3]);
@@ -101,7 +101,7 @@ fn learn_ternary_cim<T: ParameterLearning> (pl: T) {
         [[-1.0, 0.5, 0.5], [3.0, -4.0, 1.0], [0.9, 0.1, -1.0]],
         [[-6.0, 2.0, 4.0], [1.5, -2.0, 0.5], [3.0, 1.0, -4.0]],
         [[-1.0, 0.1, 0.9], [2.0, -2.5, 0.5], [0.9, 0.1, -1.0]],
-    ]), 0.2));
+    ]), 0.1));
 }
 
@@ -115,8 +115,8 @@ fn learn_ternary_cim_MLE() {
 
 #[test]
 fn learn_ternary_cim_BA() {
     let ba = BayesianApproach{
-        default_alpha: 1,
-        default_tau: 1.0};
+        alpha: 1,
+        tau: 1.0};
     learn_ternary_cim(ba);
 }
@@ -148,13 +148,13 @@ fn learn_ternary_cim_no_parents<T: ParameterLearning> (pl: T) {
         }
     }
 
-    let data = trajectory_generator(&net, 100, 200.0);
+    let data = trajectory_generator(&net, 100, 200.0, Some(6347747169756259),);
     let (CIM, M, T) = pl.fit(&net, &data, 0, None);
     print!("CIM: {:?}\nM: {:?}\nT: {:?}\n", CIM, M, T);
     assert_eq!(CIM.shape(), [1, 3, 3]);
     assert!(CIM.abs_diff_eq(&arr3(&[[[-3.0, 2.0, 1.0],
         [1.5, -2.0, 0.5],
-        [0.4, 0.6, -1.0]]]), 0.2));
+        [0.4, 0.6, -1.0]]]), 0.1));
 }
 
@@ -168,8 +168,8 @@ fn learn_ternary_cim_no_parents_MLE() {
 
 #[test]
 fn learn_ternary_cim_no_parents_BA() {
     let ba = BayesianApproach{
-        default_alpha: 1,
-        default_tau: 1.0};
+        alpha: 1,
+        tau: 1.0};
     learn_ternary_cim_no_parents(ba);
 }
@@ -228,7 +228,7 @@ fn learn_mixed_discrete_cim<T: ParameterLearning> (pl: T) {
     }
 
-    let data = trajectory_generator(&net, 300, 300.0);
+    let data = trajectory_generator(&net, 300, 300.0, Some(6347747169756259),);
     let (CIM, M, T) = pl.fit(&net, &data, 2, None);
     print!("CIM: {:?}\nM: {:?}\nT: {:?}\n", CIM, M, T);
     assert_eq!(CIM.shape(), [9, 4, 4]);
@@ -244,7 +244,7 @@ fn learn_mixed_discrete_cim<T: ParameterLearning> (pl: T) {
         [[-2.0, 1.0, 0.6, 0.4], [2.6, -7.1, 1.4, 3.1], [5.0, 1.0, -8.0, 2.0], [1.4, 0.4, 0.2, -2.0]],
         [[-3.0, 1.0, 1.5, 0.5], [3.0, -6.0, 1.0, 2.0], [0.3, 0.5, -1.9, 1.1], [5.0, 1.0, 2.0, -8.0]],
         [[-2.6, 0.6, 0.2, 1.8], [2.0, -6.0, 3.0, 1.0], [0.1, 0.5, -1.3, 0.7], [0.8, 0.6, 0.2, -1.6]],
-    ]), 0.2));
+    ]), 0.1));
 }
 
 #[test]
@@ -257,7 +257,7 @@ fn learn_mixed_discrete_cim_MLE() {
 #[test]
 fn learn_mixed_discrete_cim_BA() {
     let ba = BayesianApproach{
-        default_alpha: 1,
-        default_tau: 1.0};
+        alpha: 1,
+        tau: 1.0};
     learn_mixed_discrete_cim(ba);
 }