We propagate the reward for both sides

The AI now properly chooses the optimal path for the active player
David Kruger 2025-06-27 16:07:33 -07:00
parent 0f9d4f0c4e
commit 6a33818238

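For context, the per-node bookkeeping that the new loops lean on is not part of this diff. A minimal sketch of what `increment_visits` and `record_player_reward` plausibly do, assuming a per-player reward map (the `NodeStats` name, the field layout, and the `Player` bounds are illustrative guesses, not the crate's real definitions):

```rust
use std::collections::HashMap;

// Illustrative stand-in for the arena node's statistics; the real node
// also carries `state`, `parent`, and `depth`, which the diff below uses.
struct NodeStats<Player: std::hash::Hash + Eq> {
    visits: u64,
    rewards: HashMap<Player, f64>, // cumulative reward per player
}

impl<Player: std::hash::Hash + Eq> NodeStats<Player> {
    fn increment_visits(&mut self) {
        self.visits += 1;
    }

    // After this commit, called once per player at every node on the
    // path back to the root, not just for the node's current player.
    fn record_player_reward(&mut self, player: Player, reward: f64) {
        *self.rewards.entry(player).or_insert(0.0) += reward;
    }
}
```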

```diff
@@ -69,18 +69,14 @@ fn standard_backprop<S: GameState>(
     let mut current_id: usize = node_id;
     loop {
         let node = arena.get_node_mut(current_id);
-        let player = node.state.get_current_player().clone();
-        match rewards.get(&player) {
-            Some(reward) => {
-                node.increment_visits();
-                node.record_player_reward(player, *reward);
-                if let Some(parent_id) = node.parent {
-                    current_id = parent_id;
-                } else {
-                    break;
-                }
-            }
-            None => (),
+        node.increment_visits();
+        for (player, reward) in rewards.iter() {
+            node.record_player_reward(player.clone(), *reward);
+        }
+        if let Some(parent_id) = node.parent {
+            current_id = parent_id;
+        } else {
+            break;
         }
     }
 }
```
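The point of recording every player's reward at every node shows up at selection time: each node can now be scored from the perspective of whichever player is to move there, rather than mixing both sides' outcomes into one number, which is what lets the AI "choose the optimal path for the active player". A sketch of that selection step, reusing the hypothetical `NodeStats` above and leaving out the UCT exploration term:

```rust
use std::cmp::Ordering;

// Pick the child with the best mean reward *for the given player*;
// the exploration bonus (e.g. UCT) is omitted for brevity.
fn best_child_for<'a, Player: std::hash::Hash + Eq>(
    active_player: &Player,
    children: &'a [NodeStats<Player>],
) -> Option<&'a NodeStats<Player>> {
    let mean = |n: &NodeStats<Player>| {
        let total = n.rewards.get(active_player).copied().unwrap_or(0.0);
        if n.visits == 0 { 0.0 } else { total / n.visits as f64 }
    };
    children
        .iter()
        .max_by(|a, b| mean(a).partial_cmp(&mean(b)).unwrap_or(Ordering::Equal))
}
```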
```diff
@@ -94,19 +90,14 @@ fn weighted_backprop<S: GameState>(
     let mut current_id: usize = node_id;
     loop {
         let node = arena.get_node_mut(current_id);
-        let player = node.state.get_current_player().clone();
         let weight = weight_for_depth(depth_factor, node.depth);
-        match rewards.get(&player) {
-            Some(reward) => {
-                node.increment_visits();
-                node.record_player_reward(player, (*reward) * weight);
-                if let Some(parent_id) = node.parent {
-                    current_id = parent_id;
-                } else {
-                    break;
-                }
-            }
-            None => (),
+        for (player, reward) in rewards.iter() {
+            node.record_player_reward(player.clone(), (*reward) * weight);
+        }
+        if let Some(parent_id) = node.parent {
+            current_id = parent_id;
+        } else {
+            break;
         }
     }
 }
```
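`weight_for_depth` is likewise only called, not defined, in this diff. One plausible shape, offered purely as an assumption, is a geometric falloff in the node's depth:

```rust
// Hypothetical body for `weight_for_depth`, which the diff above only
// calls: a geometric decay in node depth.
fn weight_for_depth(depth_factor: f64, depth: usize) -> f64 {
    depth_factor.powi(depth as i32)
}
```

With `depth_factor = 1.0` the weighted accumulation collapses to the standard one; factors below 1.0 discount rewards recorded at deeper nodes.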