We propagate the reward for both sides.
The AI now properly chooses the optimal path for the active player.
parent 0f9d4f0c4e
commit 6a33818238
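The change fixes the backpropagation step: previously a node only recorded a reward when its side-to-move appeared in the rewards map, so the opponent's nodes never accumulated value and selection could not compare moves from both perspectives. Now every `(player, reward)` pair is recorded at every node on the path to the root. The rewards map itself is built elsewhere and is not shown in this commit; below is a minimal sketch of how such a per-player map might be filled from a finished rollout. The `Player` enum and the +1/-1/0 scoring are illustrative assumptions, not code from this repository.

```rust
use std::collections::HashMap;

// Hypothetical two-player marker; the diff only tells us the real
// player type is cloneable and usable as a HashMap key.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum Player {
    One,
    Two,
}

// Score a finished rollout for *both* sides: +1 for the winner,
// -1 for the loser, 0.0 each on a draw (scoring scheme assumed).
fn terminal_rewards(winner: Option<Player>) -> HashMap<Player, f64> {
    let mut rewards = HashMap::new();
    match winner {
        Some(w) => {
            let loser = if w == Player::One { Player::Two } else { Player::One };
            rewards.insert(w, 1.0);
            rewards.insert(loser, -1.0);
        }
        None => {
            rewards.insert(Player::One, 0.0);
            rewards.insert(Player::Two, 0.0);
        }
    }
    rewards
}
```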
@@ -69,18 +69,14 @@ fn standard_backprop<S: GameState>(
     let mut current_id: usize = node_id;
     loop {
         let node = arena.get_node_mut(current_id);
-        let player = node.state.get_current_player().clone();
-        match rewards.get(&player) {
-            Some(reward) => {
-                node.increment_visits();
-                node.record_player_reward(player, *reward);
-                if let Some(parent_id) = node.parent {
-                    current_id = parent_id;
-                } else {
-                    break;
-                }
-            }
-            None => (),
+        node.increment_visits();
+        for (player, reward) in rewards.iter() {
+            node.record_player_reward(player.clone(), *reward);
+        }
+        if let Some(parent_id) = node.parent {
+            current_id = parent_id;
+        } else {
+            break;
         }
     }
 }
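Neither the arena nor the node type appears in this diff; for readers following along, here is a compiling sketch of the shapes the hunk calls into — `get_node_mut`, `increment_visits`, `record_player_reward`, a `parent` index, and a `GameState` trait with a current-player accessor. Everything beyond those names (field layout, trait body, bounds) is an assumption for illustration.

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Assumed shape of the GameState trait from the hunk headers; only
// the method the old code called is sketched here.
trait GameState {
    type Player: Clone + Eq + Hash;
    fn get_current_player(&self) -> &Self::Player;
}

struct Node<S: GameState> {
    state: S,
    parent: Option<usize>,             // arena index of the parent, None at the root
    depth: usize,                      // used by weight_for_depth below
    visits: u64,
    rewards: HashMap<S::Player, f64>,  // accumulated reward per player
}

impl<S: GameState> Node<S> {
    fn increment_visits(&mut self) {
        self.visits += 1;
    }

    fn record_player_reward(&mut self, player: S::Player, reward: f64) {
        *self.rewards.entry(player).or_insert(0.0) += reward;
    }
}

// Index-based arena: nodes refer to each other by Vec index, which is
// why backprop walks parent indices instead of references.
struct Arena<S: GameState> {
    nodes: Vec<Node<S>>,
}

impl<S: GameState> Arena<S> {
    fn get_node_mut(&mut self, id: usize) -> &mut Node<S> {
        &mut self.nodes[id]
    }
}
```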
@@ -94,19 +90,14 @@ fn weighted_backprop<S: GameState>(
     let mut current_id: usize = node_id;
     loop {
         let node = arena.get_node_mut(current_id);
-        let player = node.state.get_current_player().clone();
         let weight = weight_for_depth(depth_factor, node.depth);
-        match rewards.get(&player) {
-            Some(reward) => {
-                node.increment_visits();
-                node.record_player_reward(player, (*reward) * weight);
-                if let Some(parent_id) = node.parent {
-                    current_id = parent_id;
-                } else {
-                    break;
-                }
-            }
-            None => (),
+        for (player, reward) in rewards.iter() {
+            node.record_player_reward(player.clone(), (*reward) * weight);
+        }
+        if let Some(parent_id) = node.parent {
+            current_id = parent_id;
+        } else {
+            break;
         }
     }
 }
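The second hunk applies the same fix to the depth-weighted variant, scaling each reward by a per-node weight before recording it. `weight_for_depth` is defined elsewhere in the file and is not shown here; one plausible implementation consistent with the `(depth_factor, node.depth)` call site is exponential decay, so rewards found deeper in the tree count for less. This is a hypothetical sketch, not the repository's actual function.

```rust
// Hypothetical weighting: exponential decay in depth, controlled by a
// depth_factor assumed to lie in (0.0, 1.0]. With depth_factor = 1.0
// every node gets weight 1.0 and weighted backprop matches the
// standard one.
fn weight_for_depth(depth_factor: f64, depth: usize) -> f64 {
    depth_factor.powi(depth as i32)
}
```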