Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 4 additions & 13 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,20 +23,10 @@ all = [
"rfp",
"pvs",
"killers",
"histories"
]
all-no-killers = [
"ab",
"qs",
"tt-ab",
"tt-qs",
"mvv-lva",
"order-tt-mv-first",
"tt-cuttoffs",
"nmp",
"rfp",
"pvs",
"histories",
"lmr"
]

ab = []
qs = []
tt-ab = []
Expand All @@ -49,6 +39,7 @@ rfp = []
pvs = []
killers = []
histories = []
lmr = []

[dependencies]
rayon = "1.11.0"
Expand Down
109 changes: 91 additions & 18 deletions src/alpha_beta.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use crate::{
transposition_table::{Bound, TT},
};

use std::{cmp::min, sync::atomic::Ordering};
use std::{cmp::min, f64, sync::atomic::Ordering};

/// There's different approaches to this one as well. CPW suggests 150 * depth, smol.cs does 75 * depth.
/// Generally: The smaller `rfp_margin`, the more aggressively we prune.
Expand Down Expand Up @@ -38,6 +38,8 @@ pub fn alpha_beta<const PV_NODE: bool>(
*sd.local_seldepth = (*sd.local_seldepth).max(ply);
sd.total_alpha_beta_nodes.fetch_add(1, Ordering::Relaxed);

assert!(depth <= MAX_AB_DEPTH);

if sd.board.is_threefold_repetition() || sd.board.is_50_move_rule() {
return 0;
}
Expand All @@ -59,22 +61,17 @@ pub fn alpha_beta<const PV_NODE: bool>(
let mut tt_move: Option<EncodedMove> = None;
let tt_score;

// TODO: Implement Proper PVS if you want to use pv_node checks
//let pv_node = beta > alpha + 1; // TODO

if settings::TT_AB {
// TODO: legal detection to prevent collisions
#[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)]
if let Some(tt_hit) = TT.probe(sd.board.hash(), ply as i32) {
sd.total_tt_hits.fetch_add(1, Ordering::Relaxed);

let bound = tt_hit.bound();
// let depth_cond = tt_hit.depth() >= depth as i32 - 3;

tt_move = tt_hit.best_move();
tt_score = tt_hit.score();

// TODO: When using PVS, add !pv_node as an additional condition
let depth_req = depth as i32 + i32::from(tt_score >= beta);

if settings::TT_CUTTOFFS
Expand Down Expand Up @@ -130,6 +127,7 @@ pub fn alpha_beta<const PV_NODE: bool>(
}
}
}

// set the best evaluation very low to begin with
let mut best_eval = i32::MIN + 1;
let mut best_move: Option<EncodedMove> = None;
Expand All @@ -141,31 +139,60 @@ pub fn alpha_beta<const PV_NODE: bool>(

let mut quiets_tried: ArrayVec<EncodedMove, 256> = ArrayVec::new();
let mut movepicker = MovePicker::new(tt_move, killer_mv, false);
let mut i = 0;
let mut moves_visited = 0;

while let Some(mv) = movepicker.next(sd.board) {
i += 1;
moves_visited += 1;
// cancels search if time is over
if sd.stop.load(Ordering::Relaxed) {
sd.timeout_occurred.store(true, Ordering::Relaxed);
return 0;
}
sd.board.make_move(mv);
let mut eval;
if i == 1 || !settings::PVS {
if moves_visited == 1 || !settings::PVS {
// Principal Variation Search
// We assume that the first move from the move ordering is the PV move;
// Since the TT move, if existent, is in first place anyway, this automatically includes information from shallower search depths

eval = -alpha_beta::<true>(depth - 1, -beta, -alpha, sd, ply + 1, true);
} else {
// Search non-PV moves with null window
eval = -alpha_beta::<false>(depth - 1, -alpha - 1, -alpha, sd, ply + 1, true);
// Re-search if non-PV move raised Alpha
// ONLY do re-searching if the current Node is on PV - we don't care for re-searching OffPV nodes
// (if they actually improve over the PV that variation will be searched again at the last PV node anyway)
if eval > alpha && PV_NODE {
eval = -alpha_beta::<true>(depth - 1, -beta, -alpha, sd, ply + 1, true);
let mut reduction = 1;

#[allow(clippy::useless_let_if_seq, clippy::cast_possible_truncation)]
if settings::LMR && moves_visited >= settings::MOVES_BEFORE_LMR && depth > 2 {
// ensure we always reduce less than `depth`, otherwise we run into overflows and search until the end of the universe
reduction +=
LMR_REDUCTION[depth.clamp(0, 63)][moves_visited.clamp(0, 63)].min(depth as u32);
}

debug_assert!(
reduction as usize <= depth,
"have fun at the end of the universe"
);

eval = -alpha_beta::<false>(
depth - reduction as usize,
-alpha - 1,
-alpha,
sd,
ply + 1,
true,
);

if eval > alpha {
// If our shallow-depth search raised alpha, we perform a search at full depth but still with a null window
// in hopes that we can still avoid a full search
if reduction > 1 {
// Search non-PV moves with null window
sd.total_lmr_researches.fetch_add(1, Ordering::Relaxed);
eval = -alpha_beta::<false>(depth - 1, -alpha - 1, -alpha, sd, ply + 1, true);
}
// ONLY do full-window full-depth re-searching if the current Node is on PV - we don't care for re-searching OffPV nodes
// (if they actually improve over the PV that variation will be searched again at the last PV node anyway)
if eval > alpha && PV_NODE {
sd.total_pvs_researches.fetch_add(1, Ordering::Relaxed);
eval = -alpha_beta::<true>(depth - 1, -beta, -alpha, sd, ply + 1, true);
}
}
}

Expand Down Expand Up @@ -194,7 +221,7 @@ pub fn alpha_beta<const PV_NODE: bool>(

// When moves_visited is still 0, no legal move was found.
// Returns the mate score (very low) when in check, but adds the ply so a mate found later gets a worse (less negative) eval, since depth decreases the further you go.
if i == 0 {
if moves_visited == 0 {
if sd.board.is_in_check() {
#[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)]
return -MATE_SCORE + (ply as i32);
Expand Down Expand Up @@ -248,3 +275,49 @@ pub fn alpha_beta<const PV_NODE: bool>(

best_eval
}

// const LMP_LAZYLOCK = s

// store the base LMR reductions statically
const LMR_REDUCTION: [[u32; 64]; 64] = {
let mut out = [[0u32; 64]; 64];

let mut depth = 1;
while depth < 64 {
let mut moves_visited = 1;
while moves_visited < 64 {
out[depth][moves_visited] = lmr_reduction(depth, moves_visited);
// assert!(lmr_reduction(depth, moves_visited) != 0, "we have a non-zero value");
moves_visited += 1;
}
depth += 1;
}
out
};

#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
const fn lmr_reduction(depth: usize, moves_visited: usize) -> u32 {
(1.35 + int_ln(depth) as f64 * int_ln(moves_visited) as f64 / 2.75) as u32
}

/// Method really is only there because `f64::ln` is not a const function and so cannot be called at compile time
/// This here expresses a ln on integers using the constant log 2
#[allow(
clippy::cast_sign_loss,
clippy::cast_precision_loss,
clippy::cast_possible_truncation
)]
const fn int_ln(n: usize) -> u32 {
(1.0 / f64::consts::LOG2_E * n.ilog2() as f64) as u32
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn print_lmp_board() {
println!("{}", u32::from(false));
println!("{LMR_REDUCTION:?}");
}
}
5 changes: 3 additions & 2 deletions src/communication.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,14 @@ use crate::{
};
use std::{
env,
io::{self, BufRead, Write},
io::{self, BufRead, BufReader, Write},
process::exit,
time::Duration,
};

pub fn handle_communication(board: &mut Board) {
let stdin = io::stdin();
let reader = BufReader::with_capacity(65536, stdin.lock());
let mut stdout = io::stdout();

let args: Vec<String> = env::args().collect();
Expand All @@ -34,7 +35,7 @@ pub fn handle_communication(board: &mut Board) {
exit(0);
}

for line_res in stdin.lock().lines() {
for line_res in reader.lines() {
let Ok(line) = line_res else { break };
let mut parts = line.split_whitespace();

Expand Down
18 changes: 15 additions & 3 deletions src/iterative_deepening.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@ pub fn iterative_deepening(
println!("AB Nodes: Nodes visited in standard Alpha-Beta");
println!("QS nodes: Nodes visited in Quiescence search");
println!("TT Hits : Times a TT entry was reused");
println!("LMR Res : Total Late Move Reduction re-searches");
println!("PVS Res : Total Principal Variation Search re-searches");
println!("GlobTime: Total elapsed time since search started (ms)");
println!(
"EBF : Effective Branch Factor (Relative to the previous depth iteration)"
Expand All @@ -62,7 +64,7 @@ pub fn iterative_deepening(
}

println!(
"{:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} PV",
"{:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} PV",
"Depth",
"Seldepth",
"Score",
Expand All @@ -73,6 +75,8 @@ pub fn iterative_deepening(
"AB Nodes",
"QS Nodes",
"TT Hits",
"LMR Res",
"PVS Res",
"GlobTime",
"EBF",
"AB EBF"
Expand All @@ -95,7 +99,7 @@ pub fn iterative_deepening(
let global_start = Instant::now();
let mut previouse_iteration_ab_nodes: usize = 0;
let mut previouse_iteration_qs_nodes: usize = 0;
let mut killers = [EncodedMove(0); MAX_AB_DEPTH];
let mut killers = [EncodedMove(0); MAX_AB_DEPTH + 1];

HISTORY_TABLE.age();
for depth in 1..=max_depth {
Expand Down Expand Up @@ -167,6 +171,12 @@ pub fn iterative_deepening(
#[allow(clippy::cast_precision_loss)]
if debug {
let iteration_tt_hits = iteration_search_data.total_tt_hits.load(Ordering::Relaxed);
let iteration_lmr_researches = iteration_search_data
.total_lmr_researches
.load(Ordering::Relaxed);
let iteration_pvs_researches = iteration_search_data
.total_pvs_researches
.load(Ordering::Relaxed);
let global_duration = global_start.elapsed();

let current_total_nodes = iteration_nodes as f64;
Expand All @@ -186,7 +196,7 @@ pub fn iterative_deepening(
};

println!(
"{:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {}",
"{:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {:>8} {}",
depth,
seldepth,
best_eval_overall,
Expand All @@ -197,6 +207,8 @@ pub fn iterative_deepening(
format_usize(iteration_ab_nodes),
format_usize(iteration_qs_nodes),
format_usize(iteration_tt_hits),
format_usize(iteration_lmr_researches),
format_usize(iteration_pvs_researches),
format_usize(global_duration.as_millis() as usize),
format_f64(ebf),
format_f64(ab_ebf),
Expand Down
11 changes: 0 additions & 11 deletions src/move_scoring.rs
Original file line number Diff line number Diff line change
Expand Up @@ -140,12 +140,6 @@ const MAX_HISTORY_VALUE: i32 = i16::MAX as i32;
/// vectors are two-dimensional arrays indexed by `[from_square][to_square]`
pub struct HistoryTable([[AtomicI32; 64]; 64], [[AtomicI32; 64]; 64]);

// impl Default for HistoryTable {
// fn default() -> Self {
// Self::new()
// }
// }

impl HistoryTable {
pub fn new() -> Self {
Self(
Expand Down Expand Up @@ -183,11 +177,6 @@ impl HistoryTable {
}
}

#[allow(dead_code)]
pub fn get_relative_history(&self, _mv: DecodedMove, _color: Color) -> i32 {
todo!()
}

/// Age history values between search iterations
/// I have no idea why this is useful, but the Relative History Paper (Winands et. al.) suggests it
/// and apparently Histories lose ELO without it
Expand Down
7 changes: 6 additions & 1 deletion src/settings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,20 @@ pub const RFP: bool = cfg!(feature = "rfp");
pub const PVS: bool = cfg!(feature = "pvs");
pub const KILLERS: bool = cfg!(feature = "killers");
pub const HISTORIES: bool = cfg!(feature = "histories");
pub const LMR: bool = cfg!(feature = "lmr");

// Can be tweaked, hava an effect on elo
// These can be tweaked, have an effect on elo
pub const QS_CHECK_EVASION_LIMIT: usize = 2;
pub const MAX_QS_DEPTH: usize = 12;
// Maximum search depth. In practice likely never reached, but has an effect on memory usage of the program
pub const MAX_AB_DEPTH: usize = 128;
// How far a static eval needs to be over beta to initiate an RFP cutoff
pub const RFP_MARGIN: usize = 50;

// How early we start Late Move Reductions
// The better our move ordering is, the earlier we can do LMR, the more we hopefully prune
pub const MOVES_BEFORE_LMR: usize = 4;

#[inline]
pub fn repr() -> String {
format!(
Expand Down
8 changes: 6 additions & 2 deletions src/types/search_data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,16 @@ pub struct SharedSearchData<'sd> {
pub board: &'sd mut Board,
pub stop: &'sd Arc<AtomicBool>,
pub local_seldepth: &'sd mut usize,
pub killers: &'sd mut [EncodedMove; MAX_AB_DEPTH],
pub killers: &'sd mut [EncodedMove; MAX_AB_DEPTH + 1],
pub ab_ply: usize,

// From here these are only used for additional info collection
pub total_alpha_beta_nodes: AtomicUsize,
pub total_qs_nodes: AtomicUsize,
pub total_eval_nodes: AtomicUsize,
pub total_tt_hits: AtomicUsize,
pub total_lmr_researches: AtomicUsize,
pub total_pvs_researches: AtomicUsize,
// stores whether the current search got cancelled due to timeout
// TODO find out whether this can be eliminated in favor of using only `stop`
pub timeout_occurred: AtomicBool,
Expand All @@ -32,7 +34,7 @@ impl<'sd> SharedSearchData<'sd> {
board: &'sd mut Board,
stop: &'sd Arc<AtomicBool>,
local_seldepth: &'sd mut usize,
killers: &'sd mut [EncodedMove; MAX_AB_DEPTH],
killers: &'sd mut [EncodedMove; MAX_AB_DEPTH + 1],
) -> Self {
Self {
board,
Expand All @@ -45,6 +47,8 @@ impl<'sd> SharedSearchData<'sd> {
total_qs_nodes: AtomicUsize::new(0),
total_eval_nodes: AtomicUsize::new(0),
total_tt_hits: AtomicUsize::new(0),
total_lmr_researches: AtomicUsize::new(0),
total_pvs_researches: AtomicUsize::new(0),
}
}
}
2 changes: 1 addition & 1 deletion testing/sprt.sh
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ run_sprt() {
-openings file=8moves_v3.pgn format=pgn order=random \
-concurrency 8 \
-rounds 5000 \
-recover \
-log append=false engine=true file=sprt.log \
-sprt elo0=0 elo1=10 alpha=0.05 beta=0.05
}
run_sprt $@
Loading