Add untested FP

Mattia Giambirtone 2024-05-10 13:25:55 +02:00
parent 9b049cdcec
commit fdf71bbfce
1 changed file with 67 additions and 54 deletions

@@ -51,6 +51,11 @@ const
# Constants to configure FP
# (Futility pruning)
# Limit after which FP is disabled
FP_DEPTH_LIMIT = 1
# Advantage threshold
FP_EVAL_MARGIN = 125
NUM_KILLERS* = 2
MAX_DEPTH* = 255
# Constants used during move ordering
@@ -158,7 +163,7 @@ func isTactical(self: Move): bool {.inline.} =
func isQuiet(self: Move): bool {.inline.} =
## Returns whether the given move is
## a quiet
return not self.isCapture() and not self.isEnPassant()
return not self.isCapture() and not self.isEnPassant() and not self.isPromotion()
proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): int =
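For clarity, here is a standalone restatement of the updated isQuiet definition above, using a hypothetical flags record instead of the engine's packed Move type; all names here are assumptions for illustration only.

# Illustrative sketch only: a quiet move is now anything that is not a
# capture, not an en passant and not a promotion.
type MoveFlags = object
  capture, enPassant, promotion: bool

func isQuietMove(m: MoveFlags): bool =
  not m.capture and not m.enPassant and not m.promotion

when isMainModule:
  echo isQuietMove(MoveFlags())                  # true: plain pawn push or piece move
  echo isQuietMove(MoveFlags(promotion: true))   # false: promotions are no longer quiets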
@@ -376,6 +381,7 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
if depth <= 0:
# Quiescent search gain: 264.8 +/- 71.6
return self.qsearch(0, alpha, beta)
let staticEval = self.board.position.evaluate()
if ply > 0:
# Probe the transposition table to see if we can cause an early cutoff
let query = self.transpositionTable[].get(self.board.position.zobristKey, depth.uint8)
@@ -389,57 +395,56 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
of UpperBound:
if query.entry.score <= alpha:
return query.entry.score
let staticEval = self.board.position.evaluate()
if not isPV and not self.board.inCheck() and depth <= RFP_DEPTH_LIMIT and staticEval - RFP_EVAL_THRESHOLD * depth >= beta:
## Reverse futility pruning: if the side to move has a significant advantage
## in the current position and is not in check, return the position's static
## evaluation to encourage the engine to deal with any potential threats from
## the opponent. Since this optimization technique is not sound, we limit the
## depth at which it can trigger for safety purposes (it is also the reason
## why the "advantage" threshold scales with depth: the deeper we go, the more
## careful we want to be with our estimate for how much of an advantage we may
## or may not have)
return staticEval
if not isPV and depth > NMP_DEPTH_THRESHOLD and self.board.canNullMove() and staticEval >= beta:
# Null move pruning: it is reasonable to assume that
# it is always better to make a move than not to do
# so (with some exceptions noted below). To take advantage
# of this assumption, we bend the rules a little and perform
# a so-called "null move", basically passing our turn doing
# nothing, and then perform a shallower search for our opponent.
# If the shallow search fails high (i.e. produces a beta cutoff),
# then it is useless for us to search this position any further
# and we can just return the score outright. Since we only care about
# whether the opponent can beat beta and not the actual value, we
# can do a null window search and save some time, too. There are a
# few rules that need to be followed to use NMP properly, though: we
# must not be in check, we must not have null-moved already
# (that's what board.canNullMove() is checking) and the static
# evaluation of the position needs to already be better than or
# equal to beta
let
friendlyPawns = self.board.position.getBitboard(Pawn, self.board.position.sideToMove)
friendlyKing = self.board.position.getBitboard(King, self.board.position.sideToMove)
friendlyPieces = self.board.position.getOccupancyFor(self.board.position.sideToMove)
if friendlyPieces != (friendlyKing or friendlyPawns):
# NMP is disabled in endgame positions where only kings
# and (friendly) pawns are left because those are the ones
# where it is most likely that the null move assumption will
# not hold true due to zugzwang (fancy engines do zugzwang
# verification, but I literally cba to do that)
self.board.makeNullMove()
# We perform a shallower search because otherwise there would be no point in
# doing NMP at all!
var reduction: int
when defined(NMP2):
# Reduce more based on depth
reduction = NMP_BASE_REDUCTION + depth div NMP_DEPTH_REDUCTION
else:
reduction = NMP_BASE_REDUCTION
let score = -self.search(depth - reduction, ply + 1, -beta + 1, -beta, isPV=false)
self.board.unmakeMove()
if score >= beta:
return score
var
moves = newMoveList()
depth = depth
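For reference, below is a minimal self-contained sketch of the pruning guards above, factored into pure predicates. The constant values, parameter lists and function names are assumptions made for illustration; only the shape of the conditions mirrors the code in this hunk.

# Illustrative sketch only: constants and signatures are assumed, not the engine's.
const
  RfpDepthLimit = 7        # stands in for RFP_DEPTH_LIMIT
  RfpEvalThreshold = 100   # stands in for RFP_EVAL_THRESHOLD
  NmpDepthThreshold = 2    # stands in for NMP_DEPTH_THRESHOLD
  NmpBaseReduction = 3     # stands in for NMP_BASE_REDUCTION
  NmpDepthReduction = 3    # stands in for NMP_DEPTH_REDUCTION

func rfpApplies(staticEval, depth, beta: int; isPV, inCheck: bool): bool =
  ## Reverse futility pruning: fail high on a large static advantage at shallow
  ## depth. The margin scales with depth, so deeper nodes need a bigger edge.
  not isPV and not inCheck and depth <= RfpDepthLimit and
    staticEval - RfpEvalThreshold * depth >= beta

func nmpApplies(staticEval, depth, beta: int;
                isPV, inCheck, alreadyNullMoved, pawnsAndKingOnly: bool): bool =
  ## Whether trying a null move is worthwhile at all: not a PV node, not in
  ## check, no previous null move, not a pawn-and-king endgame (zugzwang risk),
  ## deep enough, and the static eval already meets beta.
  not isPV and not inCheck and not alreadyNullMoved and not pawnsAndKingOnly and
    depth > NmpDepthThreshold and staticEval >= beta

func nmpReduction(depth: int, scaleWithDepth: bool): int =
  ## Depth reduction for the null-move search (the NMP2 variant scales with depth).
  if scaleWithDepth:
    return NmpBaseReduction + depth div NmpDepthReduction
  return NmpBaseReduction

when isMainModule:
  echo rfpApplies(staticEval = 350, depth = 2, beta = 100,
                  isPV = false, inCheck = false)                       # true: 350 - 200 >= 100
  echo nmpApplies(staticEval = 120, depth = 6, beta = 100, isPV = false, inCheck = false,
                  alreadyNullMoved = false, pawnsAndKingOnly = false)  # true
  echo nmpReduction(depth = 9, scaleWithDepth = true)                  # 6 (3 + 9 div 3)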
@@ -463,6 +468,14 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
for i, move in moves:
if ply == 0 and self.searchMoves.len() > 0 and move notin self.searchMoves:
continue
when defined(FP):
if not isPV and depth <= FP_DEPTH_LIMIT and staticEval + FP_EVAL_MARGIN * depth < alpha and bestScore > mateScore() - MAX_DEPTH:
# Futility pruning: If a move cannot meaningfully improve alpha, prune it from the
# tree. Much like RFP, this is an unsound optimization (and a riskier one at that,
# apparently), so our depth limit and evaluation margins are very conservative
# compared to RFP. Also, we need to make sure the best score is not a mate score, or
# we'd risk pruning moves that evade checkmate
continue
self.board.doMove(move)
let
extension = self.getSearchExtension(move)
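A minimal self-contained sketch of the futility pruning condition above, with made-up scores. FpDepthLimit and FpEvalMargin mirror the constants added by this commit; MateScore is an assumed magnitude and the mate-score guard is deliberately simplified, so this illustrates the idea rather than the engine's exact check.

# Illustrative sketch only: made-up scores, simplified mate-score guard.
const
  FpDepthLimit = 1     # FP_DEPTH_LIMIT from this commit
  FpEvalMargin = 125   # FP_EVAL_MARGIN from this commit
  MaxDepth = 255       # MAX_DEPTH
  MateScore = 32_000   # assumed mate score magnitude

func fpPrunes(staticEval, depth, alpha, bestScore: int; isPV: bool): bool =
  ## True when a move would be skipped: the node sits so far below alpha that
  ## even an optimistic margin cannot raise it, we are not in a PV node, and
  ## the best score so far is not a mate score (simplified check).
  not isPV and depth <= FpDepthLimit and
    staticEval + FpEvalMargin * depth < alpha and
    abs(bestScore) < MateScore - MaxDepth

when isMainModule:
  # 150 below alpha at depth 1: a 125-point margin cannot reach alpha, so prune.
  echo fpPrunes(staticEval = -100, depth = 1, alpha = 50, bestScore = 0, isPV = false)  # true
  # Same numbers in a PV node: never prune.
  echo fpPrunes(staticEval = -100, depth = 1, alpha = 50, bestScore = 0, isPV = true)   # false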
@@ -508,12 +521,12 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
bestScore = max(score, bestScore)
if score >= beta:
if move.isQuiet():
# History heuristic: keep track of moves that caused a beta cutoff and order
# History heuristic: keep track of quiets that caused a beta cutoff and order
# them early in subsequent searches, as they might be really good later. A
# quadratic bonus wrt. depth is usually used (though some engines, namely
# Stockfish, use a linear bonus; maybe we can investigate this)
self.storeHistoryScore(sideToMove, move, score, depth * depth)
# Killer move heuristic: store moves that caused a beta cutoff according to the distance from
# Killer move heuristic: store quiets that caused a beta cutoff according to the distance from
# root that they occurred at, as they might be good refutations for future moves from the opponent.
# Elo gains: 33.5 +/- 19.3
self.storeKillerMove(ply, move)
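To make the two heuristics above concrete, here is a minimal self-contained sketch of the bookkeeping they imply. The table layout (indexing by from/to square only, ignoring side to move), the move record and the constants are assumptions for illustration, not necessarily how this engine stores them.

# Illustrative sketch only: simplified history and killer tables.
const
  MaxDepth = 255    # mirrors MAX_DEPTH
  NumKillers = 2    # mirrors NUM_KILLERS

type SimpleMove = object
  fromSq, toSq: int   # 0..63 square indices

var
  # history[from][to]: bonus accumulated by quiets that caused beta cutoffs
  history: array[64, array[64, int]]
  # killers[ply]: the most recent quiets that caused a cutoff at this ply
  killers: array[MaxDepth, array[NumKillers, SimpleMove]]

proc storeHistory(move: SimpleMove, depth: int) =
  ## Quadratic-in-depth bonus: cutoffs found at higher depth are weighted more.
  history[move.fromSq][move.toSq] += depth * depth

proc storeKiller(ply: int, move: SimpleMove) =
  ## Shift the existing killers down one slot and put the new move in front.
  for i in countdown(NumKillers - 1, 1):
    killers[ply][i] = killers[ply][i - 1]
  killers[ply][0] = move

when isMainModule:
  let m = SimpleMove(fromSq: 12, toSq: 28)   # e2 -> e4 in 0..63 indexing
  storeHistory(m, depth = 5)
  storeKiller(ply = 3, move = m)
  echo history[12][28]                                    # 25
  echo killers[3][0].fromSq, " -> ", killers[3][0].toSq   # 12 -> 28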