Implement RFP (elo gains 41.5 +/- 21.9)

Mattia Giambirtone 2024-05-10 13:16:12 +02:00
parent b9fbb9eb3f
commit 9b049cdcec
1 changed file with 36 additions and 5 deletions
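Reverse futility pruning (RFP), the technique this commit adds, cuts a node off without searching it when the static evaluation already beats beta by a margin that scales with the remaining depth. The sketch below illustrates only that condition; the helper name and the plain integer types are invented for the example and are not part of the engine's code.

const
  RFP_EVAL_THRESHOLD = 100 # advantage margin per ply of remaining depth
  RFP_DEPTH_LIMIT = 7      # RFP is only attempted at shallow depths

# Hypothetical helper, not part of the diff below: returns true when the
# node may be pruned and the static eval returned directly
func canReverseFutilityPrune(staticEval, beta, depth: int,
                             isPV, inCheck: bool): bool =
  return not isPV and not inCheck and depth <= RFP_DEPTH_LIMIT and
    staticEval - RFP_EVAL_THRESHOLD * depth >= beta

when isMainModule:
  # At depth 3 with beta = 50, a static eval of 400 clears the
  # 100 * 3 = 300 margin (400 - 300 >= 50), so the node is pruned
  echo canReverseFutilityPrune(400, 50, 3, false, false) # true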


@@ -31,12 +31,26 @@ const
# Constants to configure how aggressively
# NMP reduces the search depth
# Start pruning moves after this depth has
# been cleared
NMP_DEPTH_THRESHOLD = 2
# Reduce search depth by at least this value
NMP_BASE_REDUCTION = 3
# Reduce search depth proportionally to the
# current depth divided by this value, plus
# the base reduction
NMP_DEPTH_REDUCTION {.used.} = 3
# Constants to configure RFP
# (Reverse Futility Pruning)
# Advantage threshold
RFP_EVAL_THRESHOLD = 100
# Limit after which RFP is disabled
RFP_DEPTH_LIMIT = 7
# Constants to configure FP
# (Futility pruning)
NUM_KILLERS* = 2
MAX_DEPTH* = 255
# Constants used during move ordering
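The NMP comments in the hunk above describe the intended reduction formula: the base reduction plus the current depth divided by NMP_DEPTH_REDUCTION (the latter is only declared in this commit, with {.used.} silencing the unused-symbol warning). A rough illustration under that reading, with a hypothetical helper name:

const
  NMP_BASE_REDUCTION = 3   # values as in the const block above
  NMP_DEPTH_REDUCTION = 3

# Hypothetical helper, not in the diff: the depth reduction the comments
# describe (base reduction plus depth div NMP_DEPTH_REDUCTION)
func nmpReduction(depth: int): int =
  NMP_BASE_REDUCTION + depth div NMP_DEPTH_REDUCTION

when isMainModule:
  echo nmpReduction(9) # 3 + 9 div 3 = 6 plies of reduction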
@@ -135,12 +149,18 @@ proc storeHistoryScore(self: var SearchManager, sideToMove: PieceColor, move: Mo
self.history[][sideToMove][move.startSquare][move.targetSquare] += Score(bonus) - abs(bonus.int32) * score div highestEval()
func isTactical(self: Move): bool =
func isTactical(self: Move): bool {.inline.} =
## Returns whether the given move
## is a tactical move
## is considered tactical
return self.isPromotion() or self.isCapture() or self.isEnPassant()
func isQuiet(self: Move): bool {.inline.} =
## Returns whether the given move is
## a quiet move
return not self.isCapture() and not self.isEnPassant()
proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): int =
## Returns an estimated static score for the move used
## during move ordering
@@ -185,7 +205,7 @@ proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): int =
return result + MVV_LVA_OFFSET
if not move.isCapture() and not move.isEnPassant():
if move.isQuiet():
# History heuristic bonus
return self.getHistoryScore(sideToMove, move) + HISTORY_OFFSET
@@ -369,7 +389,18 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
of UpperBound:
if query.entry.score <= alpha:
return query.entry.score
if not isPV and depth > 2 and self.board.canNullMove() and self.board.position.evaluate() >= beta:
let staticEval = self.board.position.evaluate()
if not isPV and not self.board.inCheck() and depth <= RFP_DEPTH_LIMIT and staticEval - RFP_EVAL_THRESHOLD * depth >= beta:
## Reverse futility pruning: if the side to move has a significant advantage
## in the current position and is not in check, return the position's static
## evaluation to encourage the engine to deal with any potential threats from
## the opponent. Since this optimization technique is not sound, we limit the
## depth at which it can trigger for safety purposes (it is also the reason
## why the "advantage" threshold scales with depth: the deeper we go, the more
## careful we want to be with our estimate for how much of an advantage we may
## or may not have)
return staticEval
if not isPV and depth > NMP_DEPTH_THRESHOLD and self.board.canNullMove() and staticEval >= beta:
# Null move pruning: it is reasonable to assume that
# it is always better to make a move than not to do
# so (with some exceptions noted below). To take advantage
@@ -476,7 +507,7 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
return
bestScore = max(score, bestScore)
if score >= beta:
if not move.isCapture() and not move.isEnPassant():
if move.isQuiet():
# History heuristic: keep track of moves that caused a beta cutoff and order
# them early in subsequent searches, as they might be really good later. A
# quadratic bonus wrt. depth is usually the value that is used (though some