WIP refactoring for aspiration window, minor other changes

Mattia Giambirtone 2024-05-09 15:45:39 +02:00
parent e0914e2eb5
commit 887e2a64a3
1 changed file with 42 additions and 48 deletions


@@ -127,7 +127,7 @@ proc storeHistoryScore(self: var SearchManager, sideToMove: PieceColor, move: Mo
func isTactical(self: Move): bool =
## Returns whether the given move
## is a tactical move
return self.isPromotion() or self.isCapture()
return self.isPromotion() or self.isCapture() or self.isEnPassant()
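The en passant checks added throughout this commit suggest that the engine's isCapture() does not already cover en passant, which is plausible since the target square of an en passant move is empty. A minimal standalone sketch of the updated predicate, using an illustrative flag-based move type rather than the engine's actual Move encoding:

# Illustrative only: MoveFlag and SketchMove are not the engine's
# real move representation.
type
  MoveFlag = enum Capture, Promotion, EnPassant
  SketchMove = object
    flags: set[MoveFlag]

func isTactical(move: SketchMove): bool =
  ## Mirrors the updated predicate above: promotions, captures and
  ## en passant captures (flagged separately because the target
  ## square is empty) all count as tactical.
  move.flags * {Capture, Promotion, EnPassant} != {}

when isMainModule:
  assert isTactical(SketchMove(flags: {EnPassant}))
  assert not isTactical(SketchMove(flags: {}))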
proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): int =
@@ -170,7 +170,7 @@ proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): int =
return result + MVV_LVA_OFFSET
if not move.isCapture():
if not move.isCapture() and not move.isEnPassant():
# History heuristic bonus
return self.getHistoryScore(sideToMove, move) + HISTORY_OFFSET
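For context on the two returns in this hunk: capture scores get MVV_LVA_OFFSET added on top of their MVV-LVA value, while quiet moves fall back to their history score plus HISTORY_OFFSET, so the two classes end up in separate score bands (that banding reading is an assumption based on the constant names). A standalone sketch of the idea, with illustrative constants and a toy move type rather than the engine's own:

import std/algorithm

# Sketch of offset-based move ordering buckets. The constants, the
# MoveKind enum and the scoring are illustrative only; the engine uses
# its own Move type, MVV-LVA table and history table.
type
  MoveKind = enum Quiet, Capture
  RankedMove = object
    kind: MoveKind
    victimValue: int   # rough value of the captured piece, if any
    historyScore: int  # accumulated history score for quiet moves

const
  HistoryOffset = 0          # quiet moves live in the lowest band
  MvvLvaOffset  = 1_000_000  # captures always outrank quiet moves

func orderingScore(move: RankedMove): int =
  ## Higher scores are searched first.
  if move.kind == Capture:
    MvvLvaOffset + move.victimValue
  else:
    HistoryOffset + move.historyScore

when isMainModule:
  var moves = @[
    RankedMove(kind: Quiet, historyScore: 512),
    RankedMove(kind: Capture, victimValue: 900),
    RankedMove(kind: Quiet, historyScore: 40)
  ]
  # Sort so the highest ordering score comes first: the capture leads.
  moves.sort(proc (a, b: RankedMove): int = cmp(orderingScore(b), orderingScore(a)))
  for move in moves:
    echo move.kind, " ", orderingScore(move)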
@@ -421,7 +421,7 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
return
bestScore = max(score, bestScore)
if score >= beta:
if not move.isCapture():
if not move.isCapture() and not move.isEnPassant():
# History heuristic: keep track of moves that caused a beta cutoff and order
# them early in subsequent searches, as they might be really good later. A
# quadratic bonus wrt. depth is usually the value that is used (though some
@@ -448,22 +448,45 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
break
self.pvMoves[ply][i + 1] = pv
self.pvMoves[ply][0] = move
# TODO
# else:
# when defined(noScaleHistory):
# if not move.isCapture() and self.history[][sideToMove][move.startSquare][move.targetSquare] > lowestEval():
# # Here, we punish moves that failed to raise alpha. This allows us to avoid scaling our values
# # after every search (which should retain more information about the explored subtrees) and
# # makes sure that moves that we thought were good but aren't are pushed further in the move list
# self.history[][sideToMove][move.startSquare][move.targetSquare] -= Score(depth * depth)
# else:
# discard
let nodeType = if bestScore >= beta: LowerBound elif bestScore <= alpha: UpperBound else: Exact
self.transpositionTable[].store(depth.uint8, bestScore, self.board.position.zobristKey, bestMove, nodeType)
return bestScore
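The nodeType stored here records whether bestScore is a lower bound (the search failed high), an upper bound (it failed low) or an exact score, which is what allows a later probe of the same position to cut off or tighten its window. A hedged sketch of how a probe typically interprets the three cases; the TTEntry layout and probeCutoff helper are illustrative, not the engine's transposition table API:

# Illustrative only: the engine's real transposition table stores more
# data (zobrist key, best move, age) and has its own API.
type
  NodeType = enum Exact, LowerBound, UpperBound
  TTEntry = object
    score: int
    depth: int
    nodeType: NodeType

func probeCutoff(entry: TTEntry, depth, alpha, beta: int): tuple[hit: bool, score: int] =
  ## Returns (true, score) when the stored entry was searched deeply
  ## enough and its bound is strong enough to cut this node off.
  if entry.depth < depth:
    return (false, 0)
  case entry.nodeType
  of Exact:
    result = (true, entry.score)                 # exact score: always usable
  of LowerBound:
    result = (entry.score >= beta, entry.score)  # stored after a fail-high
  of UpperBound:
    result = (entry.score <= alpha, entry.score) # stored after a fail-low

when isMainModule:
  let entry = TTEntry(score: 120, depth: 6, nodeType: LowerBound)
  echo probeCutoff(entry, 5, -50, 100)  # (hit: true, score: 120)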
proc aspirationWindow(self: var SearchManager, score: Score, depth: int): Score =
## Aspiration windows: start subsequent searches with tighter
## alpha-beta bounds and widen them as needed (i.e. when the score
## goes beyond the window) to increase the number of cutoffs
var
delta = Score(20)
alpha = max(lowestEval(), score - delta)
beta = min(highestEval(), score + delta)
searchDepth = depth
while not self.shouldStop() and getMonoTime() < self.softLimit:
result = self.search(searchDepth, 0, alpha, beta, true)
# Score is outside window bounds, widen the one that
# we got past to get a better result
if result <= alpha:
beta = (alpha + beta) div 2
alpha = max(lowestEval(), alpha - delta)
searchDepth = depth
elif result >= beta:
beta = min(highestEval(), beta + delta)
if searchDepth > 1:
searchDepth = searchDepth - 1
else:
# Value was within the alpha-beta bounds, we're done
break
# Try again with larger window
delta += delta div 2
# TODO: Tune this
if delta >= Score(500):
# Window got too wide, give up and search with the full range
# of alpha-beta values
delta = highestEval()
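Under this schedule the window radius starts at 20, grows by half after every failed search (20, 30, 45, 67, 100, 150, 225, 337, ...) and falls back to a full-width window once it crosses 500. A standalone sketch of just that schedule, with fullWidth standing in as an assumed placeholder for highestEval():

# Illustrative only: fullWidth stands in for highestEval().
const fullWidth = 30_000

var
  delta = 20
  schedule = @[delta]
while delta < 500:
  delta += delta div 2      # same growth rule as the re-search loop above
  if delta >= 500:
    delta = fullWidth       # give up on a narrow window past this point
  schedule.add delta

echo schedule  # @[20, 30, 45, 67, 100, 150, 225, 337, 30000]

In the loop itself the widened delta is applied to whichever bound the score escaped; a fail-low additionally pulls beta toward the midpoint and resets the search depth, while a fail-high reduces the re-search depth by one.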
proc findBestMove*(self: var SearchManager, timeRemaining, increment: int64, maxDepth: int, maxNodes: uint64, searchMoves: seq[Move], timePerMove=false): Move =
## Finds the best move in the current position
## and returns it, limiting search time according
@@ -496,46 +519,17 @@ proc findBestMove*(self: var SearchManager, timeRemaining, increment: int64, max
self.searchFlag[].store(true)
# Iterative deepening loop
var score {.used.} = Score(0)
for i in 1..min(MAX_DEPTH, maxDepth):
when not defined(aspirationWindows):
self.search(i, 0, lowestEval(), highestEval(), true)
for depth in 1..min(MAX_DEPTH, maxDepth):
if depth < 5 or not defined(aspirationWindow):
score = self.search(depth, 0, lowestEval(), highestEval(), true)
else:
if i < 5:
score = self.search(i, 0, lowestEval(), highestEval(), true)
else:
# Aspiration windows: start subsequent searches with tighter
# alpha-beta bounds and widen them as needed (i.e. when the score
# goes beyond the window) to increase the number of cutoffs
var
delta = Score(15)
alpha = max(lowestEval(), score - delta)
beta = min(highestEval(), score + delta)
while not self.shouldStop() or getMonoTime() >= self.softLimit:
score = self.search(i, 0, alpha, beta, true)
# Score is outside window bounds, widen the one that
# we got past to get a better result
if score <= alpha:
beta = (alpha + beta) div 2
alpha = max(lowestEval(), alpha - delta)
elif score >= beta:
beta = min(highestEval(), beta + delta)
else:
# Value was within the alpha-beta bounds, we're done
break
# Try again with larger window
delta *= 2
# TODO: Tune this
if delta >= Score(500):
# Window got too wide, give up and search with the full range
# of alpha-beta values
score = self.search(i, 0, lowestEval(), highestEval(), true)
break
score = self.aspirationWindow(score, depth)
if self.pvMoves[0][0] != nullMove():
result = self.pvMoves[0][0]
if self.shouldStop():
self.log(i - 1)
self.log(depth - 1)
break
self.log(i)
self.log(depth)
# Soft time management: don't start a new search iteration
# if the soft limit has expired, as it is unlikely to complete
# anyway
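The comment above describes the usual soft/hard split: the hard limit aborts an ongoing search, while the soft limit only decides whether another iteration is started at all. A minimal sketch of that idea, assuming millisecond budgets measured from the search start; the field and proc names are illustrative, not the engine's actual time manager:

import std/[monotimes, times]

# Illustrative only: in the code above softLimit is a MonoTime deadline;
# here the limits are plain millisecond budgets measured from the start
# of the search.
type TimeManager = object
  searchStart: MonoTime
  softLimitMs: int64   # don't *start* a new iteration past this
  hardLimitMs: int64   # abort the current search past this

proc elapsedMs(tm: TimeManager): int64 =
  (getMonoTime() - tm.searchStart).inMilliseconds

proc canStartNewIteration(tm: TimeManager): bool =
  tm.elapsedMs() < tm.softLimitMs

proc mustStopNow(tm: TimeManager): bool =
  tm.elapsedMs() >= tm.hardLimitMs

when isMainModule:
  let tm = TimeManager(searchStart: getMonoTime(),
                       softLimitMs: 50, hardLimitMs: 200)
  echo tm.canStartNewIteration()  # true right after the search starts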