# Copyright 2024 Mattia Giambirtone & All Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Implementation of negamax with a/b pruning
import board
import movegen
import eval
import transpositions
import std/times
import std/atomics
import std/algorithm
import std/monotimes
import std/strformat
type
  SearchManager* = ref object
    ## A simple state storage
    ## for our search
    stopFlag*: Atomic[bool]       # Can be used to cancel the search from another thread
    board: Chessboard             # The board to search on
    bestMoveRoot: Move            # The best move found so far at the root
    bestRootScore: Score          # The score of bestMoveRoot
    searchStart: MonoTime         # When the current search began
    searchDeadline: MonoTime      # Hard deadline after which the search stops
    nodeCount: uint64             # Number of nodes searched so far
    maxNodes: uint64              # Node budget (0 means unlimited)
    searchMoves: seq[Move]        # If non-empty, restricts the root search to these moves
    previousBestMove: Move        # Best move from the previous iterative-deepening pass
    transpositionTable: TTable    # Shared transposition table
    currentExtensionCount: uint8  # Extensions used so far (capped to avoid search explosion)

proc newSearchManager*(board: Chessboard, transpositions: TTable): SearchManager =
  ## Initializes a new search manager for the given
  ## board, using the given transposition table
  new(result)
  result.board = board
  result.bestMoveRoot = nullMove()
  result.transpositionTable = transpositions

proc getEstimatedMoveScore(self: SearchManager, move: Move): Score =
  ## Returns an estimated static score for the move,
  ## used for move ordering
  result = Score(0)
  let
    sideToMove = self.board.position.sideToMove
    nonSideToMove = sideToMove.opposite()
  if self.previousBestMove != nullMove() and move == self.previousBestMove:
    # Always search the best move from the previous iteration first
    return highestEval() + 1
  if move.isCapture():
    # Implementation of MVV-LVA (Most Valuable Victim, Least Valuable Attacker):
    # we prioritize moves that capture the most valuable pieces and, as a
    # secondary goal, we want to use our least valuable pieces to do so (this
    # is why we multiply the score of the captured piece by 100: to give
    # it priority)
    result += 100 * self.board.getPieceScore(move.targetSquare) - self.board.getPieceScore(move.startSquare)
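    # A worked example (hedged: assuming conventional centipawn values from
    # the eval module, e.g. pawn = 100 and queen = 900): pawn takes queen
    # scores 100 * 900 - 100 = 89900, while queen takes pawn scores
    # 100 * 100 - 900 = 9100, so the former is searched first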
  if move.isPromotion():
    # Promotions are a good idea to search first
    var piece: Piece
    case move.getPromotionType():
      of PromoteToBishop:
        piece = Piece(kind: Bishop, color: sideToMove)
      of PromoteToKnight:
        piece = Piece(kind: Knight, color: sideToMove)
      of PromoteToRook:
        piece = Piece(kind: Rook, color: sideToMove)
      of PromoteToQueen:
        piece = Piece(kind: Queen, color: sideToMove)
      else:
        discard  # Unreachable
    result += self.board.getPieceScore(piece, move.targetSquare)
  if (self.board.getPawnAttacks(move.targetSquare, nonSideToMove) and self.board.getBitboard(Pawn, nonSideToMove)) != 0:
    # Moving to a square attacked by an enemy pawn is _usually_ a very bad
    # idea. Assume the piece is lost
    result -= self.board.getPieceScore(move.startSquare)

proc reorderMoves(self: SearchManager, moves: var MoveList) =
  ## Reorders the list of moves in-place, trying
  ## to place the best ones first
  proc orderer(a, b: Move): int {.closure.} =
    return cmp(self.getEstimatedMoveScore(a), self.getEstimatedMoveScore(b))
  # Only sort the first moves.len entries: the rest of the
  # underlying array is filled with null moves
  sort(moves.data.toOpenArray(0, moves.len - 1), orderer, SortOrder.Descending)

proc timedOut(self: SearchManager): bool = getMonoTime() >= self.searchDeadline
proc cancelled(self: SearchManager): bool = self.stopFlag.load()

proc log(self: SearchManager, depth: int) =
  ## Logs a UCI-style info line about the
  ## current state of the search
  let
    elapsed = getMonoTime() - self.searchStart
    elapsedMsec = elapsed.inMilliseconds.uint64
    nps = 1000 * (self.nodeCount div max(elapsedMsec, 1))
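  # A quick sanity check of the nps formula: 1_000_000 nodes in 500 ms
  # gives 1000 * (1_000_000 div 500) = 2_000_000 nodes per second; the
  # max() guards against division by zero on sub-millisecond searches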
  var logMsg = &"info depth {depth} time {elapsedMsec} nodes {self.nodeCount} nps {nps}"
  if self.bestMoveRoot != nullMove():
    logMsg &= &" bestmove {self.bestMoveRoot.toAlgebraic()} score {self.bestRootScore}"
  echo logMsg

proc shouldStop(self: SearchManager): bool =
  ## Returns whether the search should stop
  if self.cancelled():
    # Search has been cancelled!
    return true
  if self.timedOut():
    # We ran out of time!
    return true
  if self.maxNodes > 0 and self.nodeCount >= self.maxNodes:
    # We ran out of nodes
    return true

proc getSearchExtension(self: SearchManager, move: Move): int =
  ## Returns the number of extensions that should be performed
  ## when exploring the given move (this is called right after
  ## the move has been made on the board)
  if self.currentExtensionCount == 16:
    # We have exhausted the extension budget
    return 0
  if self.board.inCheck():
    # Opponent is in check: extend the search to see
    # if we can do other interesting things!
    inc(self.currentExtensionCount)
    return 1
  let piece = self.board.getPiece(move.targetSquare)
  # If a pawn has just moved to its second-to-last rank, extend to
  # see whether a promotion would yield a good position
  if piece.kind == Pawn:
    let rank = if piece.color == White: getRankMask(1) else: getRankMask(6)
    if (move.targetSquare.toBitboard() and rank) != 0:
      inc(self.currentExtensionCount)
      return 1

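# Note on the cap above: currentExtensionCount is never decremented or reset
# within this file, so the 16-extension limit acts as an overall budget for
# the SearchManager rather than a per-branch limit
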
proc qsearch(self: SearchManager, ply: uint8, alpha, beta: Score): Score =
  ## Negamax search with a/b pruning that is restricted to
  ## capture moves (commonly called quiescent search). The
  ## purpose of this extra search step is to mitigate the
  ## so-called horizon effect, which stems from the fact that,
  ## at some point, the engine has to stop searching, possibly
  ## thinking a bad move is good because it couldn't see far enough
  ## ahead (this usually results in the engine blundering captures
  ## or sacking pieces for apparently no reason: the reason is that it
  ## did not look at the opponent's responses, because it stopped earlier.
  ## That's the horizon). To address this, we look at all possible captures
  ## in the current position and make sure that a position is not evaluated
  ## as bad when only bad captures are available but good quiet moves exist
  if self.shouldStop():
    return
  if ply == 127:
    # Maximum search depth reached
    return Score(0)
  let score = self.board.evaluate()
  if score >= beta:
    # Same as with the regular alpha-beta search:
    # the opponent will not allow this position
    return score
  var moves = newMoveList()
  self.board.generateMoves(moves, capturesOnly=true)
  self.reorderMoves(moves)
  var bestScore = score
  var alpha = max(alpha, score)
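  # Stand-pat: the static evaluation acts as a lower bound on the score,
  # because the side to move is (almost) never forced to capture and can
  # usually fall back on some quiet move instead. The assumption breaks
  # down in zugzwang, but it is a standard and effective heuristic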
  for move in moves:
    self.board.doMove(move)
    inc(self.nodeCount)
    # Find the best move for us (worst move
    # for our opponent, hence the negative sign)
    let score = -self.qsearch(ply + 1, -beta, -alpha)
    self.board.unmakeMove()
    bestScore = max(score, bestScore)
    if score >= beta:
      # This move was too good for us, so the
      # opponent will not allow it
      break
    # When a search is cancelled or times out, we need
    # to make sure the entire call stack unwinds back
    # to the root move. This is why the check is duplicated
    if self.shouldStop():
      return
    if score > alpha:
      alpha = score
  return bestScore

proc search(self: SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
  ## Simple negamax search with alpha-beta pruning
  if self.shouldStop():
    return
  # Probe the transposition table before doing any work
  let query = self.transpositionTable.get(self.board.position.zobristKey, depth.uint8)
  if query.success:
    case query.entry.flag:
      of Exact:
        return query.entry.score
      of LowerBound:
        if query.entry.score >= beta:
          return query.entry.score
      of UpperBound:
        if query.entry.score <= alpha:
          return query.entry.score
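  # A note on the flags above: an Exact entry stores the true score of the
  # node and can be returned unconditionally. A LowerBound entry comes from
  # a beta cutoff, so the real score is at least the stored one and it is
  # only safe to use when it already fails high (>= beta); symmetrically,
  # an UpperBound entry (no move beat alpha) is only usable when it still
  # fails low (<= alpha)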
  if depth == 0:
    # We've reached the bottom of the main search:
    # drop into the quiescent search
    return self.qsearch(0, alpha, beta)
  var moves = newMoveList()
  var depth = depth
  self.board.generateMoves(moves)
  self.reorderMoves(moves)
  if moves.len() == 0:
    if self.board.inCheck():
      # Checkmate! We add the current ply
      # because mating in 3 is better than
      # mating in 5 (and, conversely, being
      # mated in 5 is better than being
      # mated in 3)
      return mateScore() + Score(ply)
    # Stalemate
    return Score(0)
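  # A worked example of the mate bookkeeping above (hedged: assuming
  # mateScore() is a large negative constant such as -30000 in the eval
  # module): being mated at ply 3 scores -30000 + 3 = -29997, while being
  # mated at ply 5 scores -29995. Since -29995 > -29997, the engine prefers
  # the later mate when losing and, after negamax negation, the earlier
  # mate when winning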
  var bestScore = lowestEval()
  var alpha = alpha
  for i, move in moves:
    if ply == 0 and self.searchMoves.len() > 0 and move notin self.searchMoves:
      continue
    self.board.doMove(move)
    let extension = self.getSearchExtension(move)
    let zobrist = self.board.position.zobristKey
    inc(self.nodeCount)
    # Find the best move for us (worst move
    # for our opponent, hence the negative sign)
    var score: Score
    var fullDepth = true
    if extension == 0 and i >= 3 and not move.isCapture():
      # Late Move Reductions: assume our move orderer did a good job,
      # so it is not worth looking at all moves at the same depth equally.
      # If this move turns out to be better than we expected, we'll re-search
      # it at full depth
      const reduction = 1
      score = -self.search(depth - 1 - reduction, ply + 1, -beta, -alpha)
      # Only re-search at full depth if the reduced search beat alpha
      fullDepth = score > alpha
    if fullDepth:
      score = -self.search(depth - 1 + extension, ply + 1, -beta, -alpha)
    if self.board.position.halfMoveClock >= 100 or self.board.position.repetitionDraw:
      # Drawing by repetition or by the fifty-move rule is *bad*
      score = Score(0)
    self.board.unmakeMove()
    # When a search is cancelled or times out, we need
    # to make sure the entire call stack unwinds back
    # to the root move. This is why the check is duplicated
    if self.shouldStop():
      return
    bestScore = max(score, bestScore)
    let nodeType = if score >= beta: LowerBound elif score <= alpha: UpperBound else: Exact
    self.transpositionTable.store(depth.uint8, score, zobrist, nodeType)
    if nodeType == LowerBound:
      # score >= beta: this move was too good for us,
      # so the opponent will not allow it
      break
    if score > alpha:
      alpha = score
      if ply == 0:
        self.bestMoveRoot = move
        self.bestRootScore = bestScore
  return bestScore

proc findBestMove*(self: SearchManager, maxSearchTime, maxDepth: int, maxNodes: uint64, searchMoves: seq[Move]): Move =
  ## Finds the best move in the current position
  ## and returns it, limiting the search to
  ## maxSearchTime milliseconds and maxDepth
  ## plies (if maxDepth is -1, a reasonable limit
  ## is picked). If maxNodes is nonzero, the search
  ## will stop once it has analyzed that many nodes.
  ## If searchMoves is not empty, the search is
  ## restricted to the moves in the list
  self.bestMoveRoot = nullMove()
  result = self.bestMoveRoot
  self.maxNodes = maxNodes
  self.searchMoves = searchMoves
  self.searchStart = getMonoTime()
  self.searchDeadline = self.searchStart + initDuration(milliseconds=maxSearchTime)
  var maxDepth = maxDepth
  if maxDepth == -1:
    maxDepth = 30
  # Iterative deepening loop
  for i in 1..maxDepth:
    # Search the best move from the previous iteration first
    self.previousBestMove = self.bestMoveRoot
    self.search(i, 0, lowestEval(), highestEval())
    # Since we always search the best move from the
    # previous iteration first, we can use partial search
    # results: the engine will either not have changed
    # its mind, or it will have found an even better move
    # in the meantime, which we should obviously use!
    result = self.bestMoveRoot
    if self.shouldStop():
      # The current iteration was interrupted, so the
      # last fully completed depth is the previous one
      self.log(i - 1)
      break
    self.log(i)
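
# A minimal usage sketch of the search API above (not part of the engine).
# Hedged: `newDefaultBoard` and `newTranspositionTable` are assumed
# constructor names for the board and transpositions modules and may
# differ in the actual codebase:
#
#   let
#     board = newDefaultBoard()
#     tt = newTranspositionTable()
#     manager = newSearchManager(board, tt)
#   # Search for at most 5 seconds with no depth or node limit,
#   # considering every legal root move
#   let best = manager.findBestMove(5000, -1, 0, @[])
#   echo best.toAlgebraic()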