CPG/Chess/nimfish/nimfishpkg/search.nim

# Copyright 2024 Mattia Giambirtone & All Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Implementation of negamax search with alpha-beta pruning
import board
import movegen
import eval
import std/times
import std/atomics
import std/algorithm
import std/monotimes
import std/strformat
type
SearchManager* = ref object
## A simple state storage
## for our search
stopFlag*: Atomic[bool] # Can be used to cancel the search from another thread
board: Chessboard # The board to search
bestMoveRoot: Move # The best move found so far at the root of the search
searchStart: MonoTime # The time at which the search began
searchDeadline: MonoTime # The time at which the search must stop
nodeCount: uint64 # The number of nodes searched so far
maxNodes: uint64 # The maximum number of nodes to search (0 means no limit)
searchMoves: seq[Move] # If nonempty, restricts the root search to these moves
previousBestMove: Move # The best move from the previous iterative deepening iteration
proc newSearchManager*(board: Chessboard): SearchManager =
## Initialises a new search manager for the given board
new(result)
result.board = board
result.bestMoveRoot = nullMove()
proc getEstimatedMoveScore(self: SearchManager, move: Move): Score =
## Returns a heuristic score for the move, used to order
## moves so the most promising ones are searched first
result = Score(0)
if self.previousBestMove != nullMove() and move == self.previousBestMove:
result = highestEval() + 1
elif move.isCapture():
# Implementation of MVVLVA: Most Valuable Victim Least Valuable Attacker
# We prioritize moves that capture the most valuable pieces, and as a
# second goal we want to use our least valuable pieces to do so (this
# is why we multiply the score of the captured piece by 100, to give
# it priority)
result = 100 * self.board.getPieceScore(move.targetSquare) -
self.board.getPieceScore(move.startSquare)
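# Purely illustrative example (the real values come from getPieceScore in
# eval): with pawn = 100 and queen = 900, pawn takes queen would score
# 100 * 900 - 100 = 89900, while queen takes pawn would score
# 100 * 100 - 900 = 9100, so the pawn capture is tried first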
proc reorderMoves(self: SearchManager, moves: var MoveList) =
## Reorders the list of moves in-place, trying
## to place the best ones first
proc orderer(a, b: Move): int {.closure.} =
return cmp(self.getEstimatedMoveScore(a), self.getEstimatedMoveScore(b))
moves.data.sort(orderer, SortOrder.Descending)
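# Note: searching the (likely) strongest moves first is what makes
# alpha-beta pruning effective, since an early good move lets us cut
# off most of the remaining ones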
proc timedOut(self: SearchManager): bool = getMonoTime() >= self.searchDeadline
proc cancelled(self: SearchManager): bool = self.stopFlag.load()
proc log(self: SearchManager, depth: int) =
## Prints UCI-style search information to standard output
let
elapsed = getMonoTime() - self.searchStart
elapsedMsec = elapsed.inMilliseconds.uint64
nps = 1000 * self.nodeCount div max(elapsedMsec, 1)
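# For example (illustrative numbers), 1_500_000 nodes searched in 3000 ms
# is reported as nps 500000: the factor of 1000 converts the
# per-millisecond rate into nodes per second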
var logMsg = &"info depth {depth} time {elapsedMsec} nodes {self.nodeCount} nps {nps}"
if self.bestMoveRoot != nullMove():
logMsg &= &" pv {self.bestMoveRoot.toAlgebraic()}"
echo logMsg
proc shouldStop(self: SearchManager): bool =
## Returns whether searching should
## stop
if self.cancelled():
# Search has been cancelled!
return true
if self.timedOut():
# We ran out of time!
return true
if self.maxNodes > 0 and self.nodeCount >= self.maxNodes:
# Ran out of nodes
return true
proc search*(self: SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
## Simple negamax search with alpha-beta pruning
if self.shouldStop():
return
if depth == 0:
return self.board.evaluate()
var moves = MoveList()
self.board.generateMoves(moves)
self.reorderMoves(moves)
if moves.len() == 0:
if self.board.inCheck():
# Checkmate! We add the current ply
# because mating in 3 is better than
# mating in 5 (and conversely being
# mated in 5 is better than being
# mated in 3)
return mateScore() + Score(ply)
# Stalemate
return Score(0)
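# Note on the mate score above (illustrative numbers only): if mateScore()
# were -32000, being mated at ply 3 would return -31997 and being mated at
# ply 5 would return -31995, so the later mate is preferred by the side
# being mated; after negation up the tree, the faster mate is preferred by
# the side delivering it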
var bestScore = lowestEval()
var alpha = alpha
for i, move in moves:
if ply == 0 and self.searchMoves.len() > 0 and move notin self.searchMoves:
continue
self.board.doMove(move)
inc(self.nodeCount)
# Find the best move for us (worst move
# for our opponent, hence the negative sign)
var score = -self.search(depth - 1, ply + 1, -beta, -alpha)
if self.board.position.repetitionDraw:
# Drawing by repetition is *bad*: overwrite the score with a heavy
# penalty so the engine steers away from repeating positions
score = lowestEval() div 2
self.board.unmakeMove()
# When a search is cancelled or times out, we need
# to make sure the entire call stack unwinds back
# to the root move. This is why the check is duplicated
if self.shouldStop():
return
bestScore = max(score, bestScore)
if score >= beta:
# This move is too good for us: the opponent will avoid this line
# entirely, so there is no point in searching it any further (beta cutoff)
break
if score > alpha:
alpha = score
if ply == 0:
self.bestMoveRoot = move
return bestScore
proc findBestMove*(self: SearchManager, maxSearchTime, maxDepth: int, maxNodes: uint64, searchMoves: seq[Move]): Move =
## Finds the best move in the current position
## and returns it, limiting the search to
## maxSearchTime milliseconds and maxDepth
## plies (if maxDepth is -1, a reasonable
## limit is picked). If maxNodes is nonzero,
## the search stops once that many nodes have
## been analyzed. If searchMoves is not empty,
## the search is restricted to the moves in the list
self.bestMoveRoot = nullMove()
result = self.bestMoveRoot
self.maxNodes = maxNodes
self.searchMoves = searchMoves
self.searchStart = getMonoTime()
self.searchDeadline = self.searchStart + initDuration(milliseconds=maxSearchTime)
var maxDepth = maxDepth
if maxDepth == -1:
maxDepth = 30
# Iterative deepening loop
for i in 1..maxDepth:
# Search the previous best move first
self.previousBestMove = self.bestMoveRoot
self.search(i, 0, lowestEval(), highestEval())
let shouldStop = self.shouldStop()
if shouldStop:
self.log(i - 1)
else:
self.log(i)
# Since we always search the best move from the
# previous iteration, we can use partial search
# results: the engine will either not have changed
# its mind, or it will have found an even better move
# in the meantime, which we should obviously use!
result = self.bestMoveRoot
if shouldStop:
break
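
# A minimal usage sketch, not part of the original module: it assumes the
# caller has already obtained a Chessboard from the board module and relies
# only on the procs defined above. It runs a one-second search with no depth,
# node or move restrictions and returns whatever move the manager settled on.
proc exampleSearch*(board: Chessboard): Move =
  ## Illustrative driver for SearchManager: one second of search time,
  ## engine-chosen depth limit, no node cap, all legal moves considered
  let manager = newSearchManager(board)
  result = manager.findBestMove(maxSearchTime = 1000, maxDepth = -1,
                                maxNodes = 0, searchMoves = @[])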