Fix bugs with LMR (needs testing). Move to ptrs. General refactoring

This commit is contained in:
Mattia Giambirtone 2024-05-06 00:34:06 +02:00
parent 245a5d75e8
commit 2a1f020edd
7 changed files with 127 additions and 84 deletions

Chess/.gitignore vendored
View File

@@ -7,7 +7,4 @@ nimfish/nimfishpkg/resources/*.epd
 nimfish/nimfishpkg/resources/*.pgn
 # Python
 __pycache__
-fast-chess
-log.txt
-config.json
 *.log

View File

@@ -1,9 +1,7 @@
 --cc:clang
 -o:"bin/nimfish"
--d:debug
+-d:danger
 --passL:"-flto -lmimalloc"
 --passC:"-flto -march=native -mtune=native"
 -d:useMalloc
 --mm:atomicArc
-# --stackTrace
-# --lineTrace
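
Note on the config change: -d:danger implies a release build and additionally disables all of Nim's runtime checks (bounds, overflow, nil and assertion checks), which is the usual choice for engine play builds; the commented-out --stackTrace and --lineTrace switches are dropped along with the debug define. The cfg keeps -d:useMalloc together with a linked mimalloc (--passL:"-flto -lmimalloc") so the standard allocator is overridden, and --mm:atomicArc so heap state can be shared across search threads.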

View File

@@ -246,24 +246,24 @@ proc getPieceScore*(position: Position, piece: Piece, square: Square): Score =
     result = Score((middleGameScore * middleGamePhase + endGameScore * endGamePhase) div 24)
 
-proc evaluateMaterial(board: ChessBoard): Score =
+proc evaluateMaterial(position: Position): Score =
     ## Returns a material and position evaluation
     ## for the current side to move
     let
-        middleGamePhase = board.position.getGamePhase()
+        middleGamePhase = position.getGamePhase()
         endGamePhase = 24 - middleGamePhase
     var
         # White, Black
         middleGameScores: array[PieceColor.White..PieceColor.Black, Score] = [0, 0]
        endGameScores: array[PieceColor.White..PieceColor.Black, Score] = [0, 0]
-    for sq in board.position.getOccupancy():
-        let piece = board.position.getPiece(sq)
+    for sq in position.getOccupancy():
+        let piece = position.getPiece(sq)
         middleGameScores[piece.color] += MIDDLEGAME_VALUE_TABLES[piece.color][piece.kind][sq]
         endGameScores[piece.color] += ENDGAME_VALUE_TABLES[piece.color][piece.kind][sq]
     let
-        sideToMove = board.position.sideToMove
+        sideToMove = position.sideToMove
         nonSideToMove = sideToMove.opposite()
         middleGameScore = middleGameScores[sideToMove] - middleGameScores[nonSideToMove]
         endGameScore = endGameScores[sideToMove] - endGameScores[nonSideToMove]
@@ -297,8 +297,7 @@ proc evaluatePawnStructure(position: Position): Score {.used.} =
             inc(isolatedPawns)
     # Pawns that are defended by another pawn are
     # stronger
-    var
-        strongPawnIncrement = Score(0)
+    var strongPawnIncrement = Score(0)
     for pawn in position.getBitboard(Pawn, White):
         if position.getPawnAttacks(pawn, White) != 0:
             strongPawnIncrement += position.getPieceScore(pawn) div Score(4)
@@ -306,9 +305,9 @@ proc evaluatePawnStructure(position: Position): Score {.used.} =
     return DOUBLED_PAWNS_MALUS[doubledPawns] + ISOLATED_PAWN_MALUS[isolatedPawns] + strongPawnIncrement
 
-proc evaluate*(board: Chessboard): Score =
+proc evaluate*(position: Position): Score =
     ## Evaluates the current position
-    result = board.evaluateMaterial()
+    result = position.evaluateMaterial()
     when defined(evalPawns):
-        result += board.evaluatePawnStructure()
+        result += position.evaluatePawnStructure()
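
For reference, the div 24 blend in getPieceScore above is a standard tapered evaluation: a game phase between 0 (bare kings) and 24 (full material) interpolates between middlegame and endgame piece-square values. A self-contained sketch of just the interpolation, with a made-up Score alias and made-up knight values:

    # Minimal tapered-evaluation sketch; the Score alias and the example
    # values are illustrative, not Nimfish's actual types or tables.
    type Score = int32

    proc taper(mgScore, egScore: Score, middleGamePhase: int): Score =
        ## Blends middlegame and endgame scores by game phase (24 = full material)
        let endGamePhase = 24 - middleGamePhase
        Score((mgScore.int * middleGamePhase + egScore.int * endGamePhase) div 24)

    echo taper(Score(340), Score(300), 18)  # -> 330 (mostly middlegame weighting)
    echo taper(Score(340), Score(300), 4)   # -> 306 (mostly endgame weighting)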

View File

@@ -63,6 +63,7 @@ iterator pairs*(self: MoveList): tuple[i: int, move: Move] =
     var i = 0
     for item in self:
         yield (i, item)
+        inc(i)
 
 
 func `$`*(self: MoveList): string =
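
The added inc(i) fixes a real bug: pairs previously yielded index 0 for every move because the counter was never advanced. A standalone illustration of the pattern, generic over seq rather than Nimfish's MoveList:

    # A value-yielding iterator must advance its counter by hand.
    iterator indexed[T](s: seq[T]): (int, T) =
        var i = 0
        for item in s:
            yield (i, item)
            inc(i)              # without this, every tuple is (0, item)

    for i, move in indexed(@["e2e4", "e7e5"]):
        echo i, " ", move       # 0 e2e4, then 1 e7e5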

View File

@@ -25,17 +25,18 @@ import std/algorithm
 import std/monotimes
 import std/strformat
-import threading/smartptrs
 
+const NUM_KILLERS = 2
 
 type
     HistoryTable* = array[PieceColor.White..PieceColor.Black, array[Square(0)..Square(63), array[Square(0)..Square(63), Score]]]
-    KillersTable* = seq[array[2, Move]]
-    SearchManager* = ref object
+    KillersTable* = seq[array[NUM_KILLERS, Move]]
+    SearchManager* = object
         ## A simple state storage
         ## for our search
-        searchFlag: SharedPtr[Atomic[bool]]
-        stopFlag: SharedPtr[Atomic[bool]]
+        searchFlag: ptr Atomic[bool]
+        stopFlag: ptr Atomic[bool]
         board: Chessboard
         bestMoveRoot: Move
         bestRootScore: Score
@@ -44,16 +45,17 @@ type
         softLimit: MonoTime
         nodeCount: uint64
         maxNodes: uint64
+        currentMove: Move
+        currentMoveNumber: int
         searchMoves: seq[Move]
         previousBestMove: Move
-        transpositionTable: SharedPtr[TTable]
-        history: SharedPtr[HistoryTable]
-        killers: SharedPtr[KillersTable]
+        transpositionTable: ptr TTable
+        history: ptr HistoryTable
+        killers: ptr KillersTable
 
 
-proc newSearchManager*(board: Chessboard, transpositions: SharedPtr[TTable], stopFlag, searchFlag: SharedPtr[Atomic[bool]],
-                       history: SharedPtr[HistoryTable], killers: SharedPtr[KillersTable]): SearchManager =
-    new(result)
+proc newSearchManager*(board: Chessboard, transpositions: ptr TTable, stopFlag, searchFlag: ptr Atomic[bool],
+                       history: ptr HistoryTable, killers: ptr KillersTable): SearchManager =
     result = SearchManager(board: board, bestMoveRoot: nullMove(), transpositionTable: transpositions, stopFlag: stopFlag,
                            searchFlag: searchFlag, history: history, killers: killers)
@@ -71,7 +73,17 @@ proc stop*(self: SearchManager) =
     self.stopFlag[].store(true)
 
 
-proc getEstimatedMoveScore(self: SearchManager, move: Move): Score =
+proc isKillerMove(self: SearchManager, move: Move, ply: int): bool =
+    ## Returns whether the given move is a killer move
+    when not defined(killers):
+        return false
+    else:
+        for killer in self.killers[][ply]:
+            if killer == move:
+                return true
+
+
+proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): Score =
     ## Returns an estimated static score for the move used
     ## during move ordering
     result = Score(0)
@@ -84,7 +96,10 @@ proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): Score =
     let query = self.transpositionTable[].get(self.board.position.zobristKey)
     if query.success and query.entry.bestMove != nullMove() and query.entry.bestMove == move:
-        return highestEval() + 1
+        return highestEval() - 1
+
+    if self.isKillerMove(move, ply):
+        result += self.board.position.getPieceScore(move.startSquare) * 5
 
     if not move.isCapture():
         # History euristic bonus
@@ -121,12 +136,12 @@ proc getEstimatedMoveScore(self: SearchManager, move: Move, ply: int): Score =
             result -= self.board.position.getPieceScore(move.startSquare) * 2
 
 
-proc reorderMoves(self: SearchManager, moves: var MoveList) =
+proc reorderMoves(self: SearchManager, moves: var MoveList, ply: int) =
     ## Reorders the list of moves in-place, trying
     ## to place the best ones first
 
     proc orderer(a, b: Move): int {.closure.} =
-        return cmp(self.getEstimatedMoveScore(a), self.getEstimatedMoveScore(b))
+        return cmp(self.getEstimatedMoveScore(a, ply), self.getEstimatedMoveScore(b, ply))
     # Ignore null moves beyond the lenght of the movelist
     sort(moves.data.toOpenArray(0, moves.len - 1), orderer, SortOrder.Descending)
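
Threading ply through reorderMoves lets the scorer consult the per-ply killer table during ordering. The resulting priority ladder is the conventional one: transposition-table move first, then captures, then killers, then history-scored quiet moves. A condensed standalone sketch of such a ladder (the enum and constants are illustrative; the real scorer works on Move values and piece-square scores):

    import std/algorithm

    # Illustrative ordering buckets; the values only need to sort correctly.
    type MoveKind = enum Quiet, Killer, Capture, HashMove

    proc estimate(kind: MoveKind): int =
        case kind
        of HashMove: 1_000_000  # best move from the transposition table
        of Capture:     10_000  # refined by capture value in practice
        of Killer:       5_000  # quiet move that previously cut off at this ply
        of Quiet:            0  # left to the history heuristic

    var moves = @[Quiet, Killer, HashMove, Capture]
    moves.sort(proc (a, b: MoveKind): int = cmp(estimate(a), estimate(b)),
               SortOrder.Descending)
    echo moves  # @[HashMove, Capture, Killer, Quiet]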
@@ -134,12 +149,12 @@ proc reorderMoves(self: SearchManager, moves: var MoveList, ply: int) =
 proc timedOut(self: SearchManager): bool = getMonoTime() >= self.hardLimit
 proc cancelled(self: SearchManager): bool = self.stopFlag[].load()
+proc elapsedTime(self: SearchManager): int64 = (getMonoTime() - self.searchStart).inMilliseconds()
 
 
 proc log(self: SearchManager, depth: int) =
     let
-        elapsed = getMonoTime() - self.searchStart
-        elapsedMsec = elapsed.inMilliseconds.uint64
+        elapsedMsec = self.elapsedTime().uint64
         nps = 1000 * (self.nodeCount div max(elapsedMsec, 1))
     var logMsg = &"info depth {depth} time {elapsedMsec} nodes {self.nodeCount} nps {nps}"
     logMsg &= &" hashfull {self.transpositionTable[].getFillEstimate()}"
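
The nps figure is the node count divided by elapsed milliseconds and scaled back to seconds, with max(elapsedMsec, 1) guarding against division by zero on a sub-millisecond iteration: for example, 1,500,000 nodes in 500 ms reports nps 3000000.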
@@ -171,7 +186,7 @@ proc getSearchExtension(self: SearchManager, move: Move): int {.used.} =
         return 1
 
 
-proc qsearch(self: SearchManager, ply: uint8, alpha, beta: Score): Score =
+proc qsearch(self: var SearchManager, ply: int, alpha, beta: Score): Score =
     ## Negamax search with a/b pruning that is restricted to
     ## capture moves (commonly called quiescent search). The
     ## purpose of this extra search step is to mitigate the
@@ -189,7 +204,7 @@ proc qsearch(self: SearchManager, ply: uint8, alpha, beta: Score): Score =
         return
     if ply == 127:
         return Score(0)
-    let score = self.board.evaluate()
+    let score = self.board.position.evaluate()
     if score >= beta:
         # Same as with the regular alpha-beta search
         return score
@@ -197,7 +212,7 @@ proc qsearch(self: SearchManager, ply: uint8, alpha, beta: Score): Score =
         return Score(0)
     var moves = newMoveList()
     self.board.generateMoves(moves, capturesOnly=true)
-    self.reorderMoves(moves)
+    self.reorderMoves(moves, ply)
     var bestScore = score
     var alpha = max(alpha, score)
     for move in moves:
@@ -219,13 +234,40 @@ proc qsearch(self: SearchManager, ply: uint8, alpha, beta: Score): Score =
     return bestScore
 
 
-proc search(self: SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
+proc storeKillerMove(self: SearchManager, ply: int, move: Move) =
+    ## Stores a killer move into our killers table at the given
+    ## ply
+    # Stolen from https://rustic-chess.org/search/ordering/killers.html
+    # First killer move must not be the same as the one we're storing
+    let first = self.killers[][ply][0]
+    if first == move:
+        return
+    var j = self.killers[][ply].len() - 2
+    while j >= 0:
+        # Shift moves one spot down
+        self.killers[][ply][j + 1] = self.killers[][ply][j]
+        dec(j)
+    self.killers[][ply][0] = move
+
+
+proc shouldReduce(self: SearchManager, move: Move, depth, moveNumber: int): bool =
+    ## Returns whether the search should be reduced at the given
+    ## depth and move number
+    return defined(searchLMR) and moveNumber >= 5 and depth > 3 and not move.isCapture()
+
+
+proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
     ## Negamax search with various optimizations and search features
     if depth > 1 and self.shouldStop():
         # We do not let ourselves get cancelled at depth
         # one because then we wouldn't have a move to return.
         # In practice this should not be a problem
         return
+    when defined(killers):
+        if self.killers[].high() < ply:
+            self.killers[].add([nullMove(), nullMove()])
     if ply > 0:
         let query = self.transpositionTable[].get(self.board.position.zobristKey, depth.uint8)
         if query.success:
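
storeKillerMove keeps each ply's killers as a small most-recent-first list: with NUM_KILLERS = 2 the shift loop runs exactly once (j starts at len - 2 = 0), copying slot 0 into slot 1 before the new move takes slot 0, and the early return keeps both slots distinct. A standalone trace, with strings standing in for Move values:

    # Trace of the killer-table shift; strings stand in for Move.
    var killers: array[2, string] = ["", ""]

    proc store(move: string) =
        if killers[0] == move:           # duplicate of the first killer: ignore
            return
        var j = killers.len() - 2        # 0 when there are two killer slots
        while j >= 0:
            killers[j + 1] = killers[j]  # shift everything one slot down
            dec(j)
        killers[0] = move

    store("e2e4"); store("g1f3"); store("g1f3")
    echo killers  # ["g1f3", "e2e4"]: the duplicate store was a no-op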
@@ -248,7 +290,7 @@ proc search(self: SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
         depth = depth
         bestMove = nullMove()
     self.board.generateMoves(moves)
-    self.reorderMoves(moves)
+    self.reorderMoves(moves, ply)
     if moves.len() == 0:
         if self.board.inCheck():
             # Checkmate! We add the current ply
@@ -268,29 +310,33 @@ proc search(self: SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
         if ply == 0 and self.searchMoves.len() > 0 and move notin self.searchMoves:
             continue
         self.board.doMove(move)
+        self.currentMove = move
+        self.currentMoveNumber = i
         let extension = self.getSearchExtension(move)
         inc(self.nodeCount)
         # Find the best move for us (worst move
         # for our opponent, hence the negative sign)
         var score: Score
         # Implementation of Principal Variation Search (PVS)
         if i == 0:
             # Due to our move ordering scheme, the first move is always the "best", so
             # search it always at full depth with the full search window
             score = -self.search(depth - 1 + extension, ply + 1, -beta, -alpha)
-        # elif extension == 0 and depth > 3 and i >= 5 and not move.isCapture():
-        #     # Late Move Reductions: assume our move orderer did a good job,
-        #     # so it is not worth it to look at all moves at the same depth equally.
-        #     # If this move turns out to be better than we expected, we'll re-search
-        #     # it at full depth
-        #     const reduction = 1
-        #     # We first do a null-window search to see if there's a move that beats alpha
-        #     # (we don't care about the actual value, so we search in the range [alpha, alpha + 1]
-        #     # to increase the number of cutoffs)
-        #     score = -self.search(depth - 1 - reduction, ply + 1, -alpha - 1, -alpha)
-        #     if score > alpha:
-        #         score = -self.search(depth - 1 + extension, ply + 1, -alpha - 1, -alpha)
+        elif extension == 0 and self.shouldReduce(move, depth, i):
+            # Late Move Reductions: assume our move orderer did a good job,
+            # so it is not worth it to look at all moves at the same depth equally.
+            # If this move turns out to be better than we expected, we'll re-search
+            # it at full depth
+            const reduction = 1
+            # We first do a null-window search to see if there's a move that beats alpha
+            # (we don't care about the actual value, so we search in the range [alpha, alpha + 1]
+            # to increase the number of cutoffs)
+            score = -self.search(depth - 1 - reduction, ply + 1, -alpha - 1, -alpha)
+            # If the null window search beats alpha, we do a full window reduced search to get a
+            # better feel for the actual score of the position. If the score turns out to beat alpha
+            # (but not beta) again, we'll re-search this at full depth later
+            if score > alpha:
+                score = -self.search(depth - 1 - reduction, ply + 1, -beta, -alpha)
         else:
             # Move wasn't reduced, just do a null window search
             score = -self.search(depth - 1 + extension, ply + 1, -alpha - 1, -alpha)
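
Condensed, the decision tree this hunk enables is: full window for the first move; for reducible late moves, a cheap reduced null-window probe followed by a reduced full-window verification if the probe beats alpha; a full-depth null-window probe otherwise. A control-flow sketch in pseudo-Nim (search stands in for the real negamax, and the final full-depth PVS re-search for scores landing inside the window is assumed to live in the part of the move loop this diff does not show):

    if i == 0:
        score = -search(depth - 1, ply + 1, -beta, -alpha)       # full window
    elif shouldReduce(move, depth, i):
        score = -search(depth - 2, ply + 1, -alpha - 1, -alpha)  # reduced probe
        if score > alpha:
            score = -search(depth - 2, ply + 1, -beta, -alpha)   # reduced verify
    else:
        score = -search(depth - 1, ply + 1, -alpha - 1, -alpha)  # null-window probe
    # assumed follow-up, outside this hunk:
    if i > 0 and score > alpha and score < beta:
        score = -search(depth - 1, ply + 1, -beta, -alpha)       # full re-search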
@@ -313,6 +359,10 @@ proc search(self: SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
                 # quadratic bonus wrt. depth is usually the bonus that is used (though some
                 # engines, namely Stockfish, use a linear bonus. Maybe we can investigate this)
                 self.history[][sideToMove][move.startSquare][move.targetSquare] += Score(depth * depth)
+                # Killer move heuristic: store moves that caused a beta cutoff according to the distance from
+                # root that they occurred at, as they might be good refutations for future moves from the opponent.
+                when defined(killers):
+                    self.storeKillerMove(ply, move)
             # This move was too good for us, opponent will not search it
             break
         if score > alpha:
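
With the quadratic bonus, depth dominates the history table: a quiet cutoff at depth 8 adds 64 points to its from/to square pair while one at depth 2 adds only 4, so refutations found high in the tree quickly float to the front of the move ordering.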
@@ -321,22 +371,23 @@ proc search(self: SearchManager, depth, ply: int, alpha, beta: Score): Score {.discardable.} =
             if ply == 0:
                 self.bestMoveRoot = move
                 self.bestRootScore = bestScore
-        else:
-            when defined(noScaleHistory):
-                if not move.isCapture() and self.history[][sideToMove][move.startSquare][move.targetSquare] > lowestEval():
-                    # Here, we punish moves that failed to raise alpha. This allows us to avoid scaling our values
-                    # after every search (which should retain more information about the explored subtreees) and
-                    # makes sure that moves that we thought were good but aren't are pushed further in the move list
-                    self.history[][sideToMove][move.startSquare][move.targetSquare] -= Score(depth * depth)
-            else:
-                discard
+        # TODO
+        # else:
+        #     when defined(noScaleHistory):
+        #         if not move.isCapture() and self.history[][sideToMove][move.startSquare][move.targetSquare] > lowestEval():
+        #             # Here, we punish moves that failed to raise alpha. This allows us to avoid scaling our values
+        #             # after every search (which should retain more information about the explored subtreees) and
+        #             # makes sure that moves that we thought were good but aren't are pushed further in the move list
+        #             self.history[][sideToMove][move.startSquare][move.targetSquare] -= Score(depth * depth)
+        #         else:
+        #             discard
 
     let nodeType = if bestScore >= beta: LowerBound elif bestScore <= alpha: UpperBound else: Exact
     self.transpositionTable[].store(depth.uint8, bestScore, self.board.position.zobristKey, bestMove, nodeType)
     return bestScore
 
 
-proc findBestMove*(self: SearchManager, timeRemaining, increment: int64, maxDepth: int, maxNodes: uint64, searchMoves: seq[Move]): Move =
+proc findBestMove*(self: var SearchManager, timeRemaining, increment: int64, maxDepth: int, maxNodes: uint64, searchMoves: seq[Move]): Move =
     ## Finds the best move in the current position
     ## and returns it, limiting search time according
     ## to the remaining time and increment values provided
@@ -351,7 +402,8 @@ proc findBestMove*(self: SearchManager, timeRemaining, increment: int64, maxDepth: int, maxNodes: uint64, searchMoves: seq[Move]): Move =
     # Apparently negative remaining time is a thing. Welp
     let
         maxSearchTime = max(1, (timeRemaining div 10) + (increment div 2))
-        softLimit = max(1, maxSearchTime div 3)
+        softLimit = maxSearchTime div 3
+    echo maxSearchTime
     self.bestMoveRoot = nullMove()
     result = self.bestMoveRoot
     self.maxNodes = maxNodes
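
Worked example of the budget: with 60,000 ms on the clock and a 1,000 ms increment, maxSearchTime = 60000 div 10 + 1000 div 2 = 6500 ms and softLimit = 6500 div 3 = 2166 ms; the hard limit is what timedOut checks mid-search, while the soft limit presumably gates starting another iterative-deepening iteration.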
@@ -365,8 +417,6 @@ proc findBestMove*(self: SearchManager, timeRemaining, increment: int64, maxDepth: int, maxNodes: uint64, searchMoves: seq[Move]): Move =
     self.searchFlag[].store(true)
     # Iterative deepening loop
     for i in 1..maxDepth:
-        if self.killers[].len() < i:
-            self.killers[].add([nullMove(), nullMove()])
         # Search the previous best move first
         self.previousBestMove = self.bestMoveRoot
         self.search(i, 0, lowestEval(), highestEval())

View File

@@ -458,7 +458,7 @@ proc commandLoop*: int =
                 of "rep":
                     echo "Position is drawn by repetition: ", if board.drawnByRepetition(): "yes" else: "no"
                 of "eval":
-                    echo &"Eval: {board.evaluate()}"
+                    echo &"Eval: {board.position.evaluate()}"
                 else:
                     echo &"Unknown command '{cmd[0]}'. Type 'help' for more information."
     except IOError:

View File

@@ -18,9 +18,6 @@ import std/strformat
 import std/atomics
 
-import threading/smartptrs
-
-
 import board
 import movegen
 import search
@@ -36,18 +33,18 @@
         # The current position
         position: Position
         # Atomic boolean flag to interrupt the search
-        stopFlag: SharedPtr[Atomic[bool]]
+        stopFlag: ptr Atomic[bool]
         # Atomic search flag used to know whether a search
         # is in progress
-        searchFlag: SharedPtr[Atomic[bool]]
+        searchFlag: ptr Atomic[bool]
         # Size of the transposition table (in megabytes)
         hashTableSize: uint64
         # The transposition table
-        transpositionTable: SharedPtr[TTable]
+        transpositionTable: ptr TTable
         # Storage for our history heuristic
-        historyTable: SharedPtr[HistoryTable]
+        historyTable: ptr HistoryTable
         # Storage for our killer move heuristic
-        killerMoves: SharedPtr[KillersTable]
+        killerMoves: ptr KillersTable
 
     UCICommandType = enum
         ## A UCI command type enumeration
@@ -327,10 +324,11 @@ proc bestMove(args: tuple[session: UCISession, command: UCICommand]) {.thread.} =
     var
         timeRemaining = (if session.position.sideToMove == White: command.wtime else: command.btime)
         increment = (if session.position.sideToMove == White: command.winc else: command.binc)
-    if timeRemaining == 0:
-        timeRemaining = int32.high()
     if command.moveTime != -1:
-        timeRemaining = command.moveTime
+        timeRemaining = 0
+        increment = command.moveTime
+    elif timeRemaining == 0:
+        timeRemaining = int32.high()
     var move = searcher.findBestMove(timeRemaining, increment, command.depth, command.nodes, command.searchmoves)
     echo &"bestmove {move.toAlgebraic()}"
@@ -345,13 +343,14 @@ proc startUCISession* =
         cmd: UCICommand
         cmdStr: string
         session = UCISession(hashTableSize: 64, position: startpos())
-    session.transpositionTable = newSharedPtr(TTable)
-    session.stopFlag = newSharedPtr(Atomic[bool])
-    session.searchFlag = newSharedPtr(Atomic[bool])
+    # God forbid we try to use atomic ARC like it was intended. Raw pointers
+    # it is then... sigh
+    session.transpositionTable = cast[ptr TTable](alloc0(sizeof(TTable)))
+    session.stopFlag = cast[ptr Atomic[bool]](alloc0(sizeof(Atomic[bool])))
+    session.searchFlag = cast[ptr Atomic[bool]](alloc0(sizeof(Atomic[bool])))
     session.transpositionTable[] = newTranspositionTable(session.hashTableSize * 1024 * 1024)
-    session.historyTable = newSharedPtr(HistoryTable)
-    session.killerMoves = newSharedPtr(KillersTable)
+    session.historyTable = cast[ptr HistoryTable](alloc0(sizeof(HistoryTable)))
+    session.killerMoves = cast[ptr KillersTable](alloc0(sizeof(KillersTable)))
+    session.stopFlag[].store(false)
     # Fun fact, nim doesn't collect the memory of thread vars. Another stupid fucking design pitfall
     # of nim's AWESOME threading model. Someone is getting a pipebomb in their mailbox about this, mark
     # my fucking words. (for legal purposes THAT IS A JOKE). See https://github.com/nim-lang/Nim/issues/23165
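
The cast-plus-alloc0 pattern works even for KillersTable, which is a seq, because alloc0 zero-fills the allocation and an all-zero seq is a valid empty seq under ARC/atomic ARC. A minimal sketch of the pattern with a hypothetical cleanup step (the session itself never frees these):

    import std/atomics

    type KillersTable = seq[array[2, string]]  # stand-in for the real type

    let stopFlag = cast[ptr Atomic[bool]](alloc0(sizeof(Atomic[bool])))
    let killers = cast[ptr KillersTable](alloc0(sizeof(KillersTable)))

    stopFlag[].store(false)
    killers[].add(["e2e4", "g1f3"])  # zeroed memory acts as an empty seq
    echo killers[].len()             # 1

    # Hypothetical cleanup, not part of the commit:
    killers[] = @[]                  # drop the seq payload before freeing
    dealloc(killers)
    dealloc(stopFlag)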
@@ -412,7 +411,6 @@ proc startUCISession* =
                 else:
                     discard
             of Position:
-                echo session.history
                 discard
             else:
                 discard