diff --git a/Chess/nimfish/nimfishpkg/search.nim b/Chess/nimfish/nimfishpkg/search.nim
index d0afa18..bd04a85 100644
--- a/Chess/nimfish/nimfishpkg/search.nim
+++ b/Chess/nimfish/nimfishpkg/search.nim
@@ -51,10 +51,10 @@ const
     # Constants to configure FP
     # (Futility pruning)
 
-    # Limit after which FP is disabled
-    FP_DEPTH_LIMIT = 1
+    # Limit after which FP is disabled (TODO)
+    FP_DEPTH_LIMIT {.used.} = 1
     # Advantage threshold
-    FP_EVAL_MARGIN = 125
+    FP_EVAL_MARGIN {.used.} = 250
 
     NUM_KILLERS* = 2
     MAX_DEPTH* = 255
@@ -469,7 +469,7 @@ proc search(self: var SearchManager, depth, ply: int, alpha, beta: Score, isPV:
         if ply == 0 and self.searchMoves.len() > 0 and move notin self.searchMoves:
             continue
         when defined(FP):
-            if not isPV and depth <= FP_DEPTH_LIMIT and staticEval + FP_EVAL_MARGIN * depth < alpha and bestScore > mateScore() - MAX_DEPTH:
+            if not isPV and move.isQuiet() and depth <= FP_DEPTH_LIMIT and staticEval + FP_EVAL_MARGIN * depth < alpha and bestScore > mateScore() + MAX_DEPTH:
                 # Futility pruning: If a move cannot meaningfully improve alpha, prune it from the
                 # tree. Much like RFP, this is an unsound optimization (and a riskier one at that,
                 # apparently), so our depth limit and evaluation margins are very conservative
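
For reference, below is a minimal, self-contained sketch of the futility-pruning gate as it reads after this change, i.e. with the move.isQuiet() requirement added and the margin raised to 250. The Score alias, the mateScore() sentinel value, and the shouldFutilityPrune helper are simplified stand-ins invented for illustration here; they are not Nimfish's actual definitions.

```nim
# Minimal sketch only: Score, mateScore() and the helper below are stand-ins,
# not the engine's real API. Constants mirror the values in this diff.

type Score = int32

const
  FP_DEPTH_LIMIT = 1    # only prune at depth <= 1
  FP_EVAL_MARGIN = 250  # centipawn margin per ply of remaining depth
  MAX_DEPTH = 255

func mateScore(): Score =
  # Stand-in sentinel for "we are getting mated"; the real engine derives
  # this from its lowest representable evaluation.
  Score(-30_000)

func shouldFutilityPrune(isPV, isQuiet: bool, depth: int,
                         staticEval, alpha, bestScore: Score): bool =
  ## Skip a quiet move at shallow depth when the static eval plus an
  ## optimistic margin still cannot reach alpha, unless the best score
  ## found so far is already a "getting mated" score.
  not isPV and isQuiet and
    depth <= FP_DEPTH_LIMIT and
    staticEval + Score(FP_EVAL_MARGIN * depth) < alpha and
    bestScore > mateScore() + MAX_DEPTH

when isMainModule:
  # A quiet move at depth 1 whose static eval sits 350cp below alpha is pruned.
  doAssert shouldFutilityPrune(isPV = false, isQuiet = true, depth = 1,
                               staticEval = Score(-50), alpha = Score(300),
                               bestScore = Score(0))
  # The same position on a PV node is never pruned.
  doAssert not shouldFutilityPrune(isPV = true, isQuiet = true, depth = 1,
                                   staticEval = Score(-50), alpha = Score(300),
                                   bestScore = Score(0))
```

Restricting the pruning to quiet moves keeps captures and promotions in the tree, which is the conventional way to make this admittedly unsound shortcut less risky.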