diff --git a/src/main.nim b/src/main.nim
index 0235586..0fe412f 100644
--- a/src/main.nim
+++ b/src/main.nim
@@ -7,6 +7,7 @@
 import std/tables
 import std/math
 import std/random
 import std/algorithm
+import std/strformat
 
 ## A bunch of activation functions
@@ -27,28 +28,29 @@ func ind2sub(n: int, shape: tuple[rows, cols: int]): tuple[row, col: int] =
 
 proc loss(params: TableRef[string, float]): float =
   ## Our loss function for tris
-  result = params["moves"]
-  if int(params["result"]) == GameStatus.Draw.int:
-    result += 6.0
-  elif int(params["result"]) == GameStatus.Lose.int:
-    result += 12.0
-  result = sigmoid(result)
+  if params.hasKey("sameMove"):
+    result = 24 - params["moves"]
+  else:
+    result = params["moves"]
+    if int(params["result"]) == GameStatus.Draw.int:
+      result += 6
+    elif int(params["result"]) == GameStatus.Lose.int:
+      result += 12
+  echo result
 
 
 proc compareNetworks(a, b: NeuralNetwork): int =
+  if a.params.len() == 0:
+    return -1
+  elif b.params.len() == 0:
+    return 1
   return cmp(loss(a.params), loss(b.params))
 
 
 proc crossover(a, b: NeuralNetwork): NeuralNetwork =
   result = deepCopy(a)
-  for i, layer in a.layers:
-    result.layers[i].weights = layer.weights.copy()
-    result.layers[i].biases = layer.biases.copy()
   var i = 0
   while i < a.layers.len():
-    result.layers[i] = new(Layer)
-    result.layers[i].inputSize = a.layers[i].inputSize
-    result.layers[i].outputSize = a.layers[i].outputSize
     # We inherit 50% of our weights and biases from our first
     # parent and the other 50% from the other parent
     result.layers[i].weights = where(rand[float](a.layers[i].weights.shape) >= 0.5, a.layers[i].weights, b.layers[i].weights)
@@ -65,23 +67,25 @@ proc crossover(a, b: NeuralNetwork): NeuralNetwork =
 
 ## Our training program
 
-const Population = 2
-const Iterations = 100
+const Population = 100
+const Iterations = 300
 const Epochs = 10
-const Take = 2
+const Take = 15
 
 var networks: seq[NeuralNetwork] = @[]
+var best: seq[NeuralNetwork] = @[]
 
 for _ in 0..
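
For reference, here is a minimal, self-contained sketch of the 50/50 crossover mask used in `crossover` above, written against plain seq[float] instead of the project's tensor type (the element-wise `where(rand[float](shape) >= 0.5, ...)` becomes a per-element coin flip). The names maskedCrossover, parentA and parentB are illustrative only, not part of this codebase:

    import std/random

    proc maskedCrossover(a, b: seq[float]): seq[float] =
      ## For each position, inherit the value from `a` with probability
      ## 0.5, otherwise inherit it from `b` -- the same coin flip the
      ## diff performs element-wise over the weight and bias tensors.
      result = newSeq[float](a.len)
      for i in 0 ..< a.len:
        result[i] = if rand(1.0) >= 0.5: a[i] else: b[i]

    when isMainModule:
      randomize()
      let parentA = @[1.0, 1.0, 1.0, 1.0]
      let parentB = @[0.0, 0.0, 0.0, 0.0]
      # Each run yields a different mix, e.g. @[1.0, 0.0, 0.0, 1.0]
      echo maskedCrossover(parentA, parentB)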