Moved utility functions to neural network library

Mattia Giambirtone 2023-03-20 12:11:40 +01:00
parent 01525da889
commit ac0cdfdc92
Signed by: nocturn9x
GPG Key ID: 8270F9F467971E59
2 changed files with 52 additions and 43 deletions


@@ -1,49 +1,11 @@
import nn/network
import nn/util/matrix
import std/math


# Mean squared error
proc mse(a, b: Matrix[float]): float =
    result = (b - a).apply(proc (x: float): float = pow(x, 2), axis = -1).sum() / len(a).float

# Derivative of MSE
func dxMSE*(x, y: Matrix[float]): Matrix[float] = 2.0 * (x - y)

# A bunch of vectorized activation functions
func sigmoid*(input: Matrix[float]): Matrix[float] =
    result = input.apply(proc (x: float): float = 1 / (1 + exp(-x)), axis = -1)

func sigmoidDerivative*(input: Matrix[float]): Matrix[float] = sigmoid(input) * (1.0 - sigmoid(input))

func softmax*(input: Matrix[float]): Matrix[float] =
    var input = input - input.max()
    result = input.apply(math.exp, axis = -1) / input.apply(math.exp, axis = -1).sum()

func softmaxDerivative*(input: Matrix[float]): Matrix[float] =
    var input = input.reshape(input.shape.cols, 1)
    result = input.diagflat() - input.dot(input.transpose())

# Heaviside step function (0 for x < 0, 1 otherwise)
func step*(input: Matrix[float]): Matrix[float] = input.apply(proc (x: float): float = (if x < 0.0: 0.0 else: 1.0), axis = -1)

# SiLU(x) = x * sigmoid(x)
func silu*(input: Matrix[float]): Matrix[float] = input.apply(proc (x: float): float = x / (1 + exp(-x)), axis = -1)

func relu*(input: Matrix[float]): Matrix[float] = input.apply(proc (x: float): float = max(0.0, x), axis = -1)

func htan*(input: Matrix[float]): Matrix[float] =
    let f = proc (x: float): float =
        let temp = exp(2 * x)
        result = (temp - 1) / (temp + 1)
    input.apply(f, axis = -1)


var mlp = newNeuralNetwork(@[newDenseLayer(2, 3, newActivation(sigmoid, sigmoidDerivative)),
                             newDenseLayer(3, 2, newActivation(sigmoid, sigmoidDerivative)),
                             newDenseLayer(2, 3, newActivation(softmax, softmaxDerivative))],
                           lossFunc=newLoss(mse, dxMSE), learnRate=0.05, momentum=0.55,
var mlp = newNeuralNetwork(@[newDenseLayer(2, 3, Sigmoid),
                             newDenseLayer(3, 2, Sigmoid),
                             newDenseLayer(2, 3, Softmax)],
                           lossFunc=MSE, learnRate=0.05, momentum=0.55,
                           weightRange=(start: -1.0, stop: 1.0), biasRange=(start: -10.0, stop: 10.0))

echo mlp.feedforward(newMatrix[float](@[1.0, 2.0]))
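
For a quick sanity check of the loss helpers being moved: mse above sums the squared differences (b - a)^2 and divides by len(a), while dxMSE returns 2 * (x - y), the elementwise derivative of the squared error with respect to the prediction x. A minimal plain-Nim sketch of the same arithmetic (mseSeq and dxMseSeq are illustrative names operating on seq[float], independent of the library's Matrix type):

import std/math

# Mean squared error over plain sequences: sum((b - a)^2) / len(a),
# mirroring the mse proc above.
proc mseSeq(a, b: seq[float]): float =
    for i in 0 ..< a.len:
        result += pow(b[i] - a[i], 2)
    result /= a.len.float

# Elementwise derivative of the squared error w.r.t. the prediction x:
# d/dx (x - y)^2 = 2 * (x - y), mirroring dxMSE above.
proc dxMseSeq(x, y: seq[float]): seq[float] =
    result = newSeq[float](x.len)
    for i in 0 ..< x.len:
        result[i] = 2.0 * (x[i] - y[i])

when isMainModule:
    echo mseSeq(@[1.0, 2.0], @[1.5, 1.0])   # ((0.5)^2 + (-1.0)^2) / 2 = 0.625
    echo dxMseSeq(@[1.0, 2.0], @[1.5, 1.0]) # @[-1.0, 2.0]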


@@ -17,6 +17,7 @@ import util/matrix
import std/strformat
import std/random
import std/math
randomize()
@@ -142,4 +143,50 @@ proc feedforward*(self: NeuralNetwork, data: Matrix[float]): Matrix[float] =
proc backprop(self: NeuralNetwork, x, y: Matrix[float]) {.used.} =
    ## Performs a single backpropagation step and updates the
    ## gradients for our weights and biases, layer by layer

## Utility functions

# Mean squared error
proc mse(a, b: Matrix[float]): float =
    result = (b - a).apply(proc (x: float): float = pow(x, 2), axis = -1).sum() / len(a).float

# Derivative of MSE
func dxMSE(x, y: Matrix[float]): Matrix[float] = 2.0 * (x - y)

# A bunch of vectorized activation functions
func sigmoid(input: Matrix[float]): Matrix[float] =
    result = input.apply(proc (x: float): float = 1 / (1 + exp(-x)), axis = -1)

func sigmoidDerivative(input: Matrix[float]): Matrix[float] = sigmoid(input) * (1.0 - sigmoid(input))

func softmax(input: Matrix[float]): Matrix[float] =
    var input = input - input.max()
    result = input.apply(math.exp, axis = -1) / input.apply(math.exp, axis = -1).sum()

func softmaxDerivative(input: Matrix[float]): Matrix[float] =
    var input = input.reshape(input.shape.cols, 1)
    result = input.diagflat() - input.dot(input.transpose())

# Heaviside step function (0 for x < 0, 1 otherwise)
func step(input: Matrix[float]): Matrix[float] {.used.} = input.apply(proc (x: float): float = (if x < 0.0: 0.0 else: 1.0), axis = -1)

# SiLU(x) = x * sigmoid(x)
func silu(input: Matrix[float]): Matrix[float] {.used.} = input.apply(proc (x: float): float = x / (1 + exp(-x)), axis = -1)

func relu(input: Matrix[float]): Matrix[float] {.used.} = input.apply(proc (x: float): float = max(0.0, x), axis = -1)

func htan(input: Matrix[float]): Matrix[float] {.used.} =
    let f = proc (x: float): float =
        let temp = exp(2 * x)
        result = (temp - 1) / (temp + 1)
    input.apply(f, axis = -1)


{.push.}
{.hints: off.} # So Nim doesn't complain about the naming
var Sigmoid* = newActivation(sigmoid, sigmoidDerivative)
var Softmax* = newActivation(softmax, softmaxDerivative)
var MSE* = newLoss(mse, dxMSE)
{.pop.}
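
For reference, softmaxDerivative above reshapes its argument into a column vector s and returns diagflat(s) - s * s^T; when s is a softmax output, this is the softmax Jacobian with entries s_i * (delta_ij - s_j). A minimal sketch of the same computation on plain seq[float], independent of the nn/util/matrix API (softmaxSeq and softmaxJacobian are hypothetical helper names):

import std/math

# Softmax over a plain sequence, with the same max-subtraction trick
# used above for numerical stability.
proc softmaxSeq(x: seq[float]): seq[float] =
    var m = x[0]
    for v in x:
        if v > m: m = v
    var total = 0.0
    result = newSeq[float](x.len)
    for i, v in x:
        result[i] = exp(v - m)
        total += result[i]
    for i in 0 ..< result.len:
        result[i] /= total

# Jacobian entries: J[i][j] = s[i] * (delta(i, j) - s[j]),
# i.e. diag(s) - s * s^T, matching the diagflat/dot expression above.
proc softmaxJacobian(x: seq[float]): seq[seq[float]] =
    let s = softmaxSeq(x)
    result = newSeq[seq[float]](s.len)
    for i in 0 ..< s.len:
        result[i] = newSeq[float](s.len)
        for j in 0 ..< s.len:
            let delta = if i == j: 1.0 else: 0.0
            result[i][j] = s[i] * (delta - s[j])

when isMainModule:
    # Each row of the Jacobian sums to zero, since softmax outputs sum to one.
    echo softmaxJacobian(@[1.0, 2.0, 3.0])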