# NNExperiments/src/nn/layer.nim

# Copyright 2022 Mattia Giambirtone & All Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import util/matrix
import util/losses
import util/activations

import std/strformat
import std/random

# Seed the default random number generator (used below
# for weight initialization)
randomize()

type
  Layer* = ref object
    ## A generic neural network
    ## layer
    inputSize*: int     # The number of inputs we process
    outputSize*: int    # The number of outputs we produce
    weights*: Matrix[float]    # The weights for each connection (2D)
    biases*: Matrix[float]     # The biases for each neuron (1D)
    activation: Activation     # The activation function along with its derivative
    loss: Loss                 # The cost function used in training
    gradients: tuple[weights, biases: Matrix[float]]    # Gradient coefficients for weights and biases
    learnRate: float           # The speed at which we perform gradient descent
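
# Shape sketch (illustrative): with inputSize = 3 and outputSize = 2,
# weights is a 2x3 matrix (one row of 3 weights per output neuron) and
# biases is a 1x2 row vector, matching how newLayer builds them below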

proc `$`*(self: Layer): string =
  ## Returns a string representation
  ## of the layer
  result = &"Layer(inputs={self.inputSize}, outputs={self.outputSize})"

proc newLayer*(inputSize: int, outputSize: int, activation: Activation, loss: Loss, learnRate: float, weightRange: tuple[start, stop: float]): Layer =
  ## Creates a new layer with inputSize inputs
  ## and outputSize outputs. Weights are
  ## initialized with random values in the
  ## chosen range
  new(result)
  result.inputSize = inputSize
  result.outputSize = outputSize
  var biases = newSeqOfCap[float](outputSize)
  var biasGradients = newSeqOfCap[float](outputSize)
  for _ in 0..<outputSize:
    biases.add(0.0)
    biasGradients.add(0.0)
  # One row of inputSize weights for each of the outputSize neurons
  var weights = newSeqOfCap[seq[float]](outputSize)
  var weightGradients = newSeqOfCap[seq[float]](outputSize)
  for _ in 0..<outputSize:
    weights.add(@[])
    weightGradients.add(@[])
    for _ in 0..<inputSize:
      weights[^1].add(rand(weightRange.start..weightRange.stop))
      weightGradients[^1].add(0.0)
  result.biases = newMatrix[float](biases)
  result.weights = newMatrix[float](weights)
  result.activation = activation
  result.loss = loss
  result.gradients = (weights: newMatrix[float](weightGradients), biases: newMatrix[float](biasGradients))
  result.learnRate = learnRate
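
# Hypothetical usage sketch. `sigmoid` and `mse` stand in for whatever
# Activation and Loss values util/activations and util/losses actually
# export (those names are assumptions, not shown in this file):
#
#   let layer = newLayer(inputSize = 3, outputSize = 2,
#                        activation = sigmoid, loss = mse,
#                        learnRate = 0.05, weightRange = (start: -1.0, stop: 1.0))
#   echo layer   # -> Layer(inputs=3, outputs=2)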

proc compute*(self: Layer, data: Matrix[float]): Matrix[float] =
  ## Computes the output of the layer for the
  ## given input data and returns it as a
  ## one-dimensional matrix
  var sequence = newSeqOfCap[float](self.outputSize)
  for i, weights in self.weights:
    # This looks fancy, but it's just abstracting some of the
    # complexity away to the matrix library and is equivalent
    # to the nested for-loop approach (although more idiomatic
    # and probably faster)
    sequence.add(self.activation.function((weights * data).sum() + self.biases[0, i]))
  result = newMatrix[float](sequence)
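
# A minimal reference sketch of the nested for-loop approach the comment
# in compute alludes to, spelled out for clarity. It assumes the Matrix
# type supports two-index element access ([row, col]) on both the weights
# and the input, consistent with the biases[0, i] access used above;
# treat it as illustrative rather than part of the module's API.
proc computeNaive*(self: Layer, data: Matrix[float]): Matrix[float] =
  ## Same forward pass as compute, but with the
  ## weighted sum written as an explicit loop
  var sequence = newSeqOfCap[float](self.outputSize)
  for i in 0..<self.outputSize:
    var weightedSum = 0.0
    for j in 0..<self.inputSize:
      weightedSum += self.weights[i, j] * data[0, j]
    sequence.add(self.activation.function(weightedSum + self.biases[0, i]))
  result = newMatrix[float](sequence)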