quacknet.main

from . import backPropgation
from quacknet.activationFunctions import relu, sigmoid, tanH, linear, softMax
from quacknet.lossFunctions import MSELossFunction, MAELossFunction, CrossEntropyLossFunction
from quacknet.optimisers import Optimisers
from quacknet.initialisers import Initialisers
from quacknet.writeAndReadWeightBias import writeAndRead
from quacknet.convulationalManager import CNNModel
from quacknet.dataAugmentation import Augementation
import numpy as np
import matplotlib.pyplot as plt

class Network(Optimisers, Initialisers, writeAndRead, CNNModel, Augementation):
    def __init__(self, lossFunc="MSE", learningRate=0.01, optimisationFunc="gd", useMomentum=False, momentumCoefficient=0.9, momentumDecay=0.99, useBatches=False, batchSize=32):
        """
        Args:
            lossFunc (str): Loss function name ('mse', 'mae', 'cross'). Default is "MSE".
            learningRate (float, optional): Learning rate for training. Default is 0.01.
            optimisationFunc (str, optional): Optimisation method ('gd', 'sgd', 'batching'). Default is "gd".
            useMomentum (bool, optional): Whether to use momentum in optimisation. Default is False.
            momentumCoefficient (float, optional): Momentum coefficient if used. Default is 0.9.
            momentumDecay (float, optional): Decay rate for momentum. Default is 0.99.
            useBatches (bool, optional): Whether to use mini batches. Default is False.
            batchSize (int, optional): Size of mini batches. Default is 32.
        """
        self.layers = []
        self.weights = []
        self.biases = []
        self.learningRate = learningRate

        lossFunctionDict = {
            "mse": MSELossFunction,
            "mae": MAELossFunction,
            "cross entropy": CrossEntropyLossFunction,
            "cross": CrossEntropyLossFunction,
        }
        self.lossFunction = lossFunctionDict[lossFunc.lower()]

        optimisationFunctionDict = {
            "gd": self._trainGradientDescent,
            "sgd": self._trainStochasticGradientDescent,
            "batching": self._trainGradientDescentUsingBatching,
            "batches": self._trainGradientDescentUsingBatching,
        }
        self.optimisationFunction = optimisationFunctionDict[optimisationFunc.lower()]
        if(useBatches == True):
            self.optimisationFunction = self._trainGradientDescentUsingBatching

        self.useMomentum = useMomentum
        self.momentumCoefficient = momentumCoefficient
        self.momentumDecay = momentumDecay
        self.velocityWeight = None
        self.velocityBias = None

        self.useBatches = useBatches
        self.batchSize = batchSize

    def addLayer(self, size, activationFunction="relu"):
        """
        Add a layer to the network with the specified number of nodes and activation function.

        Args:
            size (int): Number of nodes in the new layer.
            activationFunction (str, optional): Activation function name ('relu', 'sigmoid', 'linear', 'tanh', 'softmax'). Default is "relu".
        """
        funcs = {
            "relu": relu,
            "sigmoid": sigmoid,
            "linear": linear,
            "tanh": tanH,
            "softmax": softMax,
        }
        if(activationFunction.lower() not in funcs):
            raise ValueError(f"Activation function not implemented: {activationFunction.lower()}")
        self.layers.append([size, funcs[activationFunction.lower()]])

    def _calculateLayerNodes(self, lastLayerNodes, lastLayerWeights, biases, currentLayer) -> np.ndarray:
        """
        Calculate the output of a layer given inputs, weights and biases.

        Args:
            lastLayerNodes (ndarray): Output from the previous layer.
            lastLayerWeights (ndarray): Weights connecting the previous layer.
            biases (ndarray): Biases of the current layer.
            currentLayer (list): List containing layer size and activation function.

        Returns:
            ndarray: Output of the current layer.
        """
        summ = np.dot(lastLayerNodes, lastLayerWeights) + biases
        if(currentLayer[1] != softMax):
            return currentLayer[1](summ)
        else:
            return softMax(summ)

    def forwardPropagation(self, inputData):
        """
        Perform forward propagation through the network for the given input data.

        Args:
            inputData (list): Input data for the network.

        Returns:
            list of ndarray: List containing the outputs of each layer, including the input layer.
        """
        layerNodes = [np.array(inputData)]
        for i in range(1, len(self.layers)):
            layerNodes.append(np.array(self._calculateLayerNodes(layerNodes[i - 1], self.weights[i - 1], self.biases[i - 1], self.layers[i])))
        return layerNodes

    def _backPropgation(self, layerNodes, weights, biases, trueValues, returnErrorTermForCNN=False):
        """
        Perform backpropagation over the network layers to compute gradients for weights and biases.

        Args:
            layerNodes (list of ndarray): List of output values for each layer.
            weights (list of ndarray): List of weights for each layer.
            biases (list of ndarray): List of biases for each layer.
            trueValues (ndarray): True target values for the output layer.
            returnErrorTermForCNN (bool, optional): Whether to return error terms for CNN backpropagation. Defaults to False.

        Returns:
            weightGradients (list of ndarray): Gradients of weights for each layer.
            biasGradients (list of ndarray): Gradients of biases for each layer.
            If returnErrorTermForCNN is True:
                hiddenWeightErrorTermsForCNNBackpropgation (ndarray): Error terms from the output layer weights.
        """
        return backPropgation._backPropgation(layerNodes, weights, biases, trueValues, self.layers, self.lossFunction, returnErrorTermForCNN)

    def train(self, inputData, labels, epochs):
        """
        Train the neural network using the specified optimisation function.

        Args:
            inputData (list of lists): All of the training input data.
            labels (list of ndarray): All of the labels for all the input data.
            epochs (int): Number of training epochs.

        Returns:
            float: Average accuracy over all epochs.
            float: Average loss over all epochs.
        """
        self._checkIfNetworkCorrect()
        correct = 0
        totalLoss = 0
        nodes, self.weights, self.biases, self.velocityWeight, self.velocityBias = self.optimisationFunction(inputData, labels, epochs, self.weights, self.biases, self.momentumCoefficient, self.momentumDecay, self.useMomentum, self.velocityWeight, self.velocityBias, self.learningRate, self.batchSize)
        lastLayer = len(nodes[0]) - 1
        labels = np.tile(labels, (epochs, 1)) # repeats the labels once per epoch, e.g. np.tile([1, 2], (3, 1)) becomes [[1, 2], [1, 2], [1, 2]]
        for i in range(len(nodes)):
            totalLoss += self.lossFunction(nodes[i][lastLayer], labels[i])
            nodeIndex = np.argmax(nodes[i][lastLayer])
            labelIndex = np.argmax(labels[i])
            if(nodeIndex == labelIndex):
                correct += 1
        return correct / (len(labels) * epochs), totalLoss / (len(labels) * epochs)

    def _checkIfNetworkCorrect(self): # checks that activation and loss function choices follow the rules below
        for i in range(len(self.layers) - 1): # softmax may only be used in the output layer
            if(self.layers[i][1] == softMax):
                raise ValueError(f"Softmax shouldn't be used in non-output layers. Error at layer {i + 1}")
        usingSoftMax = self.layers[len(self.layers) - 1][1] == softMax
        if(usingSoftMax == True):
            if(self.lossFunction != CrossEntropyLossFunction): # softmax must be paired with cross entropy loss
                raise ValueError("Softmax output layer requires Cross Entropy loss function")
        elif(self.lossFunction == CrossEntropyLossFunction):
            raise ValueError("Cross Entropy loss function requires Softmax output layer")

    def drawGraphs(self, allAccuracy, allLoss):
        """
        Plot training accuracy and loss graphs over epochs for multiple runs.

        Args:
            allAccuracy (list of lists): Accuracy at each epoch for each run.
            allLoss (list of lists): Loss at each epoch for each run.

        Displays:
            Matplotlib plots of accuracy and loss trends.
        """
        epochs = list(range(1, len(allAccuracy[0]) + 1))
        figure, axis = plt.subplots(1, 2)
        meanAccuracy = np.mean(allAccuracy, axis=0)
        meanLoss = np.mean(allLoss, axis=0)

        for i in range(len(allAccuracy)):
            axis[0].plot(epochs, allAccuracy[i], marker="o", label=f'Run {i+1}', alpha=0.3)
        axis[0].plot(epochs, meanAccuracy, marker="o", label='Average', alpha=1)
        axis[0].set_xticks(epochs)
        axis[0].set_xlabel("epochs")
        axis[0].set_ylabel("accuracy")
        axis[0].set_title("model accuracy")
        axis[0].grid(True)
        axis[0].legend()

        for i in range(len(allLoss)):
            axis[1].plot(epochs, allLoss[i], marker="o", label=f'Run {i+1}', alpha=0.3)
        axis[1].plot(epochs, meanLoss, marker="o", label='Average', alpha=1)
        axis[1].set_xticks(epochs)
        axis[1].set_xlabel("epochs")
        axis[1].set_ylabel("loss")
        axis[1].set_title("model loss")
        axis[1].grid(True)
        axis[1].legend()

        plt.tight_layout()
        plt.show()


# to check how much of the code the unit tests cover: coverage run -m pytest unitTests/
# then to see the results: coverage report -m
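
A minimal end-to-end sketch of the class above, for orientation. Weights and biases are filled in by hand with NumPy here, since this module leaves initialisation to the Initialisers mixin, whose API is not shown; the import path and the toy layer sizes are assumptions.

# Usage sketch (not part of the library source).
import numpy as np
from quacknet.main import Network   # import path assumed from the module title

net = Network(lossFunc="cross", optimisationFunc="gd")
net.addLayer(4, "relu")      # input layer: 4 features
net.addLayer(8, "relu")      # hidden layer
net.addLayer(3, "softmax")   # output layer: 3 classes (cross entropy requires softmax)

# Hand-rolled initialisation, matching the shapes forwardPropagation expects:
# weights[i] is (size of layer i, size of layer i + 1), biases[i] is (size of layer i + 1,).
rng = np.random.default_rng(0)
for (inSize, _), (outSize, _) in zip(net.layers, net.layers[1:]):
    net.weights.append(rng.standard_normal((inSize, outSize)) * 0.1)
    net.biases.append(np.zeros(outSize))

outputs = net.forwardPropagation([0.5, 0.1, -0.2, 0.3])
print(outputs[-1])           # softmax probabilities over the 3 classes
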
Network(lossFunc='MSE', learningRate=0.01, optimisationFunc='gd', useMomentum=False, momentumCoefficient=0.9, momentumDecay=0.99, useBatches=False, batchSize=32)

Args:
    lossFunc (str): Loss function name ('mse', 'mae', 'cross'). Default is "MSE".
    learningRate (float, optional): Learning rate for training. Default is 0.01.
    optimisationFunc (str, optional): Optimisation method ('gd', 'sgd', 'batching'). Default is "gd".
    useMomentum (bool, optional): Whether to use momentum in optimisation. Default is False.
    momentumCoefficient (float, optional): Momentum coefficient if used. Default is 0.9.
    momentumDecay (float, optional): Decay rate for momentum. Default is 0.99.
    useBatches (bool, optional): Whether to use mini batches. Default is False.
    batchSize (int, optional): Size of mini batches. Default is 32.

Attributes:
    layers
    weights
    biases
    learningRate
    lossFunction
    optimisationFunction
    useMomentum
    momentumCoefficient
    momentumDecay
    velocityWeight
    velocityBias
    useBatches
    batchSize
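
As the source shows, 'cross' and 'cross entropy' select the same loss function, 'batching' and 'batches' the same optimiser, and useBatches=True overrides whatever optimisationFunc names. A short sketch of two constructions that end up with the same mini-batch training routine:

# Both networks end up using _trainGradientDescentUsingBatching.
a = Network(lossFunc="cross", optimisationFunc="batching", batchSize=64)
b = Network(lossFunc="cross entropy", optimisationFunc="gd", useBatches=True, batchSize=64)  # useBatches overrides "gd"
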
def addLayer(self, size, activationFunction='relu'):

Add a layer to the network with the specified number of nodes and activation function.

Args:
    size (int): Number of nodes in the new layer.
    activationFunction (str, optional): Activation function name ('relu', 'sigmoid', 'linear', 'tanh', 'softmax'). Default is "relu".
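
A sketch of building a small classifier layer by layer. Note that _checkIfNetworkCorrect (in the source above) rejects softmax anywhere but the output layer, and requires it to be paired with the cross entropy loss:

net = Network(lossFunc="cross")
net.addLayer(784, "relu")     # input layer (e.g. a 28x28 image, flattened)
net.addLayer(128, "relu")     # hidden layer
net.addLayer(10, "softmax")   # output layer; softmax is only valid here
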

def forwardPropagation(self, inputData):

Perform forward propagation through the network for the given input data.

Args:
    inputData (list): Input data for the network.

Returns:
    list of ndarray: List containing the outputs of each layer, including the input layer.
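
A small sketch of the return value, assuming the network's weights and biases have already been populated (for instance as in the initialisation sketch after the module source):

layerOutputs = net.forwardPropagation([0.5, 0.1, -0.2, 0.3])
print(len(layerOutputs))       # one array per layer, input layer included
prediction = layerOutputs[-1]  # activations of the output layer
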

def train(self, inputData, labels, epochs):

Train the neural network using the specified optimisation function.

Args:
    inputData (list of lists): All of the training input data.
    labels (list of ndarray): All of the labels for all the input data.
    epochs (int): Number of training epochs.

Returns:
    float: Average accuracy over all epochs.
    float: Average loss over all epochs.
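
A hedged sketch with made-up toy data, assuming a network built and initialised as in the earlier sketches; the data shapes are assumptions, and labels are one-hot so that the np.argmax comparison in the source is meaningful:

# Toy data: 100 samples with 4 features, one-hot labels over 3 classes.
inputs = np.random.rand(100, 4).tolist()
labels = np.eye(3)[np.random.randint(0, 3, size=100)]
accuracy, loss = net.train(inputs, labels, epochs=5)
print(f"average accuracy: {accuracy:.3f}, average loss: {loss:.3f}")
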

def drawGraphs(self, allAccuracy, allLoss):

Plot training accuracy and loss graphs over epochs for multiple runs.

Args:
    allAccuracy (list of lists): Accuracy at each epoch for each run.
    allLoss (list of lists): Loss at each epoch for each run.

Displays:
    Matplotlib plots of accuracy and loss trends.
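
A sketch of the expected shapes: one inner list per run, one value per epoch. Here each epoch's numbers come from a single-epoch train call, purely to illustrate how the lists are assembled:

allAccuracy, allLoss = [], []
for run in range(3):                   # 3 runs
    accPerEpoch, lossPerEpoch = [], []
    for epoch in range(10):            # 10 epochs per run
        accuracy, loss = net.train(inputs, labels, epochs=1)
        accPerEpoch.append(accuracy)
        lossPerEpoch.append(loss)
    allAccuracy.append(accPerEpoch)
    allLoss.append(lossPerEpoch)
net.drawGraphs(allAccuracy, allLoss)   # left panel: accuracy, right panel: loss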