quacknet.initialisers

import math
import numpy as np
from quacknet.activationFunctions import relu, sigmoid

class Initialisers:
    def createWeightsAndBiases(self):
        # weights[i] has shape (size of previous layer, size of current layer);
        # biases[i] has shape (size of current layer,)
        for i in range(1, len(self.layers)):
            currSize = self.layers[i][0]      # fan in of the next layer / neurons in this layer
            lastSize = self.layers[i - 1][0]  # fan in: neurons in the previous layer
            actFunc = self.layers[i][1]

            # Pick the standard deviation for the normal sampling below
            if actFunc == relu:
                bounds = math.sqrt(2 / lastSize)  # He initialisation
            elif actFunc == sigmoid:
                # Xavier (Glorot) initialisation for normal sampling;
                # sqrt(6 / (fan in + fan out)) is the corresponding bound for uniform sampling
                bounds = math.sqrt(2 / (lastSize + currSize))
            else:
                bounds = 1

            self.weights.append(np.random.normal(0, bounds, size=(lastSize, currSize)))
            self.biases.append(np.random.normal(0, bounds, size=(currSize,)))
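createWeightsAndBiases reads self.layers, self.weights, and self.biases from the host object, so the mixin only works once those attributes exist. A minimal usage sketch, assuming a hypothetical Network host class (the (size, activation) tuple layout is taken from how the loop indexes self.layers):

    from quacknet.activationFunctions import relu, sigmoid
    from quacknet.initialisers import Initialisers

    class Network(Initialisers):          # hypothetical host class, not part of quacknet
        def __init__(self, layers):
            self.layers = layers          # list of (layer size, activation function) pairs
            self.weights = []
            self.biases = []

    net = Network([(784, None), (128, relu), (10, sigmoid)])
    net.createWeightsAndBiases()
    print([w.shape for w in net.weights])  # [(784, 128), (128, 10)]
    print([b.shape for b in net.biases])   # [(128,), (10,)]

The first layer's activation is never read, since the loop starts at index 1; only its size matters.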
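As a quick sanity check (reusing the hypothetical Network sketch above), the empirical standard deviation of a freshly drawn weight matrix should sit close to the chosen bound:

    net = Network([(784, None), (128, relu)])
    net.createWeightsAndBiases()
    print(net.weights[0].std())   # roughly sqrt(2 / 784) ≈ 0.0505 (He initialisation)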