x @ w + b

Where...
x is the input to the layer
w is the weight matrix of the layer
b is the bias vector of the layer
(@ means matrix multiplication)
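To make the shapes concrete, here is a quick NumPy sketch of that expression; the batch size of 2, input size of 3 and layer size of 4 are made up purely for illustration:

import numpy as np

x = np.random.randn(2, 3)   # a batch of 2 inputs with 3 features each
w = np.random.randn(3, 4)   # weights mapping 3 features to 4 units
b = np.random.randn(4)      # one bias per unit

out = x @ w + b             # broadcasting adds b to every row
print(out.shape)            # => (2, 4): one row of 4 values per input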
[[-5, 10], [15, -10]]  -->  relu  -->  [[0, 10], [15, 0]]
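In NumPy that clamping is just np.maximum; a tiny sketch reproducing the example above:

import numpy as np

a = np.array([[-5, 10], [15, -10]])
print(np.maximum(0, a))
# => [[ 0 10]
#     [15  0]]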
import numpy as np

class Linear:
    def __init__(self, units):
        # units specifies how many nodes are in the layer
        self.units = units
        self.initialized = False

    def __call__(self, x):
        # initialize weights and biases if the layer hasn't been called before
        if not self.initialized:
            self.w = np.random.randn(x.shape[-1], self.units)
            self.b = np.random.randn(self.units)
            self.initialized = True
        return x @ self.w + self.b

x = np.array([[0, 1]])
layer = Linear(5)
print(layer(x))
# => [[-2.63399933 -1.18289984 0.32129587 0.2903246 -0.2602642 ]]
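Because the weights and biases come from np.random.randn, the printed numbers will be different on every run. If you want repeatable output while experimenting, you can seed NumPy first; the seed value below is arbitrary:

np.random.seed(0)   # any fixed seed makes the random weights reproducible
layer = Linear(5)
print(layer(np.array([[0, 1]])))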
class Sigmoid:
    def __call__(self, x):
        return 1 / (1 + np.exp(-x))

class Relu:
    def __call__(self, x):
        return np.maximum(0, x)

class Softmax:
    def __call__(self, x):
        # normalize along the last axis so each row sums to 1
        return np.exp(x) / np.sum(np.exp(x), axis=-1, keepdims=True)

class Tanh:
    def __call__(self, x):
        return np.tanh(x)
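As a quick sanity check (assuming the classes above are in scope), here is what each activation does to a small made-up vector; the values are chosen only for illustration and the printed numbers are approximate:

z = np.array([[-1.0, 0.0, 2.0]])

print(Relu()(z))     # => [[0. 0. 2.]]                   negatives clamped to 0
print(Sigmoid()(z))  # => approx [[0.269 0.5 0.881]]     squashed into (0, 1)
print(Tanh()(z))     # => approx [[-0.762 0. 0.964]]     squashed into (-1, 1)
print(Softmax()(z))  # => approx [[0.042 0.114 0.844]]   each row sums to 1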
class Model:
    def __init__(self, layers):
        self.layers = layers

    def __call__(self, x):
        # feed the output of each layer into the next
        output = x
        for layer in self.layers:
            output = layer(output)
        return output
import layers
import numpy as np

# inputs array
x = np.array([[0, 1], [0, 0], [1, 1], [0, 1]])

# network uses all the layers we have designed so far
net = layers.Model([
    layers.Linear(32),
    layers.Sigmoid(),
    layers.Linear(16),
    layers.Softmax(),
    layers.Linear(8),
    layers.Tanh(),
    layers.Linear(4),
    layers.Relu(),
])

print(net(x))
Output:
[[0. 3.87770361 0.17602662 0. ]
[0. 3.85640582 0.22373699 0. ]
[0. 3.77290517 0.2469388 0. ]
[0. 3.87770361 0.17602662 0. ]]