From 02b5a6ecf576b209978b41ffb16f7cb6b92f4b33 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E5=AE=81=E5=AE=81?=
Date: Thu, 21 Sep 2017 21:39:43 +0800
Subject: [PATCH 1/2] modify MLP to support multiple hidden layers in an easy
 way

---
 python/MLP.py | 47 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 14 deletions(-)

diff --git a/python/MLP.py b/python/MLP.py
index e9ded0b..cee5ac4 100755
--- a/python/MLP.py
+++ b/python/MLP.py
@@ -9,6 +9,10 @@ class MLP(object):
 
 
     def __init__(self, input, label, n_in, n_hidden, n_out, rng=None):
+        """
+        n_hidden: python list giving the dimension of each hidden layer
+        activation: activation function
+        """
 
         self.x = input
         self.y = label
@@ -17,35 +21,50 @@ def __init__(self, input, label, n_in, n_hidden, n_out, rng=None):
             rng = numpy.random.RandomState(1234)
 
         # construct hidden_layer
-        self.hidden_layer = HiddenLayer(input=self.x,
-                                        n_in=n_in,
-                                        n_out=n_hidden,
-                                        rng=rng,
-                                        activation=tanh)
+        layers_dim = numpy.hstack([n_in, n_hidden])
+        self.hidden_layer = []
+
+        for hidden_idx in xrange(len(layers_dim) - 1):
+            self.hidden_layer.append(HiddenLayer(input=self.x,
+                                                 n_in=layers_dim[hidden_idx],
+                                                 n_out=layers_dim[hidden_idx + 1],
+                                                 rng=rng,
+                                                 activation=tanh))
 
         # construct log_layer
-        self.log_layer = LogisticRegression(input=self.hidden_layer.output,
+        self.log_layer = LogisticRegression(input=self.hidden_layer[-1].output,
                                             label=self.y,
-                                            n_in=n_hidden,
+                                            n_in=n_hidden[-1],
                                             n_out=n_out)
 
+
     def train(self):
         # forward hidden_layer
-        layer_input = self.hidden_layer.forward()
+        layer_input = self.x
+
+        # feed the output of each hidden layer into the next
+        for hidden_idx in range(len(self.hidden_layer)):
+            layer_input = self.hidden_layer[hidden_idx].forward(input=layer_input)
 
         # forward & backward log_layer
-        # self.log_layer.forward(input=layer_input)
         self.log_layer.train(input=layer_input)
 
         # backward hidden_layer
-        self.hidden_layer.backward(prev_layer=self.log_layer)
+        for hidden_idx in reversed(range(len(self.hidden_layer))):
 
-        # backward log_layer
-        # self.log_layer.backward()
+            # the last hidden layer takes its error from the output layer,
+            # every other layer from the hidden layer above it
+            if hidden_idx == len(self.hidden_layer) - 1:
+                self.hidden_layer[hidden_idx].backward(prev_layer=self.log_layer)
+                continue
+
+            self.hidden_layer[hidden_idx].backward(prev_layer=self.hidden_layer[hidden_idx + 1])
 
 
     def predict(self, x):
-        x = self.hidden_layer.output(input=x)
+        for hidden_idx in range(len(self.hidden_layer)):
+            x = self.hidden_layer[hidden_idx].output(input=x)
+
         return self.log_layer.predict(x)
 
 
@@ -66,7 +85,7 @@ def test_mlp(n_epochs=5000):
 
 
     # construct MLP
-    classifier = MLP(input=x, label=y, n_in=2, n_hidden=3, n_out=2, rng=rng)
+    classifier = MLP(input=x, label=y, n_in=2, n_hidden=[3, 4], n_out=2, rng=rng)
 
     # train
     for epoch in xrange(n_epochs):

From b7397f56f09cdf26ee9e3b98b1f5dfa09730c94b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E5=AE=81=E5=AE=81?=
Date: Thu, 21 Sep 2017 21:43:51 +0800
Subject: [PATCH 2/2] small change: remove stale docstring line

---
 python/MLP.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/python/MLP.py b/python/MLP.py
index cee5ac4..142efd6 100755
--- a/python/MLP.py
+++ b/python/MLP.py
@@ -11,7 +11,6 @@ class MLP(object):
     def __init__(self, input, label, n_in, n_hidden, n_out, rng=None):
         """
         n_hidden: python list giving the dimension of each hidden layer
-        activation: activation function
         """
 
         self.x = input
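
A minimal usage sketch of the patched class, mirroring test_mlp above. The
input/label values here are illustrative only, and the import assumes MLP.py
is on the Python path; MLP, HiddenLayer, and LogisticRegression are the
existing pieces of this repo:

    import numpy
    from MLP import MLP

    # four 2-dimensional samples with one-hot labels (illustrative data)
    x = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = numpy.array([[1, 0], [0, 1], [0, 1], [1, 0]])

    rng = numpy.random.RandomState(123)

    # n_hidden is now a list with one entry per hidden layer,
    # so this builds a 2 -> 3 -> 4 -> 2 network
    classifier = MLP(input=x, label=y, n_in=2, n_hidden=[3, 4], n_out=2, rng=rng)

    for epoch in xrange(5000):   # Python 2, like the rest of the file
        classifier.train()

    print classifier.predict(x)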