Rijuban Rangslang
2015-10-09 14:26:07 UTC
Hi,
I am trying to model a sparse RBM, so I added some dropout. The code seems
to build the model, but it then throws an error that I cannot figure out.
The error is as follows:
z[0] = numpy.asarray(numpy.dot(x, y))
ValueError: matrices are not aligned
Apply node that caused the error: dot(Elemwise{mul,no_inplace}.0, W)
Toposort index: 45
Inputs types: [TensorType(float64, matrix), TensorType(float64, matrix)]
Inputs shapes: [(60, 2000), (180, 2000)]
Inputs strides: [(16000, 8), (16000, 8)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[Elemwise{add,no_inplace}(dot.0, DimShuffle{x,0}.0)]]
Backtrace when the node is created:
File "GRBM_momemtum_sparsity.py", line 257, in propup
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
The error points to this particular line. My input matrix has 180 columns, so
the input size is 180 and the output size is 61. My batch size is 60, and my
hidden layer sizes are [2000, 2000].
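To make the shapes concrete, the failing product can be reproduced with plain
NumPy using only the shapes from the traceback (the variable names below are
just for illustration, not taken from my code):

import numpy

# Shapes copied from the traceback: the left operand is the (batch, 2000)
# output of the previous layer, the right operand is a (180, 2000) weight matrix.
vis = numpy.zeros((60, 2000))   # Elemwise{mul,no_inplace}.0 in the error
W = numpy.zeros((180, 2000))    # the W input of the dot node
numpy.dot(vis, W)               # ValueError: inner dimensions (2000 vs. 180) do not match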
The part of the code where the error might originate is:
#def __init__(self, numpy_rng, layer_sizes, dropout_rates, activations, use_bias, theano_rng=None):
def __init__(self, numpy_rng, layer_sizes, dropout_rates, activations,
             use_bias, theano_rng=None):
    #rectified_linear_activation = lambda x: T.maximum(0.0, x)
    self.x = T.matrix('x')

    # Set up all the hidden layers
    weight_matrix_sizes = zip(layer_sizes, layer_sizes[1:])
    self.layers = []
    #self.n_layers = len(hidden_layer_sizes)
    self.dropout_layers = []
    self.rbm_layers = []
    next_layer_input = self.x
    #first_layer = True

    if theano_rng is None:
        theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

    # dropout the input
    next_dropout_layer_input = _dropout_from_layer(numpy_rng,
            next_layer_input, p=dropout_rates[0])
    layer_counter = 0
    for n_ins, n_out in weight_matrix_sizes[:-1]:
        # Reuse the parameters from the dropout layer here, in a
        # different path through the graph.
        next_layer = HiddenLayer(numpy_rng=numpy_rng,
                input=next_layer_input,
                activation=activations[layer_counter],
                # scale the weight matrix W with (1-p)
                n_ins=n_ins, n_out=n_out,
                use_bias=use_bias)
        self.layers.append(next_layer)
        next_layer_input = next_layer.output

        next_dropout_layer = DropoutHiddenLayer(numpy_rng=numpy_rng,
                input=next_dropout_layer_input,
                activation=activations[layer_counter],
                n_ins=n_ins, n_out=n_out, use_bias=use_bias,
                W=next_layer.W * (1 - dropout_rates[-1]),
                b=next_layer.b,
                dropout_rate=dropout_rates[layer_counter + 1])
        self.dropout_layers.append(next_dropout_layer)
        next_dropout_layer_input = next_dropout_layer.output
        #first_layer = False
        layer_counter += 1

        # Construct an RBM that shares weights with this layer
        rbm_layer = RBM(numpy_rng=numpy_rng,
                theano_rng=theano_rng,
                input=next_dropout_layer_input,
                n_visible=n_ins,
                n_hidden=n_out,
                W=next_layer.W,
                hbias=next_layer.b)
        self.rbm_layers.append(rbm_layer)

    # Set up the output layer
    #n_ins, n_out = weight_matrix_sizes[-1]
    dropout_output_layer = LogisticRegression(
            input=next_dropout_layer_input,
            n_ins=n_ins, n_out=n_out)
    self.dropout_layers.append(dropout_output_layer)

    # Again, reuse parameters in the dropout output.
    output_layer = LogisticRegression(
            input=next_layer_input,
            # scale the weight matrix W with (1-p)
            W=dropout_output_layer.W * (1 - dropout_rates[-1]),
            b=dropout_output_layer.b,
            n_ins=n_ins, n_out=n_out)
    self.layers.append(output_layer)
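To narrow down which layer builds the mismatched dot product, I can use
Theano's test-value mechanism so that shapes are checked while the graph is
being constructed; this is only a debugging sketch, assuming a batch size of
60 and 180 input columns as above:

import numpy
import theano
import theano.tensor as T

# Debugging sketch (not part of the model): evaluate test values while the
# graph is built, so shape errors surface at the line that creates the node.
theano.config.compute_test_value = 'warn'

x = T.matrix('x')
# assumed sizes from above: batch size 60, input dimension 180
x.tag.test_value = numpy.random.rand(60, 180).astype(theano.config.floatX)

With this enabled, the T.dot(vis, self.W) in propup should be evaluated on the
test value as soon as the node is created, so the (60, 2000) x (180, 2000)
mismatch would be reported at the layer that constructs it rather than at run
time.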
Kindly help.