ift6266: changeset 298:a222af1d0598
- Adapt scdae to the input_shape change in pynnet
- Use the proper dataset in run_exp
| author   | Arnaud Bergeron <abergeron@gmail.com> |
|----------|---------------------------------------|
| date     | Mon, 29 Mar 2010 17:36:22 -0400 |
| parents  | a6b6b1140de9 |
| children | a9af079892ce |
| files    | deep/convolutional_dae/run_exp.py deep/convolutional_dae/scdae.py |
| diffstat | 2 files changed, 24 insertions(+), 86 deletions(-) |
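The substance of the pynnet change this commit tracks: layers no longer take an image shape in their constructor; the shape travels through `build(input, input_shape)` instead. Below is a minimal sketch of the new convention, assuming only the `cdae` class and the signatures visible in the diffs that follow (the hyperparameter values are illustrative, not from the changeset):

```python
import numpy
import theano.tensor as T

from scdae import cdae  # the class defined in deep/convolutional_dae/scdae.py

# Before this change, the shape was fixed at construction time:
#   layer = cdae(fsize, nfilt, num_in, subs, corr, dtype, img_shape)
# After it, the layer is shape-agnostic until build() is called.
layer = cdae((5, 5), 4, 1, (2, 2), 0.2, numpy.float32)
x = T.tensor4()
# input_shape is (batch, channels, height, width); judging by the double
# build in build_funcs below, passing input_shape=None keeps the graph
# generic at the cost of less specialized convolutions.
layer.build(x, input_shape=(32, 1, 32, 32))
```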
```diff
--- a/deep/convolutional_dae/run_exp.py	Mon Mar 29 09:18:54 2010 -0400
+++ b/deep/convolutional_dae/run_exp.py	Mon Mar 29 17:36:22 2010 -0400
@@ -47,9 +47,9 @@
         pretrain_lr=state.pretrain_lr,
         train_lr=state.train_lr)
 
+    t_it = repeat_itf(dset.train, state.bsize)
     pretrain_fs, train, valid, test = massage_funcs(
-        repeat_itf(dset.train, state.bsize),
-        dset, state.bsize,
+        t_it, t_it, dset, state.bsize,
        pretrain_funcs, trainf,evalf)
 
     series = create_series()
```
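The run_exp.py hunk is mechanical: `massage_funcs` now takes the pretraining iterator and the training iterator as separate arguments, so the caller binds `repeat_itf(dset.train, state.bsize)` once and passes it twice. A hedged sketch of what the split permits; `dset.unlabelled` below is hypothetical and not part of this changeset:

```python
# As this changeset does: one shared minibatch stream for both phases.
t_it = repeat_itf(dset.train, state.bsize)
pretrain_fs, train, valid, test = massage_funcs(
    t_it, t_it, dset, state.bsize,
    pretrain_funcs, trainf, evalf)

# What the new signature would also allow (hypothetical): pretraining
# on a different, possibly unlabelled, stream.
# p_it = repeat_itf(dset.unlabelled, state.bsize)
# pretrain_fs, train, valid, test = massage_funcs(
#     p_it, t_it, dset, state.bsize,
#     pretrain_funcs, trainf, evalf)
```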
```diff
--- a/deep/convolutional_dae/scdae.py	Mon Mar 29 09:18:54 2010 -0400
+++ b/deep/convolutional_dae/scdae.py	Mon Mar 29 17:36:22 2010 -0400
@@ -1,6 +1,4 @@
 from pynnet import *
-# use hacks also
-from pynnet.utils import *
 
 import numpy
 import theano
@@ -11,37 +9,27 @@
 
 class cdae(LayerStack):
     def __init__(self, filter_size, num_filt, num_in, subsampling, corruption,
-                 dtype, img_shape):
+                 dtype):
         LayerStack.__init__(self, [ConvAutoencoder(filter_size=filter_size,
                                                    num_filt=num_filt,
                                                    num_in=num_in,
                                                    noisyness=corruption,
-                                                   dtype=dtype,
-                                                   image_shape=img_shape),
+                                                   dtype=dtype),
                                    MaxPoolLayer(subsampling)])
 
-    def build(self, input):
-        LayerStack.build(self, input)
+    def build(self, input, input_shape=None):
+        LayerStack.build(self, input, input_shape)
         self.cost = self.layers[0].cost
+        self.pre_params = self.layers[0].pre_params
 
-def cdae_out_size(in_size, filt_size, num_filt, num_in, subs):
-    out = [None] * 3
-    out[0] = num_filt
-    out[1] = (in_size[1]-filt_size[0]+1)/subs[0]
-    out[2] = (in_size[2]-filt_size[1]+1)/subs[1]
-    return out
-
-def scdae(in_size, num_in, filter_sizes, num_filts,
-          subsamplings, corruptions, dtype):
+def scdae(filter_sizes, num_filts, subsamplings, corruptions, dtype):
     layers = []
     old_nfilt = 1
     for fsize, nfilt, subs, corr in izip(filter_sizes, num_filts,
                                          subsamplings, corruptions):
-        layers.append(cdae(fsize, nfilt, old_nfilt, subs, corr, dtype,
-                           (num_in, in_size[0], in_size[1], in_size[2])))
-        in_size = cdae_out_size(in_size, fsize, nfilt, old_nfilt, subs)
+        layers.append(cdae(fsize, nfilt, old_nfilt, subs, corr, dtype))
         old_nfilt = nfilt
-    return LayerStack(layers), in_size
+    return LayerStack(layers)
 
 def mlp(layer_sizes, dtype):
     layers = []
@@ -53,11 +41,13 @@
     return LayerStack(layers)
 
 def scdae_net(in_size, num_in, filter_sizes, num_filts, subsamplings,
-              corruptions, layer_sizes, out_size, dtype, batch_size):
+              corruptions, layer_sizes, out_size, dtype):
     rl1 = ReshapeLayer((None,)+in_size)
-    ls, outs = scdae(in_size, num_in, filter_sizes, num_filts, subsamplings,
-                     corruptions, dtype)
-    outs = numpy.prod(outs)
+    ls = scdae(num_in, filter_sizes, num_filts, subsamplings,
+               corruptions, dtype)
+    x = T.tensor4()
+    ls.build(x, input_shape=(1,)+in_size)
+    outs = numpy.prod(ls.output_shape)
     rl2 = ReshapeLayer((None, outs))
     layer_sizes = [outs]+layer_sizes
     ls2 = mlp(layer_sizes, dtype)
@@ -68,7 +58,7 @@
           noise, mlp_sizes, out_size, dtype, pretrain_lr, train_lr):
 
     n = scdae_net((1,)+img_size, batch_size, filter_sizes, num_filters, subs,
-                  noise, mlp_sizes, out_size, dtype, batch_size)
+                  noise, mlp_sizes, out_size, dtype)
 
     n.save('start.net')
 
@@ -76,19 +66,18 @@
     y = T.ivector('y')
 
     def pretrainfunc(net, alpha):
-        up = trainers.get_updates(net.params, net.cost, alpha)
+        up = trainers.get_updates(net.pre_params, net.cost, alpha)
         return theano.function([x], net.cost, updates=up)
 
     def trainfunc(net, alpha):
         up = trainers.get_updates(net.params, net.cost, alpha)
         return theano.function([x, y], net.cost, updates=up)
 
-    n.build(x, y)
+    n.build(x, y, input_shape=(bsize, 1)+img_size)
     pretrain_funcs_opt = [pretrainfunc(l, pretrain_lr) for l in n.layers[1].layers]
     trainf_opt = trainfunc(n, train_lr)
     evalf_opt = theano.function([x, y], errors.class_error(n.output, y))
 
-    clear_imgshape(n)
     n.build(x, y)
     pretrain_funcs_reg = [pretrainfunc(l, 0.01) for l in n.layers[1].layers]
     trainf_reg = trainfunc(n, 0.1)
@@ -121,10 +110,11 @@
         for epoch in xrange(pretrain_epochs):
             serie.append((layer, epoch), f())
 
-def massage_funcs(train_it, dset, batch_size, pretrain_funcs, trainf, evalf):
+def massage_funcs(pretrain_it, train_it, dset, batch_size, pretrain_funcs,
+                  trainf, evalf):
     def pretrain_f(f):
         def res():
-            for x, y in train_it:
+            for x, y in pretrain_it:
                 yield f(x)
         it = res()
         return lambda: it.next()
@@ -196,58 +186,6 @@
 
     return series
 
-def run_exp(state, channel):
-    from ift6266 import datasets
-    from sgd_opt import sgd_opt
-    import sys, time
-
-    # params: bsize, pretrain_lr, train_lr, nfilts1, nfilts2, nftils3, nfilts4
-    #         pretrain_rounds
-
-    pylearn.version.record_versions(state, [theano,ift6266,pylearn])
-    # TODO: maybe record pynnet version?
-    channel.save()
-
-    dset = dataset.nist_all(1000)
-
-    nfilts = []
-    if state.nfilts1 != 0:
-        nfilts.append(state.nfilts1)
-    if state.nfilts2 != 0:
-        nfilts.append(state.nfilts2)
-    if state.nfilts3 != 0:
-        nfilts.append(state.nfilts3)
-    if state.nfilts4 != 0:
-        nfilts.append(state.nfilts4)
-
-    fsizes = [(5,5)]*len(nfilts)
-    subs = [(2,2)]*len(nfilts)
-    noise = [state.noise]*len(nfilts)
-
-    pretrain_funcs, trainf, evalf, net = build_funcs(
-        img_size=(32, 32),
-        batch_size=state.bsize,
-        filter_sizes=fsizes,
-        num_filters=nfilts,
-        subs=subs,
-        noise=noise,
-        mlp_sizes=[state.mlp_sz],
-        out_size=62,
-        dtype=numpy.float32,
-        pretrain_lr=state.pretrain_lr,
-        train_lr=state.train_lr)
-
-    pretrain_fs, train, valid, test = massage_funcs(
-        state.bsize, dset, pretrain_funcs, trainf, evalf)
-
-    series = create_series()
-
-    do_pretrain(pretrain_fs, state.pretrain_rounds, series['recons_error'])
-
-    sgd_opt(train, valid, test, training_epochs=100000, patience=10000,
-            patience_increase=2., improvement_threshold=0.995,
-            validation_frequency=2500, series=series, net=net)
-
 if __name__ == '__main__':
     from ift6266 import datasets
     from sgd_opt import sgd_opt
@@ -263,9 +201,9 @@
         mlp_sizes=[500], out_size=10, dtype=numpy.float32,
         pretrain_lr=0.01, train_lr=0.1)
 
+    t_it = repeat_itf(dset.train, batch_size)
     pretrain_fs, train, valid, test = massage_funcs(
-        repeat_itf(dset.train, batch_size),
-        dset, batch_size,
+        t_it, t_it, dset, batch_size,
         pretrain_funcs, trainf, evalf)
 
     print "pretraining ...",
```
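Two behavioural changes ride along in scdae.py. First, `cdae.build` now exposes the autoencoder's `pre_params`, and `pretrainfunc` feeds those (rather than `net.params`) to `trainers.get_updates`, so pretraining only updates the autoencoder weights. Second, the hand-rolled `cdae_out_size` is gone: `scdae_net` infers the flattened feature count by building the stack on a dummy `tensor4` and reading `output_shape`. (Note that `scdae_net` still passes `num_in` as the first argument to `scdae`, whose new signature dropped it; that looks like a leftover.) A sketch of the shape-inference idiom with illustrative hyperparameters, using the corrected call:

```python
import numpy
import theano.tensor as T

from scdae import scdae  # the builder defined in this file

# Illustrative values, not taken from the changeset.
filter_sizes = [(5, 5), (5, 5)]
num_filts = [4, 3]
subsamplings = [(2, 2), (2, 2)]
corruptions = [0.2, 0.2]
in_size = (1, 32, 32)  # (channels, height, width)

# Build once with a concrete input_shape just to learn the output geometry.
ls = scdae(filter_sizes, num_filts, subsamplings, corruptions,
           numpy.float32)
x = T.tensor4()
ls.build(x, input_shape=(1,) + in_size)  # batch of 1 as a placeholder
outs = numpy.prod(ls.output_shape)       # flattened size fed to the MLP
```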