# cuda-convnet layer-parameter file (read via Python ConfigParser).
# this is like #153 (so uses same file) but with dexp learning rate schedule
# its also like 163 but final learning rate is 0.00002 instead of 0.00001
# on guppy7
# logs/layers-165.log
# /nobackup/kriz/tmp/ConvNet__2012-12-30_18.42.56
# NOTE: performance to be compared with 163
# epoch 63: set color noise to 0 from 0.1
# epoch 78: set conv1 epsw to 0 from 0.01
# logprob: 1.847919, 0.427840, 0.198452
# multiview logprob: 1.757196, 0.409820, 0.183920

# NOTE(review): per cuda-convnet convention, epsW/epsB appear to be the
# weight/bias learning rates, momW/momB the momenta, wc the L2 weight cost,
# and schedW/schedB the learning-rate schedules (dexp[a,b] = discrete
# exponential decay) -- confirm against the trainer's layer-param docs.
# Comma-separated values (e.g. epsW=0.01,0.01) give one value per input
# of a multi-input layer.

# conv1 learning is frozen (epsW/epsB = 0; see epoch-78 note above).
[conv1a]
epsW=0.0
epsB=0.0
momW=0.9
momB=0.9
wc=0.0005
wball=0.00
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv1b]
epsW=0.0
epsB=0.0
momW=0.9
momB=0.9
wc=0.0005
wball=0.00
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv2a]
epsW=0.01
epsB=0.02
momW=0.9
momB=0.9
wc=0.0005
wball=0.00
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv2b]
epsW=0.01
epsB=0.02
momW=0.9
momB=0.9
wc=0.0005
wball=0.00
schedW=dexp[500,4]
schedB=dexp[10,2]

# conv3 layers take two inputs, hence the paired values.
[conv3a]
epsW=0.01,0.01
epsB=0.02
momW=0.9,0.9
momB=0.9
wc=0.0005,0.0005
wball=0,0
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv3b]
epsW=0.01,0.01
epsB=0.02
momW=0.9,0.9
momB=0.9
wc=0.0005,0.0005
wball=0,0
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv4a]
epsW=0.01
epsB=0.02
momW=0.9
momB=0.9
wc=0.0005
wball=0
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv4b]
epsW=0.01
epsB=0.02
momW=0.9
momB=0.9
wc=0.0005
wball=0
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv5a]
epsW=0.01
epsB=0.02
momW=0.9
momB=0.9
wc=0.0005
wball=0
schedW=dexp[500,4]
schedB=dexp[10,2]

[conv5b]
epsW=0.01
epsB=0.02
momW=0.9
momB=0.9
wc=0.0005
wball=0
schedW=dexp[500,4]
schedB=dexp[10,2]

# Fully-connected layers use a larger weight cost (0.001 vs 0.0005).
[fc2048a]
epsW=0.01,0.01
epsB=0.02
momW=0.9,0.9
momB=0.9
wc=0.001,0.001
wball=0,0
schedW=dexp[500,4]
schedB=dexp[10,2]

[fc2048b]
epsW=0.01,0.01
epsB=0.02
momW=0.9,0.9
momB=0.9
wc=0.001,0.001
wball=0,0
schedW=dexp[500,4]
schedB=dexp[10,2]

[fc2048ba]
epsW=0.01,0.01
epsB=0.02
momW=0.9,0.9
momB=0.9
wc=0.001,0.001
wball=0,0
schedW=dexp[500,4]
schedB=dexp[10,2]

[fc2048bb]
epsW=0.01,0.01
epsB=0.02
momW=0.9,0.9
momB=0.9
wc=0.001,0.001
wball=0,0
schedW=dexp[500,4]
schedB=dexp[10,2]

[fc1000]
epsW=0.01,0.01
epsB=0.02
momW=0.9,0.9
momB=0.9
wc=0.001,0.001
wball=0,0
schedW=dexp[500,4]
schedB=dexp[10,2]

# Logistic-regression cost; topk=5 tracks top-5 error.
[logprob]
coeff=1
topk=5

# Dropout ("hs" = hidden-unit suppression) layers, all enabled.
[hs1a]
enable=true

[hs2a]
enable=true

[hs1b]
enable=true

[hs2b]
enable=true

# Response-normalization layers.
[rnorm1a]
scale=0.0001
pow=0.75
minDiv=2

[rnorm1b]
scale=0.0001
pow=0.75
minDiv=2

[rnorm2a]
scale=0.0001
pow=0.75
minDiv=2

[rnorm2b]
scale=0.0001
pow=0.75
minDiv=2

# Contrast-normalization layers.
[cnorm2a]
scale=0.001
pow=0.75

[cnorm2b]
scale=0.001
pow=0.75