import tensorflow as tf


def LeNet2(X, n_classes, is_training):
    """LeNet-style CNN: four conv/pool/dropout blocks, then two dense layers.

    Args:
        X: input image batch, shape [N, H, W, C].
        n_classes: number of output classes.
        is_training: Python bool or bool tensor; enables dropout when True.
    """
    # Block 1: 32 5x5 filters -> ReLU -> 2x2 max pool -> dropout.
    L1 = tf.layers.conv2d(X, 32, [5, 5], activation=tf.nn.relu, padding='SAME')
    L1 = tf.layers.max_pooling2d(L1, [2, 2], [2, 2], padding='SAME')
    # Pass `training` by keyword: the third positional argument of
    # tf.layers.dropout is noise_shape, so dropout would never switch on.
    L1 = tf.layers.dropout(L1, rate=0.3, training=is_training)
    # Block 2: 64 5x5 filters -> ReLU -> 2x2 max pool -> dropout.
    L2 = tf.layers.conv2d(L1, 64, [5, 5], activation=tf.nn.relu, padding='SAME')
    L2 = tf.layers.max_pooling2d(L2, [2, 2], [2, 2], padding='SAME')
    L2 = tf.layers.dropout(L2, rate=0.3, training=is_training)
    # Block 3: 128 3x3 filters -> ReLU -> 2x2 max pool -> dropout.
    L2_a = tf.layers.conv2d(L2, 128, [3, 3], activation=tf.nn.relu, padding='SAME')
    L2_a = tf.layers.max_pooling2d(L2_a, [2, 2], [2, 2], padding='SAME')
    L2_a = tf.layers.dropout(L2_a, rate=0.3, training=is_training)
    # Block 4: 256 5x5 filters -> ReLU -> 2x2 max pool -> dropout.
    L2_b = tf.layers.conv2d(L2_a, 256, [5, 5], activation=tf.nn.relu, padding='SAME')
    L2_b = tf.layers.max_pooling2d(L2_b, [2, 2], [2, 2], padding='SAME')
    L2_b = tf.layers.dropout(L2_b, rate=0.3, training=is_training)
    # Classifier head: flatten (for 28x28 inputs the four 'SAME' pools leave
    # 2x2x256 = 1024 features), then two dense layers with heavier dropout.
    L3 = tf.layers.flatten(L2_b)
    L3 = tf.layers.dense(L3, 625, activation=tf.nn.relu)
    L3 = tf.layers.dropout(L3, rate=0.5, training=is_training)
    L3_a = tf.layers.dense(L3, 128, activation=tf.nn.relu)
    L3_a = tf.layers.dropout(L3_a, rate=0.5, training=is_training)
    # Return raw logits; apply softmax / cross-entropy loss outside the model.
    logits = tf.layers.dense(L3_a, n_classes, activation=None)
    return logits
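

# Minimal usage sketch, assuming MNIST-shaped [None, 28, 28, 1] inputs and
# 10 classes; the placeholder names, learning rate, and choice of optimizer
# below are illustrative assumptions, not taken from the original source.
if __name__ == '__main__':
    X = tf.placeholder(tf.float32, [None, 28, 28, 1])
    Y = tf.placeholder(tf.int64, [None])
    is_training = tf.placeholder(tf.bool)  # feed True while training, False at eval
    logits = LeNet2(X, n_classes=10, is_training=is_training)
    # Softmax cross-entropy on the raw logits returned by LeNet2.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=logits))
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)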