Week 3 Homework Submission


#1

import tensorflow as tf
import os
from tensorflow.examples.tutorials.mnist import input_data

def inference(input_tensor, train, regularizer):
    with tf.variable_scope('layer1-conv1'):
        weights = tf.get_variable('conv1_weights', [5, 5, 1, 32],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('conv1_biases', [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, biases))

    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    with tf.variable_scope('layer3-conv2'):
        weights = tf.get_variable('conv2-weights', [5, 5, 32, 64],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('conv2-biases', [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, biases))

    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the pooled feature map for the fully connected layers.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])

    with tf.variable_scope('layer5-fc1'):
        weights = tf.get_variable('fc1-weight', [nodes, 512],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(weights))
        biases = tf.get_variable('fc1-biases', [512], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, weights) + biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer6-fc2'):
        weights = tf.get_variable('fc2-weight', [512, 10],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(weights))
        biases = tf.get_variable('fc2-biases', [10], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, weights) + biases

    return logit
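A quick usage note (my own addition, not part of the assignment): inference() runs conv2d on its first argument, so it must be fed a 4-D image tensor of shape [batch, 28, 28, 1] rather than the flat 784-pixel vectors the MNIST reader returns; the train flag only toggles dropout. A minimal call sketch, where the names images and images_4d are just for illustration:

# Minimal call sketch (assumed evaluation-time usage: no dropout, no regularizer).
images = tf.placeholder(tf.float32, [None, 784], name='image-input')
images_4d = tf.reshape(images, [-1, 28, 28, 1])   # conv2d expects NHWC input
logits = inference(images_4d, False, None)        # train=False -> dropout disabled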

def train(mnist):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-output')
    # inference() expects a 4-D image tensor, so reshape the flat 784-pixel input first.
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    regularizer = tf.contrib.layers.l2_regularizer(0.0001)
    y = inference(x_image, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Maintain moving-average shadow copies of the trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(0.99, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(0.8, global_step, mnist.train.num_examples / 100, 0.99)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(30000):
            xs, ys = mnist.train.next_batch(100)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join('model/', 'model.ckpt'), global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets('data/', one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
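The script above only trains and writes checkpoints. As a hedged sketch of how the saved model could be loaded back for evaluation (my own addition, not part of the submission), the moving-average shadow values can be restored in place of the raw weights via variables_to_restore(); the evaluate() name and the choice of the validation split are assumptions:

# Sketch only: restore the latest checkpoint written by train() and measure accuracy.
def evaluate(mnist):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-output')
    y = inference(tf.reshape(x, [-1, 28, 28, 1]), False, None)   # no dropout / regularization at eval time

    correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    # Load the exponential-moving-average shadow values saved during training.
    variable_averages = tf.train.ExponentialMovingAverage(0.99)
    saver = tf.train.Saver(variable_averages.variables_to_restore())

    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state('model/')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            acc = sess.run(accuracy, feed_dict={x: mnist.validation.images,
                                                y_: mnist.validation.labels})
            print("validation accuracy %g" % acc)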

The code follows the TensorFlow 实战 (TensorFlow in Action) tutorial; I now have a basic grasp of it.


#2

import tensorflow as tf
import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W, padding):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

x_image = tf.reshape(x,[-1,28,28,1])

with tf.name_scope('conv1'):
    W_conv1 = weight_variable([5, 5, 1, 6])
    b_conv1 = bias_variable([6])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, 'SAME') + b_conv1)

with tf.name_scope('pool1'):
    h_pool1 = max_pool_2x2(h_conv1)

with tf.name_scope('conv2'):
    W_conv2 = weight_variable([5, 5, 6, 16])
    b_conv2 = bias_variable([16])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 'VALID') + b_conv2)

with tf.name_scope('pool2'):
    h_pool2 = max_pool_2x2(h_conv2)
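A quick sanity check on the feature-map sizes (my own note, not in the original code): the 28x28 input keeps its size through the SAME-padded conv1 (28), halves to 14 after pool1, shrinks to 14 - 5 + 1 = 10 after the VALID 5x5 conv2, and halves again to 5 after pool2. With 16 output channels, the flattened vector used below therefore has 5 * 5 * 16 = 400 elements.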

with tf.name_scope('fc1'):
    h_pool2_flat = tf.reshape(h_pool2, [-1, 5 * 5 * 16])
    W_fc1 = weight_variable([5 * 5 * 16, 120])
    b_fc1 = bias_variable([120])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

with tf.name_scope('fc2'):
    W_fc2 = weight_variable([120, 84])
    b_fc2 = bias_variable([84])
    h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

keep_prob = tf.placeholder(tf.float32)
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)

with tf.name_scope('softmax'):
    W_fc3 = weight_variable([84, 10])
    b_fc3 = bias_variable([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)

cross_entropy = -tf.reduce_sum(y * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
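One caveat worth noting (my own remark, not part of the submitted code): the hand-written -tf.reduce_sum(y * tf.log(y_conv)) can produce NaN once any softmax output underflows to exactly 0. A hedged alternative sketch is to feed the pre-softmax logits into TensorFlow's built-in cross-entropy, which handles this stably:

# Sketch of a numerically stable loss (an alternative to the hand-written one above).
# It works on the pre-softmax logits instead of y_conv = softmax(logits).
logits = tf.matmul(h_fc2_drop, W_fc3) + b_fc3
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)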

sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})

print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))