# with tf.keras
tf.reset_default_graph()

with tf.name_scope("inputs"):
    # Placeholders: 28x28 inputs (28 time steps of 28 features) and one-hot labels for 10 classes
    input_data = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28], name='input_data')
    y_label = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='label')

with tf.variable_scope("RNN_layer"):
    # Keras SimpleRNN layer used inside the TF graph; returns the last hidden state
    rnn_out = tf.keras.layers.SimpleRNN(units=32)(input_data)

with tf.variable_scope("output_layer"):
    # Dense layer producing the logits for the 10 classes
    prediction = tf.layers.dense(inputs=rnn_out, units=10)

with tf.name_scope("loss"):
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y_label))

with tf.name_scope("optimizer"):
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

with tf.name_scope("accuracy"):
    correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()
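The graph above assumes that learning_rate (and, for the training loop further down, epochs, batch_size, X_train, and y_train) were already defined earlier. A minimal sketch of that setup, assuming MNIST digits scaled to [0, 1] and one-hot labels; the concrete hyperparameter values here are only placeholders, not the ones used in the original:

# Hypothetical setup; names taken from the surrounding code, values assumed
import numpy as np
import tensorflow as tf

learning_rate = 0.001   # assumed value
epochs = 10             # assumed value
batch_size = 128        # assumed value

# MNIST: 28x28 grayscale digits, 10 classes, matching the placeholder shapes above
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32) / 255.0          # scale pixels to [0, 1]
y_train = tf.keras.utils.to_categorical(y_train, 10)  # one-hot labels, shape (N, 10)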
Train the model
sess = tf.Session()
sess.run(init)
for epoch_index in range(epochs):
    loss_ls, acc_ls = [], []
    get_batch = batch_gen(X_train, y_train, batch_size)
    for batch_X, batch_y in get_batch:
        # One optimization step, also fetching the batch accuracy and loss
        _, batch_acc, batch_loss = sess.run(
            [opt, accuracy, loss],
            feed_dict={input_data: batch_X, y_label: batch_y})
        loss_ls.append(batch_loss)
        acc_ls.append(batch_acc)
    print("Epoch ", epoch_index)
    print("Accuracy ", np.mean(acc_ls), " Loss ", np.mean(loss_ls))
    print("__________________")
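The loop relies on a batch_gen helper defined earlier; if it is not available, a minimal sketch of a generator matching the call batch_gen(X_train, y_train, batch_size) could look like this (the signature and behavior are assumed, not taken from the original):

# Hypothetical batch generator: yields shuffled (batch_X, batch_y) pairs for one epoch
def batch_gen(X, y, batch_size):
    indices = np.random.permutation(len(X))  # reshuffle the data each time it is called
    for start in range(0, len(X), batch_size):
        batch_idx = indices[start:start + batch_size]
        yield X[batch_idx], y[batch_idx]

After training, the same accuracy tensor can be evaluated on a held-out set by feeding it through feed_dict in the same way.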