TensorFlow Slim (TF-Slim)で書いたモデルをMovidiusで動かす & 蒸留もどき

TF-Slimとは

TensorFlow Low Layer のマクロのようなもの。比較的簡単に書けるようになる。

変数の定義

weights = slim.model_variable('weights', shape=[10, 10, 3, 3])
my_var = slim.variable('my_var',
                       shape=[20, 1],
                       initializer=tf.zeros_initializer())

レイヤの追加

net = slim.conv2d(input, 128, [3, 3], scope='conv1_1')
Layer TF-Slim
BiasAdd slim.bias_add
BatchNorm slim.batch_norm
Conv2d slim.conv2d
Conv2dInPlane slim.conv2d_in_plane
Conv2dTranspose (Deconv) slim.conv2d_transpose
FullyConnected slim.fully_connected
AvgPool2D slim.avg_pool2d
Dropout slim.dropout
Flatten slim.flatten
MaxPool2D slim.max_pool2d
OneHotEncoding slim.one_hot_encoding
SeparableConv2d slim.separable_conv2d
UnitNorm slim.unit_norm

サンプル

import numpy as np
import tensorflow as tf

from tensorflow.contrib.slim.nets import inception

slim = tf.contrib.slim

def run(name, image_size, num_classes):
    """Rebuild Inception-v1, restore `inception_v1.ckpt`, re-save under output/<name>.

    Builds an inference-only graph with a fixed batch-1 placeholder named
    'input' (the name later referenced by mvNCCompile's -in flag), restores
    the slim-published weights, and writes them back out as a plain
    TensorFlow checkpoint.

    Args:
        name: basename for the saved checkpoint (written to output/<name>).
        image_size: square input resolution expected by the network.
        num_classes: number of output classes of the checkpoint.
    """
    with tf.Graph().as_default():
        # Batch of exactly one RGB image.
        image = tf.placeholder("float", [1, image_size, image_size, 3], name="input")
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(image, num_classes, is_training=False, spatial_squeeze=False)
        probabilities = tf.nn.softmax(logits)
        # Restore only the InceptionV1 model variables from the checkpoint file.
        init_fn = slim.assign_from_checkpoint_fn('inception_v1.ckpt', slim.get_model_variables('InceptionV1'))

        with tf.Session() as sess:
            init_fn(sess)
            # Re-save the full variable set as a regular checkpoint.
            saver = tf.train.Saver(tf.global_variables())
            saver.save(sess, "output/"+name)

# Inception-v1: 224x224 input; the published checkpoint has 1001 classes.
run('inception-v1', 224, 1001)

TF-Slim を用いた蒸留

拾ってきたソースコードつなぎ合わせて無理やり動かしたらかろうじて動いたレベル

ナンバープレート画像を用いて全結合のみに蒸留

蒸留したものをMovidiusに変換

graphでoutput nodeを確認する

fw = tf.summary.FileWriter('logs', sess.graph)
fw.close()
tensorboard --logdir logs

コンパイル

mvNCCompile -s 12 student_flozen.ckpt.meta -in=input -on=output -o graph

実行

tokunn@nanase 1:18:01 [~/Documents/distil_mnist/second_challenge] $ python3 movidius.py /home/tokunn/make_image/test/3186 2>/dev/null
Image path : /home/tokunn/make_image/test/3186/*.jpg or *.png
['extend_5_0_5934.png', 'extend_9_0_1460.png', 'extend_9_0_6437.png', 'extend_9_0_733.png', 'extend_13_0_8860.png', 'extend_5_0_1296.png', 'extend_5_0_320.png', 'extend_5_0_2227.png', 'extend_5_0_6957.png', 'extend_1_0_2447.png']
imgshape  (25, 784)
Start prediting ...
1 7 7 7 6 7 7 7 7 8 1 7 7 7 7 7 7 2 2 7 7 3 9 1 2 
Time : 0.10333132743835449 (25 images)

思ったよりすんなり動いた

ソースコード

ナンバー親子

#!/usr/bin/env python
# coding: utf-8

# In[1]:


import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
import loadimg


# In[2]:


# Alternative config (kept for reference): cap per-process GPU memory instead
# of pinning a device.
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.25
#sess = tf.Session(config=config)

# Session config used below: pin GPU "1" and grab all of its memory up front.
# NOTE(review): CUDA_VISIBLE_DEVICES is set to "0" above while
# visible_device_list is "1" -- confirm which device is actually intended.
config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(
        visible_device_list="1", # specify GPU number
        allow_growth=False
    )
)
#sess = tf.Session(config=config)


# In[3]:


NUMBER_OF_CLASS = 10  # number of output classes (digits 0-9)


# In[4]:


def MnistNetworkTeacher(input,keep_prob_conv,keep_prob_hidden,scope='Mnist',reuse = False):
    """Teacher CNN: 3 x (3x3 conv + 2x2 maxpool + dropout) -> fc625 -> fc logits.

    Args:
        input: image batch, NHWC (the caller reshapes the flat input to
            [-1, 28, 28, 1] before passing it in).
        keep_prob_conv: dropout keep probability applied after each pool stage.
        keep_prob_hidden: dropout keep probability after the first FC layer.
        scope: variable scope name (callers pass 'teacher').
        reuse: whether to reuse variables inside `scope`.

    Returns:
        Softened class probabilities softmax(logits / temperature).
        NOTE(review): `temperature` is the module-level global defined further
        down the script, not a parameter -- it must be bound before the first
        call; confirm this ordering is intended.
    """
    with tf.variable_scope(scope,reuse = reuse) as sc :
        # Shared conv defaults: 3x3 kernel, stride 1, zero-init bias, ReLU.
        with slim.arg_scope([slim.conv2d],
                            kernel_size = [3,3],
                            stride = [1,1],
                            biases_initializer=tf.constant_initializer(0.0),
                            activation_fn=tf.nn.relu):


            net = slim.conv2d(input, 32, scope='conv1')
            net = slim.max_pool2d(net,[2, 2], 2, scope='pool1')
            net = tf.nn.dropout(net, keep_prob_conv)

            net = slim.conv2d(net, 64,scope='conv2')
            net = slim.max_pool2d(net,[2, 2], 2, scope='pool2')
            net = tf.nn.dropout(net, keep_prob_conv)

            net = slim.conv2d(net, 128,scope='conv3')
            net = slim.max_pool2d(net,[2, 2], 2, scope='pool3')
            net = tf.nn.dropout(net, keep_prob_conv)

            net = slim.flatten(net)
        # FC head: zero-init biases, ReLU on the hidden layer only.
        with slim.arg_scope([slim.fully_connected],
                            biases_initializer=tf.constant_initializer(0.0),
                            activation_fn=tf.nn.relu) :

            net = slim.fully_connected(net,625,scope='fc1')
            net = tf.nn.dropout(net, keep_prob_hidden)
            net = slim.fully_connected(net,NUMBER_OF_CLASS,activation_fn=None,scope='fc2')

            # Temperature-softened probabilities (the distillation target).
            net = tf.nn.softmax(net/temperature)
            return net


# In[5]:


def MnistNetworkStudent(input,scope='Mnist',reuse = False):
    """Student network: flat input -> 1000 sigmoid units -> NUMBER_OF_CLASS logits.

    Equivalent to the arg_scope formulation: both layers get a zero bias
    initializer; only the hidden layer is activated (sigmoid), the final
    layer returns raw logits (softmax is applied by the caller).
    """
    with tf.variable_scope(scope, reuse=reuse):
        zero_bias = tf.constant_initializer(0.0)
        # Hidden layer: 1000 sigmoid units.
        hidden = slim.fully_connected(input, 1000,
                                      biases_initializer=zero_bias,
                                      activation_fn=tf.nn.sigmoid,
                                      scope='fc1')
        # Output logits -- no activation here.
        logits = slim.fully_connected(hidden,
                                      NUMBER_OF_CLASS,
                                      biases_initializer=zero_bias,
                                      activation_fn=None,
                                      scope='fc2')
        return logits


# In[6]:


def loss(prediction,output,temperature = 1):
    """Return (cross_entropy, accuracy) for predicted vs. target distributions.

    Args:
        prediction: probability tensor (already softmax-ed), [batch, classes].
        output: one-hot target labels, same shape.
        temperature: NOTE(review): accepted (and passed by callers) but never
            used here -- temperature scaling happens outside this function.

    Returns:
        cross_entropy: mean over the batch of -sum(output * log(prediction)),
            with predictions clipped to [1e-10, 1] to avoid log(0).
        accuracy: fraction of rows whose argmax matches the label argmax.
    """
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(
        output * tf.log(tf.clip_by_value(prediction,1e-10,1.0)),
                                                  reduction_indices=[1]))
    correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(output,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return cross_entropy,accuracy


# In[7]:


# Training / distillation hyper-parameters.
eps = 0.1          # NOTE(review): unused in this script -- confirm before removing
alpha = 0.5        # NOTE(review): unused; the two student losses are summed 1:1 below
temperature = 1    # softmax temperature for teacher and student softening
start_lr = 1e-4    # initial teacher learning rate (RMSProp)
decay = 1e-6       # per-step learning-rate decay factor


# In[8]:


# Build the joint teacher/student distillation graph.
with tf.Graph().as_default():


    # Flat 784-pixel input; node is named 'input' for later conversion tools.
    x = tf.placeholder(tf.float32, shape=[None, 784], name='input')
    y_ = tf.placeholder(tf.float32, shape=[None, NUMBER_OF_CLASS])
    keep_prob_conv = tf.placeholder(tf.float32)
    keep_prob_hidden = tf.placeholder(tf.float32)
    # Teacher consumes NHWC images; student consumes the flat vector directly.
    x_image = tf.reshape(x, [-1,28,28,1])

    y_conv_teacher=MnistNetworkTeacher(x_image,keep_prob_conv,
                                       keep_prob_hidden,scope = 'teacher')
    y_conv = MnistNetworkStudent(x,scope = 'student')

    # Student probabilities: softened (for distillation) and actual (for eval).
    y_conv_student = tf.nn.softmax(y_conv/temperature)
    y_conv_student_actual = tf.nn.softmax(y_conv)

    # Hard-label losses/accuracies (the temperature kwarg is unused by loss()).
    cross_entropy_teacher, accuracy_teacher=loss(y_conv_teacher,
                                                 y_,
                                                temperature = temperature)
    student_loss1, accuracy_student = loss(y_conv_student_actual,
                                           y_,
                                          temperature = temperature)

    # Distillation term: cross-entropy of softened student vs. teacher output.
    student_loss2 = tf.reduce_mean(
        - tf.reduce_sum(y_conv_teacher * tf.log(tf.clip_by_value(y_conv_student, 1e-10,1.0)), reduction_indices=1)
    )
    # Hard-label loss + distillation loss, weighted 1:1.
    cross_entropy_student = student_loss1 + student_loss2

    # Partition trainable variables by scope so each network trains separately.
    model_vars = tf.trainable_variables()
    var_teacher = [var for var in model_vars if 'teacher' in var.name]
    var_student = [var for var in model_vars if 'student' in var.name]

    grad_teacher = tf.gradients(cross_entropy_teacher,var_teacher)
    grad_student = tf.gradients(cross_entropy_student,var_student)

    # Teacher LR is fed per step; student uses plain SGD with fixed 0.1.
    l_rate = tf.placeholder(shape=[],dtype = tf.float32)

    trainer = tf.train.RMSPropOptimizer(learning_rate = l_rate)
    trainer1 = tf.train.GradientDescentOptimizer(0.1)

    train_step_teacher = trainer.apply_gradients(zip(grad_teacher,var_teacher))
    train_step_student = trainer1.apply_gradients(zip(grad_student,var_student))

    # InteractiveSession makes this graph/session the default for .eval()/.run()
    # in the training loops below.
    sess = tf.InteractiveSession(config=config)
    sess.run(tf.global_variables_initializer())
    # Separate savers so teacher and student checkpoints can be written apart.
    saver1 = tf.train.Saver(var_teacher)
    saver2 = tf.train.Saver(var_student)



# In[9]:


#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Load the license-plate digit images (flattened 784-vectors + one-hot labels).
x_train, y_train, x_test, y_test, class_count = loadimg.loadimg(
    '/home/tokunn/make_image/',
    NUMBER_OF_CLASS
)


# In[10]:


# ---- Phase 1: train the teacher on hard labels. ----
for i in range(10000):
  #batch = mnist.train.next_batch(128)
  # Sequential mini-batches of 128; the final slice of an epoch may be short.
  s = 128*i % len(x_train)
  batch = [x_train[s:s+128], y_train[s:s+128]]
  # Inverse-time learning-rate decay.
  lr = start_lr * 1.0/(1.0 + i*decay)
  if i%100 ==0:
    # Evaluate on the full test set with dropout disabled.
    train_accuracy = accuracy_teacher.eval(feed_dict={x:x_test,
                                                      y_: y_test,
                                                      keep_prob_conv: 1.0,
                                                      keep_prob_hidden: 1.0})
    print("step %d, training accuracy %g,"%(i, train_accuracy))
  train_step_teacher.run(feed_dict={x: batch[0],
                                    y_: batch[1],
                                    keep_prob_conv :0.8,
                                    #keep_prob_hidden:0.5})
                                    keep_prob_hidden:0.5,
                                    l_rate:lr})

saver1.save(sess,'./models/teacher1.ckpt')
print('*'*20)


# ---- Phase 2: distill into the student (teacher weights now frozen by
# construction: only var_student is in the student train op). ----
for i in range(30000):
  #batch = mnist.train.next_batch(100)
  # NOTE(review): the offset strides by 128 but the slice takes 100 samples,
  # so 28 samples per step are skipped -- confirm intended.
  s = 128*i % len(x_train)
  batch = [x_train[s:s+100], y_train[s:s+100]]
  if i%100 == 0:
    train_accuracy = accuracy_student.eval(feed_dict={x:x_test,
                                                      y_: y_test,
                                                      keep_prob_conv: 1.0,
                                                      keep_prob_hidden: 1.0})
    print("step %d, training accuracy %g"%(i, train_accuracy))
  # Dropout stays off (1.0) during student training.
  train_step_student.run(feed_dict={x: batch[0],
                                    y_: batch[1],
                                    keep_prob_conv :1.0,
                                    keep_prob_hidden:1.0})


saver2.save(sess,'./models/student.ckpt')



# In[11]:


# Final student accuracy on the held-out test set.
test_acc = sess.run(accuracy_student,feed_dict={x: x_test,
                                                y_: y_test,
                                                keep_prob_conv: 1.0,
                                                keep_prob_hidden: 1.0})
print("test accuracy of the student model is %g "%(test_acc))


# In[13]:


# Dump the graph for TensorBoard (used to find the output node names).
fw = tf.summary.FileWriter('logs', sess.graph)
fw.close()


# In[ ]:

loadimg

#!/usr/bin/env python2

import os
import numpy as np
import tensorflow as tf
from keras.preprocessing.image import load_img, img_to_array
from keras.utils import np_utils
import matplotlib.pyplot as plt
import glob
from sklearn.model_selection import train_test_split


# Side length (pixels) of the square images fed to the network.
# (The original defined this twice; the duplicate assignment was removed.)
IMGSIZE = 28

def loadimg_one(DIRPATH, NUM):
    """Load one image set where each subdirectory of DIRPATH is a class.

    Every *.jpg / *.png inside a subdirectory receives that directory's
    integer label.  Directories are iterated in sorted order so the
    class -> label mapping is deterministic (os.listdir order is
    filesystem-dependent, which previously made labels unstable between
    runs and machines).

    Args:
        DIRPATH: directory containing one subdirectory per class.
        NUM: optional cap on the number of class directories used
            (None or 0 disables the cap).

    Returns:
        (x, y, output_count): x is a float32 array of shape
        (n, IMGSIZE, IMGSIZE, 1) scaled to [0, 1]; y is the one-hot label
        array; output_count is the number of classes actually loaded.
    """
    x = []
    y = []

    # Sorted for a deterministic class -> label assignment.
    img_list = sorted(os.listdir(DIRPATH))
    if NUM and (len(img_list) > NUM):
        img_list = img_list[:NUM]
    #print("[loadimg] : img_list : ", end=' ')
    #print(img_list)

    img_count = 0

    for number in img_list:
        dirpath = os.path.join(DIRPATH, number)
        dirpic_list = glob.glob(os.path.join(dirpath, '*.jpg'))
        dirpic_list += glob.glob(os.path.join(dirpath, '*.png'))
        for picture in dirpic_list:
            # Grayscale, resized -> (IMGSIZE, IMGSIZE, 1) float array.
            img = img_to_array(load_img(picture, color_mode="grayscale", target_size=(IMGSIZE, IMGSIZE)))
            x.append(img)
            y.append(img_count)
            #print("Load {0} : {1}".format(picture, img_count))
        img_count += 1

    output_count = img_count
    # Scale pixel values to [0, 1].
    x = np.asarray(x, dtype='float32') / 255.0
    y = np.asarray(y, dtype=np.int32)
    y = np_utils.to_categorical(y, output_count)

    return x, y, output_count


def loadimg(COMMONDIR='./', NUM=None):
    """Load train/ and test/ image sets under COMMONDIR and re-split them.

    Args:
        COMMONDIR: root directory containing 'train' and 'test' subtrees.
        NUM: optional cap on the number of class directories (see loadimg_one).

    Returns:
        (x_train, y_train, x_test, y_test, class_count) with each image
        flattened to a 784-element row.

    NOTE(review): the directory-based train/test split is discarded -- both
    sets are concatenated and re-split randomly at the end.
    NOTE(review): train_size=0.2 / test_size=0.8 trains on only 20% of the
    data; this looks inverted -- confirm intended.
    """
    print("########## loadimg ########")

    #COMMONDIR = './make_image'
    TRAINDIR = os.path.join(COMMONDIR, 'train')
    TESTDIR = os.path.join(COMMONDIR, 'test')
    x_train, y_train, class_count = loadimg_one(TRAINDIR, NUM)
    x_test,  y_test,  _  = loadimg_one(TESTDIR, NUM)
    #for i in range(0, x_test.shape[0]):
    #    plt.imshow(x_test[i])
    #    plt.show()
    # Merge both sets and flatten each image to a 784-vector.
    x = np.concatenate((x_train, x_test))
    x = np.reshape(x, [-1, 784])
    y = np.concatenate((y_train, y_test))

    print("x_train, y_train, x_test, y_test, class_count")
    print("x_train shape : ", x_train.shape)

    print("########## END of loadimg ########")
    # Random re-split of the merged data (see NOTEs in the docstring).
    x_train, x_test, y_train, y_test = train_test_split(x, y,train_size=0.2, test_size=0.8)
    return x_train,  y_train, x_test, y_test, class_count

if __name__ == '__main__':
    # Smoke-test: load from the current directory with no class cap.
    loadimg()

コード変換

#!/usr/bin/env python
# coding: utf-8

# In[1]:


import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
import loadimg


# In[2]:


NUMBER_OF_CLASS = 10  # number of output classes (digits 0-9)


# In[3]:


def MnistNetworkStudent(input,scope='Mnist',reuse = False):
    """Student network: flat input -> 1000 sigmoid units -> NUMBER_OF_CLASS logits.

    Equivalent to the arg_scope formulation: both layers get a zero bias
    initializer; only the hidden layer is activated (sigmoid), the final
    layer returns raw logits (softmax is applied by the caller).
    """
    with tf.variable_scope(scope, reuse=reuse):
        zero_bias = tf.constant_initializer(0.0)
        # Hidden layer: 1000 sigmoid units.
        hidden = slim.fully_connected(input, 1000,
                                      biases_initializer=zero_bias,
                                      activation_fn=tf.nn.sigmoid,
                                      scope='fc1')
        # Output logits -- no activation here.
        logits = slim.fully_connected(hidden,
                                      NUMBER_OF_CLASS,
                                      biases_initializer=zero_bias,
                                      activation_fn=None,
                                      scope='fc2')
        return logits


# In[4]:


# Hyper-parameters carried over from the training script.
eps = 0.1          # NOTE(review): unused in this conversion script
alpha = 0.5        # NOTE(review): unused in this conversion script
temperature = 1    # softmax temperature for the softened output node
start_lr = 1e-4    # NOTE(review): unused here (no training happens)
decay = 1e-6       # NOTE(review): unused here (no training happens)


# In[5]:


# Rebuild an inference-only student graph with explicitly named input/output
# nodes so mvNCCompile can locate them (-in=input -on=output).
with tf.Graph().as_default():

    x = tf.placeholder(tf.float32, shape=[None, 784], name='input')
    # NOTE(review): x_image is built but never consumed -- confirm it can go.
    x_image = tf.reshape(x, [-1,28,28,1])

    y_conv = MnistNetworkStudent(x,scope = 'student')

    # Named softmax outputs: 'output' is the node mvNCCompile targets.
    y_conv_student = tf.nn.softmax(y_conv/temperature, name='output_temp')
    y_conv_student_actual = tf.nn.softmax(y_conv, name='output')

    model_vars = tf.trainable_variables()
    var_student = [var for var in model_vars if 'student' in var.name]

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    saver2 = tf.train.Saver(var_student)



# In[6]:


# Restore the trained student weights and re-save under the name the
# mvNCCompile command expects ('flozen' spelling is intentional -- it matches
# the compile command used elsewhere).
saver2.restore(sess, './models/student.ckpt')
saver2.save(sess,'./models/student_flozen.ckpt')


# In[7]:


# Dump the graph for TensorBoard so the output node names can be verified.
fw = tf.summary.FileWriter('logs', sess.graph)
fw.close()

Movidius 予測

import mvnc.mvncapi as mvnc
import numpy as np
from PIL import Image
import cv2
import time, sys, os

import glob

# Default image directory; overridable by the first command-line argument.
IMAGE_DIR_NAME = '/home/tokunn/make_image/'
if (len(sys.argv) > 1):
    IMAGE_DIR_NAME = sys.argv[1]
#IMAGE_DIR_NAME = 'github_deep_mnist/ncappzoo/data/digit_images'

def predict(input):
    """Run inference over `input` on the first Movidius NCS (NCSDK v1 API).

    Args:
        input: sequence of per-image tensors; the caller feeds flattened
            784-element float16 rows.

    Returns:
        The raw output tensor of the LAST image only; per-image argmax
        values and total elapsed time are printed as side effects.
    """
    print("Start prediting ...")  # (sic -- typo kept; it matches logged output)
    # Open the first enumerated NCS device.
    devices = mvnc.EnumerateDevices()
    device = mvnc.Device(devices[0])
    device.OpenDevice()

    # Load graph file data
    with open('./models/graph', 'rb') as f:
        graph_file_buffer = f.read()

    # Initialize a Graph object
    graph = device.AllocateGraph(graph_file_buffer)

    start = time.time()
    for i in range(len(input)):
        # Write the tensor to the input_fifo and queue an inference
        graph.LoadTensor(input[i], None)
        output, userobj = graph.GetResult()
        print(np.argmax(output), end=' ')
    stop = time.time()
    print('')
    print("Time : {0} ({1} images)".format(stop-start, len(input)))

    # Release device resources.
    graph.DeallocateGraph()
    device.CloseDevice()

    return output

if __name__ == '__main__':
    # Collect *.jpg / *.png from the target directory.
    print("Image path : {0}".format(os.path.join(IMAGE_DIR_NAME, '*.jpg or *.png')))
    jpg_list = glob.glob(os.path.join(IMAGE_DIR_NAME, '*.jpg'))
    jpg_list += glob.glob(os.path.join(IMAGE_DIR_NAME, '*.png'))
    if not len(jpg_list):
        print("No image file")
        sys.exit()
    # NOTE(review): reverse() only changes processing order -- presumably
    # cosmetic; confirm it matters.
    jpg_list.reverse()
    print([i.split('/')[-1] for i in jpg_list][:10])
    img_list = []
    for n in jpg_list:
        # Grayscale 28x28, matching the training preprocessing.
        image = cv2.imread(n)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.resize(image, (28, 28))
        img_list.append(image)
    # Scale to [0, 1] and flatten to 784-vectors; NCS expects float16 input.
    img_list = np.asarray(img_list) * (1.0/255.0)
    img_list = np.reshape(img_list, [-1, 784])
    print("imgshape ", img_list.shape)
    predict(img_list.astype(np.float16))

ソースコード

[None, 28, 28, 3]で入力

#!/usr/bin/env python
# coding: utf-8

# In[1]:


import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
import loadimg


# In[2]:


NUMBER_OF_CLASS = 10  # number of output classes (digits 0-9)


# In[3]:


def MnistNetworkTeacher(input,keep_prob_conv,keep_prob_hidden,scope='Mnist',reuse = False):
    """Teacher CNN: 3 x (3x3 conv + 2x2 maxpool + dropout) -> fc625 -> fc logits.

    In this script variant the caller passes the NHWC placeholder
    ([None, 28, 28, 1]) directly.

    Args:
        input: image batch, NHWC.
        keep_prob_conv: dropout keep probability after each pool stage.
        keep_prob_hidden: dropout keep probability after the first FC layer.
        scope: variable scope name (callers pass 'teacher').
        reuse: whether to reuse variables inside `scope`.

    Returns:
        softmax(logits / temperature).  NOTE(review): `temperature` is the
        module-level global defined further down the script -- confirm it is
        bound before the first call.
    """
    with tf.variable_scope(scope,reuse = reuse) as sc :
        # Shared conv defaults: 3x3 kernel, stride 1, zero-init bias, ReLU.
        with slim.arg_scope([slim.conv2d],
                            kernel_size = [3,3],
                            stride = [1,1],
                            biases_initializer=tf.constant_initializer(0.0),
                            activation_fn=tf.nn.relu):


            net = slim.conv2d(input, 32, scope='conv1')
            net = slim.max_pool2d(net,[2, 2], 2, scope='pool1')
            net = tf.nn.dropout(net, keep_prob_conv)

            net = slim.conv2d(net, 64,scope='conv2')
            net = slim.max_pool2d(net,[2, 2], 2, scope='pool2')
            net = tf.nn.dropout(net, keep_prob_conv)

            net = slim.conv2d(net, 128,scope='conv3')
            net = slim.max_pool2d(net,[2, 2], 2, scope='pool3')
            net = tf.nn.dropout(net, keep_prob_conv)

            net = slim.flatten(net)
        # FC head: zero-init biases, ReLU on the hidden layer only.
        with slim.arg_scope([slim.fully_connected],
                            biases_initializer=tf.constant_initializer(0.0),
                            activation_fn=tf.nn.relu) :

            net = slim.fully_connected(net,625,scope='fc1')
            net = tf.nn.dropout(net, keep_prob_hidden)
            net = slim.fully_connected(net,NUMBER_OF_CLASS,activation_fn=None,scope='fc2')

            # Temperature-softened probabilities (the distillation target).
            net = tf.nn.softmax(net/temperature)
            return net


# In[4]:


def MnistNetworkStudent(input,scope='Mnist',reuse = False):
    """Student network: flat input -> 1000 sigmoid units -> NUMBER_OF_CLASS logits.

    Equivalent to the arg_scope formulation: both layers get a zero bias
    initializer; only the hidden layer is activated (sigmoid), the final
    layer returns raw logits (softmax is applied by the caller).
    """
    with tf.variable_scope(scope, reuse=reuse):
        zero_bias = tf.constant_initializer(0.0)
        # Hidden layer: 1000 sigmoid units.
        hidden = slim.fully_connected(input, 1000,
                                      biases_initializer=zero_bias,
                                      activation_fn=tf.nn.sigmoid,
                                      scope='fc1')
        # Output logits -- no activation here.
        logits = slim.fully_connected(hidden,
                                      NUMBER_OF_CLASS,
                                      biases_initializer=zero_bias,
                                      activation_fn=None,
                                      scope='fc2')
        return logits


# In[5]:


def loss(prediction,output,temperature = 1):
    """Return (cross_entropy, accuracy) for predicted vs. target distributions.

    Args:
        prediction: probability tensor (already softmax-ed), [batch, classes].
        output: one-hot target labels, same shape.
        temperature: NOTE(review): accepted (and passed by callers) but never
            used here -- temperature scaling happens outside this function.

    Returns:
        cross_entropy: mean over the batch of -sum(output * log(prediction)),
            with predictions clipped to [1e-10, 1] to avoid log(0).
        accuracy: fraction of rows whose argmax matches the label argmax.
    """
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(
        output * tf.log(tf.clip_by_value(prediction,1e-10,1.0)),
                                                  reduction_indices=[1]))
    correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(output,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return cross_entropy,accuracy


# In[6]:


# Training / distillation hyper-parameters.
eps = 0.1          # NOTE(review): unused in this script -- confirm before removing
alpha = 0.5        # NOTE(review): unused; the two student losses are summed 1:1 below
temperature = 1    # softmax temperature for teacher and student softening
start_lr = 1e-4    # initial teacher learning rate (RMSProp)
decay = 1e-6       # per-step learning-rate decay factor


# In[7]:


# Variant of the distillation graph: the input placeholder is NHWC
# ([None, 28, 28, 1]); the teacher consumes it directly and the student
# consumes the flattened view.
with tf.Graph().as_default():


    x = tf.placeholder(tf.float32, shape=[None, 28,28,1], name='input')
    y_ = tf.placeholder(tf.float32, shape=[None, NUMBER_OF_CLASS])
    keep_prob_conv = tf.placeholder(tf.float32)
    keep_prob_hidden = tf.placeholder(tf.float32)
    # Flat 784-vector view for the fully-connected student.
    x_line = tf.reshape(x, [-1,784])

    # NOTE(review): loadimg() below returns (batch, 784) rows, but x expects
    # (batch, 28, 28, 1); the feed_dicts may need a reshape -- confirm.
    y_conv_teacher=MnistNetworkTeacher(x,keep_prob_conv,
                                       keep_prob_hidden,scope = 'teacher')
    y_conv = MnistNetworkStudent(x_line,scope = 'student')

    # Student probabilities: softened (for distillation) and actual (for eval).
    y_conv_student = tf.nn.softmax(y_conv/temperature)
    y_conv_student_actual = tf.nn.softmax(y_conv)

    # Hard-label losses/accuracies (the temperature kwarg is unused by loss()).
    cross_entropy_teacher, accuracy_teacher=loss(y_conv_teacher,
                                                 y_,
                                                temperature = temperature)
    student_loss1, accuracy_student = loss(y_conv_student_actual,
                                           y_,
                                          temperature = temperature)

    # Distillation term: cross-entropy of softened student vs. teacher output.
    student_loss2 = tf.reduce_mean(
        - tf.reduce_sum(y_conv_teacher * tf.log(tf.clip_by_value(y_conv_student, 1e-10,1.0)), reduction_indices=1)
    )
    cross_entropy_student = student_loss1 + student_loss2

    # Partition trainable variables by scope so each network trains separately.
    model_vars = tf.trainable_variables()
    var_teacher = [var for var in model_vars if 'teacher' in var.name]
    var_student = [var for var in model_vars if 'student' in var.name]

    grad_teacher = tf.gradients(cross_entropy_teacher,var_teacher)
    grad_student = tf.gradients(cross_entropy_student,var_student)

    # Teacher LR is fed per step; student uses plain SGD with fixed 0.1.
    l_rate = tf.placeholder(shape=[],dtype = tf.float32)

    trainer = tf.train.RMSPropOptimizer(learning_rate = l_rate)
    trainer1 = tf.train.GradientDescentOptimizer(0.1)

    train_step_teacher = trainer.apply_gradients(zip(grad_teacher,var_teacher))
    train_step_student = trainer1.apply_gradients(zip(grad_student,var_student))

    # InteractiveSession makes this graph/session the default for .eval()/.run().
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    # Separate savers for teacher and student checkpoints.
    saver1 = tf.train.Saver(var_teacher)
    saver2 = tf.train.Saver(var_student)



# In[8]:


#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Load the license-plate digit images (flattened 784-vectors + one-hot labels).
x_train, y_train, x_test, y_test, class_count = loadimg.loadimg(
    '/home/tokunn/make_image/',
    NUMBER_OF_CLASS
)


# In[9]:


# ---- Phase 1: train the teacher on hard labels. ----
for i in range(10000):
  #batch = mnist.train.next_batch(128)
  # Sequential mini-batches of 128; the final slice of an epoch may be short.
  s = 128*i % len(x_train)
  batch = [x_train[s:s+128], y_train[s:s+128]]
  # Inverse-time learning-rate decay.
  lr = start_lr * 1.0/(1.0 + i*decay)
  if i%100 ==0:
    # Evaluate on the full test set with dropout disabled.
    train_accuracy = accuracy_teacher.eval(feed_dict={x:x_test,
                                                      y_: y_test,
                                                      keep_prob_conv: 1.0,
                                                      keep_prob_hidden: 1.0})
    print("step %d, training accuracy %g,"%(i, train_accuracy))
  train_step_teacher.run(feed_dict={x: batch[0],
                                    y_: batch[1],
                                    keep_prob_conv :0.8,
                                    #keep_prob_hidden:0.5})
                                    keep_prob_hidden:0.5,
                                    l_rate:lr})

saver1.save(sess,'./models/teacher1.ckpt')
print('*'*20)


# ---- Phase 2: distill into the student (only var_student is updated). ----
for i in range(30000):
  #batch = mnist.train.next_batch(100)
  # NOTE(review): the offset strides by 128 but the slice takes 100 samples,
  # so 28 samples per step are skipped -- confirm intended.
  s = 128*i % len(x_train)
  batch = [x_train[s:s+100], y_train[s:s+100]]
  if i%100 == 0:
    train_accuracy = accuracy_student.eval(feed_dict={x:x_test,
                                                      y_: y_test,
                                                      keep_prob_conv: 1.0,
                                                      keep_prob_hidden: 1.0})
    print("step %d, training accuracy %g"%(i, train_accuracy))
  # Dropout stays off (1.0) during student training.
  train_step_student.run(feed_dict={x: batch[0],
                                    y_: batch[1],
                                    keep_prob_conv :1.0,
                                    keep_prob_hidden:1.0})


saver2.save(sess,'./models/student.ckpt')



# In[10]:


# Final student accuracy on the held-out test set.
test_acc = sess.run(accuracy_student,feed_dict={x: x_test,
                                                y_: y_test,
                                                keep_prob_conv: 1.0,
                                                keep_prob_hidden: 1.0})
print("test accuracy of the student model is %g "%(test_acc))


# In[11]:


# Dump the graph for TensorBoard (used to find the output node names).
fw = tf.summary.FileWriter('logs', sess.graph)
fw.close()