LiblinearAndCNN.py
'''
Homework:
Use liblinear to classify MNIST.
Use a CNN to generate features (the first fully connected layer).
Save the image features into training and testing files.
Train liblinear on the training set, and evaluate on the test set.
Report the classification accuracy.
'''
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # work around duplicate OpenMP runtimes

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # graph mode: placeholders/sessions require eager execution off

import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

sess = tf.InteractiveSession()

# Placeholders for flattened 28x28 images and one-hot labels
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# Two conv + max-pool blocks: 28x28x1 -> 14x14x32 -> 7x7x64
x_image = tf.reshape(x, [-1, 28, 28, 1])

W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Fully connected layer; its 1024-d activations are the features exported below
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Clip the softmax output so log(0) cannot produce NaNs
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated

# Train the CNN briefly so the FC-layer features are informative
for i in range(500):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# Dump 10,000 training examples in liblinear format:
# "<label> 1:<f1> 2:<f2> ... 1024:<f1024>"
# Open the file once in 'w' mode so reruns do not append duplicate rows.
with open('./train.txt', 'w') as f:
    for i in range(10000):
        batch = mnist.train.next_batch(1)
        feats = sess.run(h_fc1, feed_dict={x: batch[0]})[0]
        label = int(batch[1][0].argmax())  # class index from the one-hot label
        f.write(str(label))
        for j in range(1024):
            f.write(' ' + str(j + 1) + ':' + str(feats[j]))
        f.write('\n')
# Dump 1,000 test examples in the same format
with open('./test.txt', 'w') as f:
    for i in range(1000):
        batch = mnist.test.next_batch(1)
        feats = sess.run(h_fc1, feed_dict={x: batch[0]})[0]
        label = int(batch[1][0].argmax())
        f.write(str(label))
        for j in range(1024):
            f.write(' ' + str(j + 1) + ':' + str(feats[j]))
        f.write('\n')
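# --- Final step from the docstring: train liblinear and report accuracy ---
# A minimal sketch, assuming liblinear's Python bindings (liblinearutil) are
# on the path; module/package names vary by install, so adjust the import.
from liblinearutil import svm_read_problem, train, predict

y_train, x_train = svm_read_problem('./train.txt')  # labels + sparse feature dicts
y_test, x_test = svm_read_problem('./test.txt')
model = train(y_train, x_train, '-s 0')             # -s 0: L2-regularized logistic regression
p_labels, p_acc, p_vals = predict(y_test, x_test, model)
print('liblinear test accuracy: %.2f%%' % p_acc[0])  # p_acc[0] is accuracy in percent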