-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtensorflow_functions.py
125 lines (89 loc) · 3.07 KB
/
tensorflow_functions.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from utils import batch
import numpy as np
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Module-level GPU probe: warn loudly when TF cannot see a GPU, since the
# heavy matmul helpers below will be very slow on CPU.
device_name = tf.test.gpu_device_name()
if device_name == '/device:GPU:0':
    print('Found GPU at: {}'.format(device_name))
else:
    logging.warning("GPU device not found. Without a GPU the execution of the program will be very slow.")
def matrix_add(ma, mb):
    """Element-wise sum of two equally-shaped matrices via a TF1 graph.

    Args:
        ma: 2-D sequence (rows x cols) of numbers.
        mb: 2-D sequence with the same shape as ``ma``.

    Returns:
        numpy.ndarray of float32 holding ``ma + mb``.
    """
    tf.reset_default_graph()
    lhs = tf.placeholder(tf.float32, [len(ma), len(ma[0])])
    rhs = tf.placeholder(tf.float32, [len(mb), len(mb[0])])
    total = tf.add(lhs, rhs)
    # Context manager guarantees the session is closed even on error.
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        result = session.run(total, {lhs: ma, rhs: mb})
    tf.reset_default_graph()
    return result
def matrix_dot(ma, mb):
    """Compute ``ma @ mb.T`` in a fresh TF1 graph.

    Note: despite the name this is a matmul against the *transpose* of
    ``mb`` — i.e. pairwise row dot-products between the two matrices.

    Args:
        ma: 2-D sequence of shape (n, d).
        mb: 2-D sequence of shape (m, d).

    Returns:
        numpy.ndarray of float32 with shape (n, m).
    """
    tf.reset_default_graph()
    lhs = tf.placeholder(tf.float32, [len(ma), len(ma[0])])
    rhs = tf.placeholder(tf.float32, [len(mb), len(mb[0])])
    product = tf.matmul(lhs, tf.transpose(rhs))
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        result = session.run(product, {lhs: ma, rhs: mb})
    tf.reset_default_graph()
    return result
def matrix_dot_batches(ma, mb, batch_size=10000):
    """Compute ``ma @ mb.T`` by splitting ``mb`` into batches of rows.

    Splitting keeps each TF graph/placeholder small enough for GPU memory
    when ``mb`` is very large.

    Args:
        ma: 2-D sequence of shape (n, d).
        mb: 2-D sequence of shape (m, d); processed ``batch_size`` rows at a time.
        batch_size: number of rows of ``mb`` per TF call.

    Returns:
        numpy.ndarray of shape (n, m), or None when ``mb`` yields no batches.
    """
    # Collect per-batch results and concatenate once at the end.
    # Concatenating inside the loop (the previous version) re-copies the
    # accumulated array on every iteration, which is O(m^2) overall.
    parts = [matrix_dot(ma, mbatch) for mbatch in batch(mb, batch_size)]
    if not parts:
        return None  # preserve original behavior for empty input
    return np.concatenate(parts, axis=1)
def k_top(ma, k):
    """Return, per row of ``ma``, the column indexes of its k largest values.

    Args:
        ma: 2-D sequence of shape (n, m).
        k: number of top entries to select per row (k <= m).

    Returns:
        numpy.ndarray of int32 indexes with shape (n, k), each row ordered
        from largest to smallest value.
    """
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, [len(ma), len(ma[0])])
    # BUG FIX: the original called tf.nn.top_k(ma, k), baking the Python
    # matrix into the graph as a constant and leaving the fed placeholder
    # unused. Operate on the placeholder so the feed is actually consumed.
    _, indexes = tf.nn.top_k(X, k)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    x = sess.run(indexes, {X: ma})
    sess.close()
    tf.reset_default_graph()
    return x
def cosine_knn(ma, mb, k):
    """For each row of ``ma``, find the k rows of ``mb`` with highest score.

    Scores are the raw dot products ``ma @ mb.T`` — this equals cosine
    similarity only when the rows are already L2-normalized.

    Args:
        ma: 2-D sequence of shape (n, d) of query vectors.
        mb: 2-D sequence of shape (m, d) of candidate vectors.
        k: neighbors to return per query row.

    Returns:
        numpy.ndarray of int32 with shape (n, k): indexes into ``mb``,
        best match first.
    """
    tf.reset_default_graph()
    queries = tf.placeholder(tf.float32, [len(ma), len(ma[0])])
    candidates = tf.placeholder(tf.float32, [len(mb), len(mb[0])])
    scores = tf.matmul(queries, tf.transpose(candidates))
    _, top_indexes = tf.nn.top_k(scores, k)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        result = session.run(top_indexes, {queries: ma, candidates: mb})
    tf.reset_default_graph()
    return result
def cosine_knn_batches(ma, mb, k, batch_size=10000):
    """Batched variant of cosine_knn for large candidate matrices.

    Computes the full similarity matrix in row-batches of ``mb`` (to bound
    GPU memory), then takes the top-k per query row.

    Args:
        ma: 2-D sequence of shape (n, d) of query vectors.
        mb: 2-D sequence of shape (m, d) of candidate vectors.
        k: neighbors to return per query row.
        batch_size: number of rows of ``mb`` per TF call.

    Returns:
        numpy.ndarray of int32 with shape (n, k): indexes into ``mb``,
        best match first.
    """
    # Delegate to matrix_dot_batches instead of duplicating its loop; the
    # original re-ran np.concatenate on every iteration (quadratic copying).
    sims = matrix_dot_batches(ma, mb, batch_size=batch_size)
    return k_top(sims, k)
def matrix_analogy(ma, mb, mc, mM):
    """Score analogy offsets ``(c - a) + b`` against all rows of ``mM``.

    Classic word-vector analogy: for each row triple (a, b, c) the offset
    vector ``c - a + b`` is computed, then dotted against the transpose of
    ``mM`` to score every candidate row.

    Args:
        ma: 2-D sequence of shape (n, d) — the "a" vectors.
        mb: 2-D sequence of shape (n, d) — the "b" vectors.
        mc: 2-D sequence of shape (n, d) — the "c" vectors.
        mM: 2-D sequence of shape (m, d) — candidate matrix to score against.

    Returns:
        numpy.ndarray of float32 with shape (n, m) of similarity scores.
    """
    tf.reset_default_graph()
    vec_a = tf.placeholder(tf.float32, [len(ma), len(ma[0])])
    vec_b = tf.placeholder(tf.float32, [len(mb), len(mb[0])])
    vec_c = tf.placeholder(tf.float32, [len(mc), len(mc[0])])
    candidates = tf.placeholder(tf.float32, [len(mM), len(mM[0])])
    # (c - a) + b, then score against every candidate row.
    offset = tf.add(tf.subtract(vec_c, vec_a), vec_b)
    scores = tf.matmul(offset, tf.transpose(candidates))
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        result = session.run(scores, {vec_a: ma, vec_b: mb, vec_c: mc, candidates: mM})
    tf.reset_default_graph()
    return result