import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
# random_mini_batches is a helper defined elsewhere and is not shown here.

def create_placeholders(n_x, n_y, p):
    X = tf.placeholder(tf.float32, [n_x, None])
    Y = tf.placeholder(tf.float32, [n_y, None])
    keep_prob = tf.placeholder(tf.float32, [1, 1])
    return X, Y
def init_params():
    W1 = tf.get_variable("W1", [25, 784],
                         initializer=tf.contrib.layers.xavier_initializer())
    b1 = tf.get_variable("b1", [25, 1],
                         initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25],
                         initializer=tf.contrib.layers.xavier_initializer())
    b2 = tf.get_variable("b2", [12, 1],
                         initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [10, 12],
                         initializer=tf.contrib.layers.xavier_initializer())
    b3 = tf.get_variable("b3", [10, 1],
                         initializer=tf.zeros_initializer())
    params = {"W1": W1, "b1": b1,
              "W2": W2, "b2": b2,
              "W3": W3, "b3": b3}
    return params
def fwrd_prop(X, params):
    W1 = params['W1']
    b1 = params['b1']
    W2 = params['W2']
    b2 = params['b2']
    W3 = params['W3']
    b3 = params['b3']
    Z1 = tf.add(tf.matmul(W1, X), b1)
    A1 = tf.nn.relu(Z1)
    drop_out1 = tf.nn.dropout(A1, keep_prob)
    Z2 = tf.add(tf.matmul(W2, drop_out1), b2)
    A2 = tf.nn.relu(Z2)
    drop_out2 = tf.nn.dropout(A2, keep_prob)
    Z3 = tf.add(tf.matmul(W3, drop_out2), b3)
    return Z3
def compute_cost(Z3, Y, params, lamb):
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)
    reg = tf.nn.l2_loss(params["W1"]) + tf.nn.l2_loss(params["W2"]) + tf.nn.l2_loss(params["W3"])
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                                  labels=labels)
                          + (reg * lamb))
    return cost
def model(X_train, Y_train, X_test, Y_test, lamb=0, p=0.8, learning_rate=0.01,
          num_epochs=100, minibatch_size=30, print_cost=True):
    ops.reset_default_graph()
    tf.set_random_seed(1)
    seed = 3
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    costs = []
    X, Y = create_placeholders(n_x, n_y, p)
    params = init_params()
    Z3 = fwrd_prop(X, params)
    cost = compute_cost(Z3, Y, params, lamb)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            epoch_cost = 0.
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={X: minibatch_X,
                                                        Y: minibatch_Y,
                                                        keep_prob: 0.5})
                epoch_cost += minibatch_cost / num_minibatches
            if print_cost and epoch % 10 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost and epoch % 1 == 0:
                costs.append(epoch_cost)
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
        parameters = sess.run(params)
        print("Parameters have been trained!")
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train, keep_prob: 1.0}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test, keep_prob: 1.0}))
        return parameters
parameters = model(X_train, Y_train, X_test, Y_test)
And when I run it to get parameters, I get this error:
Tensor("dropout/random_uniform:0", shape=(25, ?), dtype=float32) must be from the same graph as Tensor("Placeholder_2:0", dtype=float32).
Any help or hint will be appreciated. Thanks!