Skip to content

Commit

Permalink
Use integer division for training iterations.
Browse files Browse the repository at this point in the history
  • Loading branch information
esvhd committed Jul 1, 2017
1 parent 713df95 commit cc4638d
Show file tree
Hide file tree
Showing 5 changed files with 11 additions and 8 deletions.
2 changes: 1 addition & 1 deletion 1_linear_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def main():

for i in range(100):
cost = 0.
num_batches = len(X) / batch_size
num_batches = len(X) // batch_size
for k in range(num_batches):
start, end = k * batch_size, (k + 1) * batch_size
cost += train(model, loss, optimizer, X[start:end], Y[start:end])
Expand Down
11 changes: 7 additions & 4 deletions 2_logistic_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,11 @@


def build_model(input_dim, output_dim):
    """Build a single-layer linear classifier for logistic regression.

    Args:
        input_dim: number of input features.
        output_dim: number of output classes (raw logits, one per class).

    Returns:
        A ``torch.nn.Sequential`` containing one bias-free ``Linear`` layer
        named ``"linear"``.
    """
    # We don't need the softmax layer here since CrossEntropyLoss already
    # uses it internally (it combines LogSoftmax and NLLLoss).
    model = torch.nn.Sequential()
    model.add_module("linear",
                     torch.nn.Linear(input_dim, output_dim, bias=False))
    return model


Expand Down Expand Up @@ -56,10 +58,11 @@ def main():

for i in range(100):
cost = 0.
num_batches = n_examples / batch_size
num_batches = n_examples // batch_size
for k in range(num_batches):
start, end = k * batch_size, (k + 1) * batch_size
cost += train(model, loss, optimizer, trX[start:end], trY[start:end])
cost += train(model, loss, optimizer,
trX[start:end], trY[start:end])
predY = predict(model, teX)
print("Epoch %d, cost = %f, acc = %.2f%%"
% (i + 1, cost / num_batches, 100. * np.mean(predY == teY)))
Expand Down
2 changes: 1 addition & 1 deletion 3_neural_net.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ def main():

for i in range(100):
cost = 0.
num_batches = n_examples / batch_size
num_batches = n_examples // batch_size
for k in range(num_batches):
start, end = k * batch_size, (k + 1) * batch_size
cost += train(model, loss, optimizer, trX[start:end], trY[start:end])
Expand Down
2 changes: 1 addition & 1 deletion 4_modern_neural_net.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def main():

for i in range(100):
cost = 0.
num_batches = n_examples / batch_size
num_batches = n_examples // batch_size
for k in range(num_batches):
start, end = k * batch_size, (k + 1) * batch_size
cost += train(model, loss, optimizer, trX[start:end], trY[start:end])
Expand Down
2 changes: 1 addition & 1 deletion 5_convolutional_net.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def main():

for i in range(20):
cost = 0.
num_batches = n_examples / batch_size
num_batches = n_examples // batch_size
for k in range(num_batches):
start, end = k * batch_size, (k + 1) * batch_size
cost += train(model, loss, optimizer, trX[start:end], trY[start:end])
Expand Down

0 comments on commit cc4638d

Please sign in to comment.