Skip to content

Commit

Permalink
Merge branch 'travis'
Browse files Browse the repository at this point in the history
  • Loading branch information
vinhkhuc committed Oct 7, 2018
2 parents cbf48ed + 8fff980 commit 8139b0f
Show file tree
Hide file tree
Showing 9 changed files with 26 additions and 28 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
.idea
*.pyc
datasets/mnist
25 changes: 11 additions & 14 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,18 +6,15 @@ python:
- 3.6
cache: bundler
install:
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
pip install -q http://download.pytorch.org/whl/cu75/torch-0.1.12.post2-cp27-none-linux_x86_64.whl;
else
pip install -q http://download.pytorch.org/whl/cu75/torch-0.1.12.post2-cp36-cp36m-linux_x86_64.whl;
fi
- pip install -q torchvision
- pip install -q numpy==1.13.0
- pip install --no-cache-dir -q torch

# We use timeout to prevent a Python process from running so long that it gets
# auto-killed by Travis. When the timeout fires, we replace its exit code 143 with the success exit code 0.
script:
- python 0_multiply.py
- python 1_linear_regression.py
- python 2_logistic_regression.py
- python 3_neural_net.py
- python 4_modern_neural_net.py
- python 5_convolutional_net.py
- python 6_lstm.py
- timeout --preserve-status 2m python 0_multiply.py || if [ $? -eq 0 ] || [ $? -eq 143 ]; then exit 0; fi
- timeout --preserve-status 2m python 1_linear_regression.py || if [ $? -eq 0 ] || [ $? -eq 143 ]; then exit 0; fi
- timeout --preserve-status 5m python 2_logistic_regression.py || if [ $? -eq 0 ] || [ $? -eq 143 ]; then exit 0; fi
- timeout --preserve-status 5m python 3_neural_net.py || if [ $? -eq 0 ] || [ $? -eq 143 ]; then exit 0; fi
- timeout --preserve-status 5m python 4_modern_neural_net.py || if [ $? -eq 0 ] || [ $? -eq 143 ]; then exit 0; fi
- timeout --preserve-status 10m python 5_convolutional_net.py || if [ $? -eq 0 ] || [ $? -eq 143 ]; then exit 0; fi
- timeout --preserve-status 10m python 6_lstm.py || if [ $? -eq 0 ] || [ $? -eq 143 ]; then exit 0; fi
6 changes: 3 additions & 3 deletions 1_linear_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def train(model, loss, optimizer, x, y):
optimizer.zero_grad()

# Forward
fx = model.forward(x.view(len(x), 1))
fx = model.forward(x.view(len(x), 1)).squeeze()
output = loss.forward(fx, y)

# Backward
Expand All @@ -26,7 +26,7 @@ def train(model, loss, optimizer, x, y):
# Update parameters
optimizer.step()

return output.data[0]
return output.item()


def main():
Expand All @@ -35,7 +35,7 @@ def main():
Y = 2 * X + torch.randn(X.size()) * 0.33

model = build_model()
loss = torch.nn.MSELoss(size_average=True)
loss = torch.nn.MSELoss(reduction='elementwise_mean')
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
batch_size = 10

Expand Down
4 changes: 2 additions & 2 deletions 2_logistic_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def train(model, loss, optimizer, x_val, y_val):
# Update parameters
optimizer.step()

return output.data[0]
return output.item()


def predict(model, x_val):
Expand All @@ -52,7 +52,7 @@ def main():
n_examples, n_features = trX.size()
n_classes = 10
model = build_model(n_features, n_classes)
loss = torch.nn.CrossEntropyLoss(size_average=True)
loss = torch.nn.CrossEntropyLoss(reduction='elementwise_mean')
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
batch_size = 100

Expand Down
4 changes: 2 additions & 2 deletions 3_neural_net.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def train(model, loss, optimizer, x_val, y_val):
# Update parameters
optimizer.step()

return output.data[0]
return output.item()


def predict(model, x_val):
Expand All @@ -51,7 +51,7 @@ def main():
n_examples, n_features = trX.size()
n_classes = 10
model = build_model(n_features, n_classes)
loss = torch.nn.CrossEntropyLoss(size_average=True)
loss = torch.nn.CrossEntropyLoss(reduction='elementwise_mean')
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
batch_size = 100

Expand Down
4 changes: 2 additions & 2 deletions 4_modern_neural_net.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def train(model, loss, optimizer, x_val, y_val):
# Update parameters
optimizer.step()

return output.data[0]
return output.item()


def predict(model, x_val):
Expand All @@ -55,7 +55,7 @@ def main():
n_examples, n_features = trX.size()
n_classes = 10
model = build_model(n_features, n_classes)
loss = torch.nn.CrossEntropyLoss(size_average=True)
loss = torch.nn.CrossEntropyLoss(reduction='elementwise_mean')
optimizer = optim.Adam(model.parameters())
batch_size = 100

Expand Down
4 changes: 2 additions & 2 deletions 5_convolutional_net.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def train(model, loss, optimizer, x_val, y_val):
# Update parameters
optimizer.step()

return output.data[0]
return output.item()


def predict(model, x_val):
Expand All @@ -72,7 +72,7 @@ def main():
n_examples = len(trX)
n_classes = 10
model = ConvNet(output_dim=n_classes)
loss = torch.nn.CrossEntropyLoss(size_average=True)
loss = torch.nn.CrossEntropyLoss(reduction='elementwise_mean')
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
batch_size = 100

Expand Down
4 changes: 2 additions & 2 deletions 6_lstm.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def train(model, loss, optimizer, x_val, y_val):
# Update parameters
optimizer.step()

return output.data[0]
return output.item()


def predict(model, x_val):
Expand Down Expand Up @@ -73,7 +73,7 @@ def main():
trY = torch.from_numpy(trY).long()

model = LSTMNet(input_dim, hidden_dim, n_classes)
loss = torch.nn.CrossEntropyLoss(size_average=True)
loss = torch.nn.CrossEntropyLoss(reduction='elementwise_mean')
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

for i in range(epochs):
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
<!-- [![Build Status](https://travis-ci.org/vinhkhuc/PyTorch-Mini-Tutorials.svg?branch=master)](https://travis-ci.org/vinhkhuc/PyTorch-Mini-Tutorials) -->
[![Build Status](https://travis-ci.org/vinhkhuc/PyTorch-Mini-Tutorials.svg?branch=master)](https://travis-ci.org/vinhkhuc/PyTorch-Mini-Tutorials)

Minimal tutorials for [PyTorch](https://github.com/pytorch/pytorch) adapted
from Alec Radford's [Theano tutorials](https://github.com/Newmu/Theano-Tutorials).
Expand Down

0 comments on commit 8139b0f

Please sign in to comment.