Linear and logistic regressions
vinhkhuc committed Feb 13, 2017
1 parent 2e26abe commit 05f780e
Showing 2 changed files with 123 additions and 0 deletions.
54 changes: 54 additions & 0 deletions 1_linear_regression.py
@@ -0,0 +1,54 @@
import torch
from torch.autograd import Variable
from torch import optim


def build_model():
    model = torch.nn.Sequential()
    model.add_module("linear", torch.nn.Linear(1, 1, bias=False))
    return model


def train(model, loss, optimizer, x, y):
    x = Variable(x, requires_grad=False)
    y = Variable(y, requires_grad=False)

    # Reset gradient
    optimizer.zero_grad()

    # Forward
    fx = model(x.view(len(x), 1))
    output = loss(fx, y.view(len(y), 1))  # reshape y to match fx's (batch, 1) shape

    # Backward
    output.backward()

    # Update parameters
    optimizer.step()

    return output.data[0]


def main():
    torch.manual_seed(42)
    X = torch.linspace(-1, 1, 101)
    Y = 2 * X + torch.randn(X.size()) * 0.33

    model = build_model()
    loss = torch.nn.MSELoss(size_average=True)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    batch_size = 10

    for i in range(100):
        cost = 0.
        num_batches = len(X) // batch_size  # integer division so range() works on Python 3
        for k in range(num_batches):
            start, end = k * batch_size, (k + 1) * batch_size
            cost += train(model, loss, optimizer, X[start:end], Y[start:end])
        print("Epoch = %d, cost = %s" % (i + 1, cost / num_batches))

    w = next(model.parameters()).data  # model has only one parameter
    print("w = %.2f" % w.numpy().flatten()[0])  # will be approximately 2


if __name__ == "__main__":
    main()
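As a sanity check on the learned weight (not part of this commit), the same data admits a closed-form least-squares solution; a minimal sketch, assuming the X and Y generated in 1_linear_regression.py:

import torch

torch.manual_seed(42)
X = torch.linspace(-1, 1, 101)
Y = 2 * X + torch.randn(X.size()) * 0.33

# Without a bias term, least squares reduces to w = (X . Y) / (X . X).
w = torch.dot(X, Y) / torch.dot(X, X)
print("closed-form w = %.2f" % w)  # should also land near 2

The SGD loop above should converge to roughly the same value.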
69 changes: 69 additions & 0 deletions 2_logistic_regression.py
@@ -0,0 +1,69 @@
import numpy as np

import torch
from torch.autograd import Variable
from torch import optim

from data_util import load_mnist


def build_model(input_dim, output_dim):
    model = torch.nn.Sequential()
    # The model outputs raw logits: no Softmax module here, since
    # CrossEntropyLoss applies log-softmax internally, and argmax for
    # prediction is unaffected by softmax anyway.
    model.add_module("linear", torch.nn.Linear(input_dim, output_dim, bias=False))
    return model


def train(model, loss, optimizer, x, y):
    x = Variable(x, requires_grad=False)
    y = Variable(y, requires_grad=False)

    # Reset gradient
    optimizer.zero_grad()

    # Forward
    fx = model(x)
    output = loss(fx, y)

    # Backward
    output.backward()

    # Update parameters
    optimizer.step()

    return output.data[0]


def predict(model, x):
    var_x = Variable(x, requires_grad=False)
    output = model(var_x)
    return output.data.numpy().argmax(axis=1)


def main():
    torch.manual_seed(42)
    trX, teX, trY, teY = load_mnist(onehot=False)
    trX = torch.from_numpy(trX).float()
    teX = torch.from_numpy(teX).float()
    trY = torch.from_numpy(trY).long()

    n_examples, n_features = trX.size()
    n_classes = 10
    model = build_model(n_features, n_classes)
    loss = torch.nn.CrossEntropyLoss(size_average=True)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    batch_size = 100

    for i in range(100):
        cost = 0.
        num_batches = n_examples // batch_size  # integer division for Python 3
        for k in range(num_batches):
            start, end = k * batch_size, (k + 1) * batch_size
            cost += train(model, loss, optimizer, trX[start:end], trY[start:end])
        predY = predict(model, teX)
        print("Epoch %d, cost = %f, acc = %.2f%%"
              % (i + 1, cost / num_batches, 100. * np.mean(predY == teY)))


if __name__ == "__main__":
    main()
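2_logistic_regression.py imports load_mnist from a data_util module that is not included in this commit. For running the script standalone, a minimal stand-in with the same signature might look like the sketch below; the scikit-learn fetch_openml loader and the conventional 60k/10k split are assumptions, not the repository's actual helper:

import numpy as np
from sklearn.datasets import fetch_openml


def load_mnist(onehot=False):
    # Hypothetical replacement for data_util.load_mnist:
    # flat 784-dimensional pixel vectors scaled to [0, 1].
    mnist = fetch_openml("mnist_784", version=1, as_frame=False)
    X = mnist.data.astype(np.float32) / 255.0
    y = mnist.target.astype(np.int64)

    # Conventional 60,000/10,000 train/test split.
    trX, teX = X[:60000], X[60000:]
    trY, teY = y[:60000], y[60000:]

    if onehot:
        trY, teY = np.eye(10)[trY], np.eye(10)[teY]
    return trX, teX, trY, teY

With onehot=False, the labels come back as integer class indices, which is what CrossEntropyLoss expects in the training script above.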
