Skip to content

Instantly share code, notes, and snippets.

@motivic
Created October 31, 2016 18:05
Show Gist options
  • Select an option

  • Save motivic/c36bdfcad68b4e856526e6676a01b7cd to your computer and use it in GitHub Desktop.

Select an option

Save motivic/c36bdfcad68b4e856526e6676a01b7cd to your computer and use it in GitHub Desktop.
Learning from Data: Logistic Regression Exercise
import numpy as np
# Evaluate at x the straight line determined by the points p1 and p2.
def f(p1, p2, x):
    """Return the y-value at x of the line through p1 and p2."""
    slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
    return slope * (x - p1[0]) + p1[1]
# Logistic-regression exercise ("Learning from Data"): repeatedly generate a
# random target line, train a model by stochastic gradient descent, and record
# the epochs to convergence and the out-of-sample cross-entropy error.

def _random_points(n):
    """Return n points uniform in [-1, 1]^2, augmented with a bias coordinate 1."""
    # dtype=float: the np.float alias was deprecated in NumPy 1.20 and
    # removed in 1.24, so the original dtype=np.float crashes on modern NumPy.
    return np.concatenate((2 * np.random.rand(n, 2) - 1,
                           np.ones((n, 1), dtype=float)),
                          axis=1)


def _random_target():
    """Pick two random points in [-1, 1]^2; return the line through them as x -> y."""
    p1, p2 = 2 * np.random.rand(2, 2) - 1
    slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
    return lambda x: slope * (x - p1[0]) + p1[1]


def _train(training_set, label, eta, tol=0.01):
    """SGD logistic regression on `training_set` with labeling function `label`.

    Runs whole epochs (training set reshuffled each epoch) until the weight
    vector moves less than `tol` within one epoch.  Returns (w, epochs).
    NOTE: shuffles `training_set` in place.
    """
    w = np.zeros(3, dtype=float)
    epoch = 0
    delta = 1.0
    while delta >= tol:
        epoch += 1
        last_w = np.copy(w)
        np.random.shuffle(training_set)
        for p in training_set:
            # Per-point cross-entropy gradient is -y*p / (1 + exp(y * w.p));
            # step against it.
            w += eta * np.divide(label(p) * p,
                                 1 + np.exp(label(p) * np.dot(w, p)))
        delta = np.linalg.norm(last_w - w)
    return w, epoch


def run_experiment(n_runs=100, n_points=100, eta=0.01):
    """Run the full experiment `n_runs` times.

    Each run: draw a random target line, train on `n_points` fresh points with
    learning rate `eta`, then estimate E_out on a fresh test sample.
    Returns (epochs, Eouts) — lists with one entry per run.
    Defaults reproduce the original script's configuration.
    """
    epochs = []
    Eouts = []
    for _ in range(n_runs):
        # Random target: points above the line label +1, below label -1.
        line = _random_target()

        def label(p):
            return np.sign(p[1] - line(p[0]))

        training_set = _random_points(n_points)
        w, epoch = _train(training_set, label, eta)
        epochs.append(epoch)

        # E_out estimate: mean cross-entropy error on a fresh sample.
        # The original divided the sum by the run count N, which was only
        # coincidentally equal to the test-set size; divide by the actual
        # test-set size (identical value at the defaults).
        test_set = _random_points(n_points)
        error = sum(np.log(1 + np.exp(-label(p) * np.dot(w, p)))
                    for p in test_set) / len(test_set)
        Eouts.append(error)
    return epochs, Eouts


if __name__ == "__main__":
    epochs, Eouts = run_experiment()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment