Skip to content

Instantly share code, notes, and snippets.

@BBloggsbott
Last active March 14, 2026 09:26
Show Gist options
  • Select an option

  • Save BBloggsbott/fcb5eb918e1b49dfed7209167c06d14f to your computer and use it in GitHub Desktop.

Select an option

Save BBloggsbott/fcb5eb918e1b49dfed7209167c06d14f to your computer and use it in GitHub Desktop.
Simple Machine Learning Implementations
import random
import numpy as np
# Define a simple dataset generated from the linear equation
# y = 4 * x0 + 9 * x1 + 13, with some noise added in.
def generate_dataset(n_rows=100):
    """Generate a synthetic linear-regression dataset.

    Parameters
    ----------
    n_rows : int
        Number of samples to generate.

    Returns
    -------
    tuple
        ``(X, y)`` where ``X`` is a ``(n_rows, 2)`` array of features drawn
        from ``[0, 1)`` and ``y`` is a ``(n_rows,)`` array of targets.
    """
    # 1. Generate the raw feature matrix (n_rows by 2 columns).
    raw_data = np.random.rand(n_rows, 2)
    # 2. Compute targets with vectorized NumPy math: y = 4*x0 + 9*x1 + 13.
    # BUG FIX: the original evaluated random.random() * random.choice([-1, 1])
    # once, adding the SAME scalar to every row — that only shifts the
    # intercept (hence the fitted bias of ~12.40 instead of 13) and is not
    # noise. Draw independent per-sample noise in (-1, 1) instead.
    noise = np.random.uniform(-1.0, 1.0, n_rows)
    y = 4 * raw_data[:, 0] + 9 * raw_data[:, 1] + 13 + noise
    return raw_data, y
# Hyper-parameters for batch gradient descent.
learning_rate = 0.01
iterations = 10000


def _gradient_descent(X, y, learning_rate=0.01, iterations=10000):
    """Fit linear-regression parameters via batch gradient descent.

    Parameters
    ----------
    X : np.ndarray of shape (n_samples, n_features)
        Feature matrix WITHOUT a bias column; a column of ones is
        prepended here so the intercept joins the matrix operation.
    y : np.ndarray of shape (n_samples,) or (n_samples, 1)
        Target values; reshaped to a column vector internally.
    learning_rate : float
        Step size for each gradient update.
    iterations : int
        Number of full-batch gradient steps.

    Returns
    -------
    np.ndarray of shape (n_features + 1, 1)
        Learned parameters; row 0 is the intercept (bias) term.
    """
    n_samples, n_features = X.shape
    # To include the bias (theta_0) in the matrix operation, add a new
    # feature X_0 that is all ones.
    X_with_bias = np.c_[np.ones(n_samples), X]
    theta = np.zeros((n_features + 1, 1))
    # Convert y to a column vector so the subtraction below broadcasts
    # element-wise rather than into an (n, n) matrix.
    y = y.reshape(-1, 1)
    for _ in range(iterations):
        # h_theta(x) = X . theta (matrix multiplication)
        predictions = X_with_bias @ theta
        errors = predictions - y
        # Average the gradient so it does not scale with the dataset size.
        gradient = (1 / n_samples) * X_with_bias.T @ errors
        theta -= learning_rate * gradient
    return theta


# Generate a dataset and fit the model.
X, y = generate_dataset(1000)
theta = _gradient_descent(X, y, learning_rate, iterations)
# Theta converges close to the true parameters (intercept 13, weights 4
# and 9), up to whatever noise the generated data carries.
print(theta)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment