Created
February 26, 2026 20:07
-
-
Save Curiouspaul1/6a0b0f059ad668f4a0c4b0d45097e4ed to your computer and use it in GitHub Desktop.
A simple implementation of a single-node, single-layer neural network — a basic perceptron that is able to learn by way of SGD and backprop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import numpy as np | |
# Activation function: squashes any real-valued input into the open interval (0, 1)
def sigmoid(x):
    """Logistic activation.

    Works element-wise on scalars or NumPy arrays; output lies in (0, 1).
    """
    negative_exp = np.exp(-x)
    return 1.0 / (1.0 + negative_exp)
# Derivative of the logistic function — needed by backprop to form the gradient.
def sigmoid_derivative(x):
    """Return sigmoid'(z) expressed in terms of the sigmoid OUTPUT.

    NOTE: `x` here is the already-activated value s = sigmoid(z), not the
    raw pre-activation z; the identity s' = s * (1 - s) makes this cheap.
    """
    one_minus = 1 - x
    return x * one_minus
class Perceptron:
    """A single-neuron, single-layer network trained with SGD and backprop.

    Forward pass is sigmoid(inputs . weights + bias); training updates the
    weights per individual sample (stochastic gradient descent).
    """

    def __init__(self, input_size, learning_rate=0.1):
        # Use a LOCAL seeded RandomState instead of np.random.seed(42):
        # seeding the global RNG as a constructor side effect would clobber
        # unrelated code's randomness. RandomState(42).randn(...) draws the
        # exact same values the original global-seed version did, so the
        # initial weights/bias are unchanged (still reproducible for demos).
        rng = np.random.RandomState(42)
        self.weights = rng.randn(input_size)
        self.bias = rng.randn(1)  # shape (1,), so predictions are shape (1,)
        self.learning_rate = learning_rate

    def predict(self, inputs):
        """Forward pass: sigmoid((inputs * weights) + bias).

        Returns a shape-(1,) array for a single sample (because bias is (1,)).
        """
        linear_output = np.dot(inputs, self.weights) + self.bias
        return sigmoid(linear_output)

    def train(self, training_data, labels, epochs=5000):
        """Fit with stochastic gradient descent.

        training_data: iterable of input vectors; labels: matching targets.
        Prints total absolute error every 1000 epochs.
        """
        print("Starting training...")
        for epoch in range(epochs):
            total_error = 0
            # STOCHASTIC gradient descent: update weights per individual sample
            for inputs, label in zip(training_data, labels):
                # 1. Forward pass (make a guess)
                prediction = self.predict(inputs)
                # 2. Error: how wrong was the guess?
                error = label - prediction
                total_error += abs(error)
                # 3. Backprop: chain rule through the sigmoid gives the
                #    gradient of the error wrt the pre-activation.
                gradient = error * sigmoid_derivative(prediction)
                # 4. Gradient-descent step on weights and bias
                self.weights += self.learning_rate * gradient * inputs
                self.bias += self.learning_rate * gradient
            # Progress report every 1000 epochs. float(...) handles both the
            # shape-(1,) array case and the plain-int 0 case (empty training
            # set), where the original total_error[0] raised a TypeError.
            if epoch % 1000 == 0:
                print(f"Epoch {epoch} | Total Error: {float(total_error):.4f}")
| # Inputs: [Feature 1, Feature 2] | |
| # X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) | |
| # # The Answers (Labels) | |
| # y = np.array([0, 0, 0, 1]) | |
| # Initialize the Perceptron (2 inputs: Feature 1 & Feature 2) | |
| # model = Perceptron(input_size=2, learning_rate=0.1) | |
| # # Check the baseline (untrained) guesses | |
| # print("Untrained Predictions:") | |
| # for i in range(len(X)): | |
| # print(f"Input: {X[i]} | Prediction: {model.predict(X[i])[0]:.4f}") | |
| # print("-" * 30) | |
| # # Train it! | |
| # model.train(X, y, epochs=5000) | |
| # print("-" * 30) | |
| # # # Check the educated guesses | |
| # print("Trained Predictions:") | |
| # for i in range(len(X)): | |
| # # Round the prediction to 0 or 1 for the final answer | |
| # raw_pred = model.predict(X[i])[0] | |
| # final_decision = 1 if raw_pred >= 0.5 else 0 | |
| # print( | |
| # f"Input: {X[i]} | Raw: {raw_pred:.4f} | Output: {final_decision} (Expected: {y[i]})" | |
| # ) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment