Machine Learning: Linear Regression Using Gradient Descent from Scratch in Python
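
The script below fits a straight line y = w * x + b to the training points by batch gradient descent on the mean squared error, where w is the weight (slope) and b is the bias (intercept). For reference, this is a restatement of what the loop computes, written with the same names the code uses:

    mean_square_error = (1/n) * sum_i (y_i - (w * x_i + b))^2
    d(MSE)/dw = (2/n) * sum_i x_i * ((w * x_i + b) - y_i)
    d(MSE)/db = (2/n) * sum_i ((w * x_i + b) - y_i)

Each epoch then steps both parameters against their gradients:

    w = w - learning_rate * d(MSE)/dw
    b = b - learning_rate * d(MSE)/db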

Code
==================
import numpy as np

# Training data generated from the line y = 5x - 3.
# Goal: learn the slope and intercept, then predict y when x = 25.
x_train = np.array([0, 1, 2, 3, 4, 5, 6], dtype=float)
y_train = np.array([-3, 2, 7, 12, 17, 22, 27], dtype=float)

n = x_train.shape[0]     # number of training samples
weight = 0.0             # slope (a single scalar, since there is one feature)
bias = 0.0               # intercept
learning_rate = 0.001    # step size, small enough to keep the updates stable on this data

for epoch in range(50000):
    # Forward pass: predictions of the current line.
    y_predict = weight * x_train + bias

    # Mean squared error over the whole training set.
    mean_square_error = np.sum((y_train - y_predict) ** 2) / n

    # Gradients of the MSE with respect to the weight and the bias.
    derived_weight = np.sum(2 * x_train * (y_predict - y_train)) / n
    derived_bias = np.sum(2 * (y_predict - y_train)) / n

    # Gradient descent update: step both parameters against their gradients.
    weight -= learning_rate * derived_weight
    bias -= learning_rate * derived_bias

    if epoch % 1000 == 0:
        print("Mean Square Error =====", mean_square_error)

# Predict y for an unseen input.
x_new = 25.0
print("x =", x_new, "y =", weight * x_new + bias)   # should be close to 5 * 25 - 3 = 122
