import numpy as np
import matplotlib.pyplot as plt

def hypothesis(theta, x):
    # Linear model prediction: h(x) = theta . x
    return np.dot(x, theta)
def error(X, Y, theta):
    # Mean squared error of the current theta over all m training examples
    total_error = 0
    m = X.shape[0]
    for i in range(m):
        total_error += (Y[i] - hypothesis(theta, X[i])) ** 2
    return total_error / m
def gradient(Y, X, theta):
    # Gradient of the MSE with respect to each parameter theta[j]
    # (the constant factor of 2 is absorbed into the learning rate)
    m, n = X.shape
    grad = np.zeros((n,))
    for j in range(n):
        for i in range(m):
            y_ = hypothesis(theta, X[i])
            grad[j] += (y_ - Y[i]) * X[i][j]
    return grad / m
def gradientDescent(X, Y, learning_rate, max_itr):
    # Batch gradient descent: step theta against the gradient for max_itr
    # iterations, recording the error at each step for plotting
    m, n = X.shape
    theta = np.zeros((n,))
    error_list = []
    for i in range(max_itr):
        grad = gradient(Y, X, theta)
        e = error(X, Y, theta)
        error_list.append(e)
        for j in range(n):
            theta[j] = theta[j] - learning_rate * grad[j]
    return theta, error_list
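# X and Y are not defined in this snippet; they are assumed to be the feature
# matrix (with a leading bias column) and the target vector. A minimal
# synthetic stand-in, for illustration only:
np.random.seed(0)
m_samples = 200
x1 = np.random.randn(m_samples)
X = np.stack([np.ones(m_samples), x1], axis=1)  # bias column + one feature
Y = 4.0 + 3.0 * x1 + 0.1 * np.random.randn(m_samples)  # known line + noise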
theta, error_list = gradientDescent(X, Y, learning_rate=0.1, max_itr=100)
print(theta)
print(error_list)
plt.plot(error_list)
plt.xlabel("Iteration")
plt.ylabel("MSE")
plt.show()
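# For reference, the per-parameter loops above can be replaced by NumPy matrix
# operations. A sketch of an equivalent vectorized gradient (not part of the
# original implementation):
def gradient_vectorized(Y, X, theta):
    residuals = X.dot(theta) - Y            # shape (m,): predictions minus targets
    return X.T.dot(residuals) / X.shape[0]  # sum over examples, shape (n,)

# Sanity check: np.allclose(gradient(Y, X, theta), gradient_vectorized(Y, X, theta))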