# Univariate linear regression trained with batch gradient descent.
def hypo(X, theta):
    """Linear hypothesis h(X) = theta[0] + theta[1] * X.

    X may be a scalar or a numpy array (the expression broadcasts);
    theta is any length-2 sequence (intercept, slope).
    """
    intercept, slope = theta[0], theta[1]
    return intercept + slope * X
def error(X, Y, theta):
    """Return the sum of squared errors of the linear hypothesis on (X, Y).

    Parameters
    ----------
    X, Y : 1-D numpy arrays of equal length (samples and targets).
    theta : length-2 sequence (intercept, slope).

    Returns
    -------
    float : sum over i of (hypo(X[i], theta) - Y[i]) ** 2.
    """
    # Bug fixes vs. original:
    #  - X.shape is a tuple; the sample count is X.shape[0].
    #  - The accumulator was misspelled: `error += ...` referenced the
    #    function's own name and was never returned; `err` is the accumulator.
    m = X.shape[0]
    err = 0.0
    for i in range(m):
        hx = hypo(X[i], theta)
        err += (hx - Y[i]) ** 2
    return err
def gradient(X, Y, theta):
    """Compute the (unscaled) gradient of the squared-error cost.

    Returns a length-2 numpy array: the partial derivatives with respect
    to the intercept (theta[0]) and the slope (theta[1]), summed over all
    samples without dividing by the sample count.
    """
    grad_vec = np.zeros((2,))
    n_samples = X.shape[0]
    for idx in range(n_samples):
        residual = hypo(X[idx], theta) - Y[idx]
        grad_vec[0] += residual
        grad_vec[1] += residual * X[idx]
    return grad_vec
def gradient_descent(X, Y, learning_rate=0.001, max_itr=100):
    """Fit a univariate linear model by batch gradient descent.

    Parameters
    ----------
    X, Y : 1-D numpy arrays of equal length (samples and targets).
    learning_rate : step size for each parameter update.
    max_itr : number of update iterations (was a hard-coded local in the
        original; exposed as a backward-compatible keyword parameter).

    Returns
    -------
    (theta, error_list) : the fitted length-2 parameter array and the
        per-iteration sum-of-squared-errors history.
    """
    # Bug fixes vs. original:
    #  - `learning_rategrad[0]` / `learning_rategrad[1]` were missing the
    #    `*` operator (NameError at runtime).
    #  - `max_itr` was assigned but ignored; the loop hard-coded `itr <= 100`
    #    (an off-by-one that ran 101 iterations). It now runs exactly max_itr.
    theta = np.zeros((2,))
    error_list = []
    for _ in range(max_itr):
        grad = gradient(X, Y, theta)
        error_list.append(error(X, Y, theta))
        theta[0] -= learning_rate * grad[0]
        theta[1] -= learning_rate * grad[1]
    return theta, error_list
# NOTE(review): X and Y are not defined anywhere in this file as shown —
# presumably loaded by code outside this view; confirm before running.
# Also, this assignment rebinds the name `error`, shadowing the error()
# function defined above — rename the result (e.g. `error_history`) if the
# function is needed later.
final_theta,error = gradient_descent(X,Y)