import numpy as np

def hypothesis(X, theta):
    """Linear model prediction: y = theta[0] + theta[1] * X."""
    return theta[0] + theta[1] * X
def gradient(x, y, theta):
    """Gradient of the mean squared error with respect to theta."""
    m = x.shape[0]
    grad = np.zeros((2,))
    for i in range(m):
        y_ = hypothesis(x[i], theta)
        # Accumulate the partial derivatives for intercept and slope
        grad[0] += (y_ - y[i])
        grad[1] += (y_ - y[i]) * x[i]
    return grad / m
def error(x, y, theta):
    """Mean squared error of the model over the whole dataset."""
    m = x.shape[0]
    total_error = 0.0
    for i in range(m):
        y_ = hypothesis(x[i], theta)
        total_error += (y_ - y[i]) ** 2
    return total_error / m
def gradientDescent(x, y, learning_rate=0.1, max_steps=100):
    """Batch gradient descent for simple linear regression."""
    error_list = []
    theta_list = []
    theta = np.zeros((2,))
    for i in range(max_steps):
        grad = gradient(x, y, theta)
        error_list.append(error(x, y, theta))
        # Step theta against the gradient
        theta[0] = theta[0] - learning_rate * grad[0]
        theta[1] = theta[1] - learning_rate * grad[1]
        # Record a snapshot of theta (theta itself is mutated in place)
        theta_list.append((theta[0], theta[1]))
    return error_list, theta_list
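
# A minimal usage sketch. The synthetic data below is illustrative and not
# from the original source; it assumes a 1-D feature array and a noisy
# linear target with true parameters roughly (2.0, 3.0).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.uniform(-1, 1, size=(50,))
    Y = 2.0 + 3.0 * X + 0.1 * rng.standard_normal(50)
    errors, thetas = gradientDescent(X, Y, learning_rate=0.1, max_steps=100)
    print("final theta:", thetas[-1])   # should approach (2.0, 3.0)
    print("final error:", errors[-1])   # should shrink toward the noise floor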