I am getting a negative accuracy. Please help me.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dfx=pd.read_csv('Linear_X_Train.csv')
dfy=pd.read_csv('Linear_Y_Train.csv')
dfz=pd.read_csv('Linear_X_Test.csv')
x=dfx.values.reshape((-1,))   # flatten to 1-D so per-sample arithmetic gives scalars
y=dfy.values.reshape((-1,))
z=dfz.values.reshape((-1,))
print(x.shape)
print(y.shape)
print(z.shape)
plt.scatter(x,y)   # raw training data
plt.show()
X=(x-x.mean())/x.std()   # standardise the feature: zero mean, unit variance
Y=y
Z=z
plt.scatter(X,Y)   # standardised training data
plt.show()
def hypothesis(x,theta):
    # prediction for one sample: h(x) = theta[0] + theta[1]*x
    return theta[0]+theta[1]*x

def error(X,Y,theta):
    # total squared error of the current theta over the training set
    m=X.shape[0]
    error=0
    for i in range(m):
        hx=hypothesis(X[i],theta)
        error+=(hx-Y[i])**2
    return error
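For reference, the same quantity can be computed without the Python loop; this is just an equivalent vectorised sketch using NumPy broadcasting, not a change to the method:

def error_vectorised(X,Y,theta):
    # identical to error(): sum of squared residuals, via broadcasting
    residuals=theta[0]+theta[1]*X-Y
    return np.sum(residuals**2)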
def gradient(X,Y,theta):
    # gradient of the squared-error cost with respect to theta[0] and theta[1]
    grad=np.zeros((2,))
    m=X.shape[0]
    for i in range(m):
        hx=hypothesis(X[i],theta)
        grad[0]+=(hx-Y[i])
        grad[1]+=(hx-Y[i])*X[i]
    return grad
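Likewise, a vectorised sketch of the same gradient (assuming X and Y are 1-D arrays) would be:

def gradient_vectorised(X,Y,theta):
    # same two partial derivatives as gradient(), computed with broadcasting
    residuals=theta[0]+theta[1]*X-Y
    return np.array([np.sum(residuals),np.sum(residuals*X)])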
# Algorithm: batch gradient descent
def gradientDescent(X,Y,learning_rate=.0001):
    theta=np.zeros((2,))
    itr=0
    max_itr=100
    error_list=[]
    while(itr<=max_itr):
        grad=gradient(X,Y,theta)
        e=error(X,Y,theta)
        error_list.append(e)
        # move both parameters a small step against the gradient
        theta[0]=theta[0]-learning_rate*grad[0]
        theta[1]=theta[1]-learning_rate*grad[1]
        itr+=1
    return theta,error_list
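As a quick sanity check (an addition here, not part of the original script), the learned intercept and slope can be compared against NumPy's closed-form least-squares fit; this assumes X and Y are 1-D:

# np.polyfit with deg=1 returns [slope, intercept] for an ordinary least-squares line
slope,intercept=np.polyfit(X,Y,1)
print("closed-form fit:",intercept,slope)

A large gap between these values and the final_theta printed below usually means the learning rate or iteration count needs tuning.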
final_theta,error_list=gradientDescent(X,Y)
plt.plot(error_list)
plt.show()
print(final_theta)
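One note on the negative accuracy itself: the snippet never shows how the accuracy is computed, so this is an assumption, but if it is the usual R² score (what sklearn's score()/r2_score return for regression), it can legitimately be negative whenever the model predicts worse than simply outputting the mean of Y. A minimal hand-rolled check, using a hypothetical helper:

def r2_score_manual(Y_true,Y_pred):
    # R^2 = 1 - SS_res/SS_tot; negative when the fit is worse than the mean baseline
    ss_res=np.sum((Y_true-Y_pred)**2)
    ss_tot=np.sum((Y_true-Y_true.mean())**2)
    return 1-ss_res/ss_tot

Y_pred=hypothesis(X,final_theta)   # predictions on the standardised training inputs
print(r2_score_manual(Y,Y_pred))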