Error in implementation of Mini batch gradient Descent

This is the error i am getting
ValueError Traceback (most recent call last)
in
----> 1 theta,error_list = gradient_descent(X,y)

in gradient_descent(X, y, l_r, max_epochs)
19 error_list = []
20 for i in range(max_epochs):
—> 21 e = error(X,y,theta)
22 error_list.append(e)
23

in error(X, y, theta)
5 e=0.0
6 m = X.shape[0]
----> 7 y_ = hypothesis(X,theta)
8 e = np.sum((y-y_)**2)
9 return e/m

in hypothesis(X, theta)
1 def hypothesis(X,theta):
----> 2 return np.dot(X,theta,out=None)
3
4 def error(X,y,theta):
5 e=0.0

<array_function internals> in dot(*args, **kwargs)

ValueError: shapes (10000,21) and (10000,) not aligned: 21 (dim 1) != 10000 (dim 0)

HERE IS MY CODE---------------->

ones = np.ones((X.shape[0],1))
X = np.hstack((ones,X))
print(X.shape)
print(y.shape)
#(10000, 21)
#(10000,)

def hypothesis(X,theta):
return np.dot(X,theta,out=None)

def error(X,y,theta):
e=0.0
m = X.shape[0]
y_ = hypothesis(X,theta)
e = np.sum((y-y_)**2)
return e/m
def gradient(X,y,theta):
y_=hypothesis(X,theta)
grad= np.dot(X.T,(y-y_))
m = X.shape[0]
return grad/m

def gradient_descent(X,y,l_r=0.01,max_epochs=300):
n = X.shape[0]
theta = np.zeros((n,))
error_list = []
for i in range(max_epochs):
e = error(X,y,theta)
error_list.append(e)

    #Batch gradient descent wrt to entire dataset
    grad = gradient(X,y,theta)
    theta = theta - l_r*grad
return theta,error_list