def batch_gradient(X, Y, theta, batch_size=30):
    """Accumulate the squared-error residual terms for a 2-parameter
    linear model over a randomly sampled mini-batch.

    Parameters
    ----------
    X : array-like of shape (m,)
        Feature values, one scalar feature per example.
    Y : array-like of shape (m,)
        Target values, indexed in lockstep with ``X``.
    theta : array-like of shape (2,)
        Current model parameters, passed through to ``hypothesis``.
    batch_size : int, optional
        Number of randomly chosen examples in the mini-batch (default 30).
        If larger than ``m`` the whole dataset is used.

    Returns
    -------
    numpy.ndarray of shape (2,)
        ``0.5 * sum`` of ``(Y[i] - h)`` (index 0, intercept term) and
        ``(Y[i] - h) * X[i]`` (index 1, slope term) over the batch.
        NOTE(review): the ``Y - h`` sign makes this the *ascent*
        direction of the squared-error cost — presumably the caller
        updates with ``theta += rate * grad``; confirm against the caller.
    """
    sample_count = Y.shape[0]
    # Draw a random mini-batch: shuffle all indices, keep a prefix.
    # NOTE: mutates NumPy's global RNG state.
    order = np.arange(sample_count)
    np.random.shuffle(order)
    batch = order[:batch_size]

    accumulated = np.zeros((2,))
    for idx in batch:
        predicted = hypothesis(X[idx], theta)
        residual = Y[idx] - predicted
        accumulated[0] += residual            # intercept component
        accumulated[1] += residual * X[idx]   # slope component
    return accumulated * 0.5
What is the use of the batch_gradient function?