import numpy as np

def hypothesis(x, theta, b):
    # Linear score for one example, squashed to a probability
    h = np.dot(x, theta) + b
    return sigmoid(h)

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
def error(y_true, x, theta, b):
    # Average cross-entropy loss over the m training examples
    m = x.shape[0]
    err = 0.0
    for i in range(m):
        hx = hypothesis(x[i], theta, b)
        err += (y_true[i] * np.log2(hx)) + (1 - y_true[i]) * np.log2(1 - hx)
    return -err / m
def get_grads(y_true, x, theta, b):
    # Gradients of the loss with respect to theta and b
    grad_theta = np.zeros(theta.shape)
    grad_b = 0.0
    m = x.shape[0]
    for i in range(m):
        hx = hypothesis(x[i], theta, b)
        grad_theta += -1 * (y_true[i] - hx) * x[i]
        grad_b += -1 * (y_true[i] - hx)
    grad_theta /= m
    grad_b /= m
    return [grad_theta, grad_b]
def grad_descent(x, y_true, theta, b, learning_rate=0.5):
    err = error(y_true, x, theta, b)
    [grad_theta, grad_b] = get_grads(y_true, x, theta, b)
    # get_grads returns the gradient of the loss, so step against it
    theta = theta - learning_rate * grad_theta
    b = b - learning_rate * grad_b
    return err, theta, b
loss = []
theta = 2 * np.random.random((x.shape[1],))  # random initial weights
b = 5 * np.random.random()                   # random initial bias
for i in range(100):
    l, theta, b = grad_descent(x, y, theta, b)
    loss.append(l)
:14: RuntimeWarning: divide by zero encountered in log2
err+= (y_true[i]*np.log2(hx))+(1-y_true[i])*np.log2(1-hx)
:14: RuntimeWarning: invalid value encountered in multiply
err+= (y_true[i]*np.log2(hx))+(1-y_true[i])*np.log2(1-hx)
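The two warnings come from the sigmoid saturating: for large scores np.exp underflows and hypothesis() returns exactly 0.0 or 1.0, so np.log2 is evaluated at 0 (giving -inf) and the product 0 * -inf produces a NaN. A minimal sketch of a clipped variant of the loss is shown below; the name error_stable and the epsilon of 1e-12 are assumptions for illustration, not part of the original code.

def error_stable(y_true, x, theta, b):
    # Same cross-entropy as error(), but hx is kept strictly inside (0, 1)
    eps = 1e-12  # assumed clipping threshold
    m = x.shape[0]
    err = 0.0
    for i in range(m):
        hx = np.clip(hypothesis(x[i], theta, b), eps, 1.0 - eps)
        err += (y_true[i] * np.log2(hx)) + (1 - y_true[i]) * np.log2(1 - hx)
    return -err / m

Calling error_stable in place of error() inside grad_descent suppresses the warnings while leaving the loss unchanged away from the saturated region.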