Is my code correct for a sigmoid neuron?

class SigmoidNeuron:
    """A single sigmoid neuron trained with full-batch gradient descent.

    Supports two loss functions for binary targets Y in {0, 1}:
      - "mse": mean squared error
      - "ce":  cross entropy

    Weights are stored as a row vector of shape (1, n_features); the bias is
    a scalar (it becomes a length-1 array after the first update, matching
    the original implementation's behavior).
    """

    def __init__(self):
        # Parameters are created lazily by fit(initialise=True).
        self.w = None  # weight row vector, shape (1, n_features)
        self.b = None  # scalar bias

    def perceptron(self, X):
        """Pre-activation (aggregation): X @ w.T + b.

        X may be one sample of shape (n_features,) or a batch of shape
        (n_samples, n_features).
        """
        return np.dot(X, self.w.T) + self.b

    def sigmoid(self, X):
        """Sigmoid activation of the pre-activation for X."""
        aggregation = self.perceptron(X)
        return 1.0 / (1.0 + np.exp(-aggregation))

    def grad_mse_w(self, X, Y):
        """Gradient of the MSE loss w.r.t. w for ONE sample (X, Y)."""
        y_hat = self.sigmoid(X)
        return (y_hat - Y) * y_hat * (1 - y_hat) * X

    def grad_mse_b(self, X, Y):
        """Gradient of the MSE loss w.r.t. b for ONE sample (X, Y)."""
        y_hat = self.sigmoid(X)
        return (y_hat - Y) * y_hat * (1 - y_hat)

    def grad_ce_w(self, X, Y):
        """Gradient of the cross-entropy loss w.r.t. w for ONE sample.

        Raises:
            ValueError: if Y is not 0 or 1.
        """
        y_hat = self.sigmoid(X)
        if Y == 0:
            return y_hat * X
        elif Y == 1:
            return -1 * (1 - y_hat) * X
        else:
            raise ValueError("Y should be 0 or 1")

    def grad_ce_b(self, X, Y):
        """Gradient of the cross-entropy loss w.r.t. b for ONE sample.

        Raises:
            ValueError: if Y is not 0 or 1.
        """
        # BUG FIX: the original called the bare name `sigmoid(X)`, which
        # raises NameError — the method must be reached through self.
        y_hat = self.sigmoid(X)
        if Y == 0:
            return y_hat
        elif Y == 1:
            return -1 * (1 - y_hat)
        else:
            raise ValueError("Y should be 0 or 1")

    def predict(self, X):
        """Return sigmoid outputs for every sample in X, shape (n_samples, 1)."""
        Y_pred = [self.sigmoid(x) for x in X]
        return np.array(Y_pred)

    def check_accuracy_score(self, X, Y):
        """Accuracy of 0.5-thresholded predictions against binary targets Y."""
        # ravel() flattens the (n_samples, 1) predictions so their shape
        # matches a 1-D Y — sklearn metrics reject mismatched shapes.
        Y_hat = self.predict(X).ravel()
        Y_hat_binary = (Y_hat > (1 - Y_hat)) * 1  # equivalent to Y_hat > 0.5
        # sklearn convention is (y_true, y_pred); accuracy is symmetric but
        # we follow the documented order.
        return accuracy_score(Y, Y_hat_binary)

    def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True,
            loss_fn='mse', display_loss=True):
        """Train with full-batch gradient descent.

        Args:
            X: training inputs, shape (n_samples, n_features).
            Y: binary targets (0/1), length n_samples.
            epochs: number of passes over the data.
            learning_rate: gradient-descent step size.
            initialise: when True, (re)initialise w randomly and b to 0;
                when False, continue from the current parameters.
            loss_fn: "mse" or "ce" — selects which gradient is used.
            display_loss: when True, record and plot the per-epoch loss.

        Returns:
            (weights_tensor, bias_vector): the weights after each epoch as an
            array of shape (epochs, 1, n_features), and the per-epoch biases.
        """
        if initialise:
            self.w = np.random.randn(1, X.shape[1])
            self.b = 0

        # BUG FIX: all of the tracking state below lived inside the
        # `if initialise:` branch, so fit(..., initialise=False) crashed
        # with NameError. It must exist on every call.
        accuracy = {}
        max_accuracy = 0
        max_accuracy_epoch = 0

        # Best weights / bias seen so far (checkpoint).
        chkptw = np.ones((1, X.shape[1]))
        chkptb = 0

        # Weights and bias recorded after every epoch.
        weights_tensor = np.zeros((epochs, self.w.shape[0], self.w.shape[1]))
        bias_vector = []

        if display_loss:
            loss = {}

        # BUG FIX: gradients are summed over samples, so they must be
        # averaged by the sample count X.shape[0] — the original divided by
        # the feature count X.shape[1], silently rescaling the step size.
        m = X.shape[0]

        for epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            dw = 0
            db = 0
            for x, y in zip(X, Y):
                if loss_fn == "mse":
                    dw += self.grad_mse_w(x, y)
                    db += self.grad_mse_b(x, y)
                elif loss_fn == "ce":
                    dw += self.grad_ce_w(x, y)
                    db += self.grad_ce_b(x, y)

            self.w -= learning_rate * dw / m
            self.b -= learning_rate * db / m

            weights_tensor[epoch] = self.w
            bias_vector.append(self.b)

            accuracy[epoch] = self.check_accuracy_score(X, Y)
            if accuracy[epoch] > max_accuracy:
                max_accuracy = accuracy[epoch]
                max_accuracy_epoch = epoch
                # BUG FIX: the original stored `self.w` itself, an alias
                # that the in-place `-=` updates keep mutating — the
                # checkpoint must be an independent copy.
                chkptw = self.w.copy()
                chkptb = self.b

            if display_loss:
                # ravel() so predictions match a 1-D Y for sklearn metrics.
                Y_pred = self.sigmoid(X).ravel()
                if loss_fn == "mse":
                    loss[epoch] = mean_squared_error(Y, Y_pred)
                elif loss_fn == "ce":
                    # BUG FIX: log_loss(y_true, y_pred) is NOT symmetric;
                    # the original passed the arguments swapped.
                    loss[epoch] = log_loss(Y, Y_pred)

        print("Max accuracy is : ", max_accuracy,
              " achieved in epoch : ", max_accuracy_epoch)

        if display_loss:
            plt.plot(list(loss.values()))
            plt.xlabel('Epochs')
            if loss_fn == "mse":
                plt.ylabel('Mean Squared Error')
            elif loss_fn == "ce":
                plt.ylabel('Log Loss')
            plt.show()

        return weights_tensor, bias_vector