Error does not converge to 0.0001 in a neural network written in Python

by Krutika Parekh   Last Updated February 11, 2019 09:24 AM

    import numpy as np
    import pandas as pd
    import os

    # Load the dataset of features X and target y from an Excel workbook.
    # BUGFIX: the original used '//' as a comment marker here, which is a
    # SyntaxError in Python; '#' is the comment character.
    file = os.path.abspath("D:/Krutsi_RandD/ANN2.xlsx")

    xl = pd.ExcelFile(file)
    df1 = xl.parse('Sheet2')

    # Feature columns: alloy chemistry percentages plus plate geometry /
    # temperature targets and the grade identifier.
    X = np.array(df1[['PCTC', 'PCTMN', 'PCTMO', 'PCTNB', 'PCTTI',
                      'TGTPLATETHICKNESS1', 'ORDERLENGTH', 'TGTTMPLATETEMP',
                      'GradeID']], dtype=float)
    # Target column.
    y = np.array(df1[['P11BodyAvg']], dtype=float)

    # Scale every column into [0, 1] by its column maximum (assumes all
    # values are non-negative and no column is all zeros -- TODO confirm
    # against the actual data).
    X = X / np.amax(X, axis=0)
    y = y / np.amax(y, axis=0)


    class Neural_Network(object):
      def __init__(self):
      #parameters
        self.inputSize = X.shape[1]
        self.outputSize = 1
        self.hiddenSize =63
        self.lr=0.1

      //weights
        self.W1 = np.random.randn(self.inputSize, self.hiddenSize) 
        self.bh= np.random.uniform(self.hiddenSize)
        self.W2 = np.random.randn(self.hiddenSize, self.outputSize) 
        self.bout=np.random.uniform(self.outputSize)
        self.Y=y

      def forward(self, X):

         self.hidden_layer_input=np.dot(X,self.W1)+ self.bh

         self.hiddenlayer_activations = self.sigmoid(self.hidden_layer_input)
         self.output_layer_input=np.dot(self.hiddenlayer_activations,self.W2)+self.bout

         o = self.sigmoid(self.output_layer_input)
         return o



      def sigmoid(self, s):
       // activation function
        return 1/(1+np.exp(-s))



      def sigmoidPrime(self, s):
       //derivative of sigmoid
        D=1/(1+np.exp(-s))
        return D * (1 -D)

      def backward(self,X,y,o):

        self.E = y-o
        self.slope_output_layer = self.sigmoidPrime(o)
        self.slope_hidden_layer = self.sigmoidPrime(self.hiddenlayer_activations)
        self.d_output = self.E * self.slope_output_layer
        self.Error_at_hidden_layer = self.d_output.dot(self.W2.T)
        self.d_hiddenlayer = self.Error_at_hidden_layer * self.slope_hidden_layer
        self.W2 += self.hiddenlayer_activations.T.dot(self.d_output) *self.lr
        self.bout += np.sum(self.d_output, axis=0,keepdims=True) *self.lr
        self.W1 += X.T.dot(self.d_hiddenlayer) *self.lr
        self.bh += np.sum(self.d_hiddenlayer, axis=0,keepdims=True) *self.lr

      def train(self, X, y):
        o = self.forward(X)
        self.backward(X, y, o)

      def saveWeights(self):
        np.savetxt("w1.txt", self.W1, fmt="%s")
        np.savetxt("w2.txt", self.W2, fmt="%s")

      def savepredictedoutput(self):
        np.savetxt("op.txt", (self.forward(X)),fmt="%s")
        np.savetxt("y.txt", self.Y,fmt="%s")

      def predict(self):
        print ("Predicted data based on trained weights: ");
        print ("Input (scaled): \n" + str(X));
        print ("Output: \n" + str(self.forward(X)));

    NN = Neural_Network()
    # Train for 50 full-batch epochs, printing the mean squared error before
    # each update.  (The original comment claimed 1,000 iterations, but the
    # loop only ran 50; the comment is corrected here.  "Itearation" typo and
    # missing spacing in the progress message are also fixed.)
    for i in range(50):
        print("Iteration " + str(i) + " Error: "
              + str(np.mean(np.square(y - NN.forward(X)))))  # mean squared loss
        NN.train(X, y)

    print("Input (scaled): \n" + str(X))
    print("Actual Output: \n" + str(y))
    print("Predicted Output: \n" + str(NN.forward(X)))
    NN.saveWeights()
    NN.savepredictedoutput()
Tags : support


Related Questions


Updated March 26, 2015 07:30 AM

Updated March 26, 2015 07:30 AM

Updated March 26, 2015 07:30 AM

Updated April 03, 2015 21:27 PM