xor neural net

General FreeBASIC programming questions.
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

xor neural net

Post by bluatigro »

I translated this C++ code:

Code:

// Author:		John McCullock
// Date:		12-11-2005
// Description:	Backpropagation XOR Example 2.
// Sources: Dr Phil Brierley, www.philbrierley.com

#include <iostream>
#include <cmath>
#include <ctime>
#include <cstdlib>

using namespace std;

const int numInputs = 3;        // Input nodes, plus the bias input.
const int numPatterns = 4;      // Input patterns for XOR experiment.

const int numHidden = 4;
const int numEpochs = 200;
const double LR_IH = 0.7;       // Learning rate, input to hidden weights.
const double LR_HO = 0.07;      // Learning rate, hidden to output weights.

int patNum = 0;
double errThisPat = 0.0;
double outPred = 0.0;                   // "Expected" output values.
double RMSerror = 0.0;                  // Root Mean Squared error.

double hiddenVal[numHidden] = {0.0};            // Hidden node outputs.

double weightsIH[numInputs][numHidden]; // Input to Hidden weights.
double weightsHO[numHidden] = {0.0};            // Hidden to Output weights.

int trainInputs[numPatterns][numInputs];
int trainOutput[numPatterns];           // "Actual" output values.

// Function Prototypes.
void initWeights();
void calcNet();
void WeightChangesHO();
void WeightChangesIH();
void calcOverallError();
void initData();
void displayResults();
double getRand();

int main(){

    srand((unsigned)time(0));   // Seed the generator with system time.

    initWeights();

    initData();

    // Train the network.
    for(int j = 0; j <= numEpochs; j++){

        for(int i = 0; i < numPatterns; i++){

            //Select a pattern at random.
            patNum = rand() % numPatterns;

            //Calculate the output and error for this pattern.
            calcNet();

            //Adjust network weights.
            WeightChangesHO();
            WeightChangesIH();
        }

        calcOverallError();

        //Display the overall network error after each epoch
        cout << "epoch = " << j << " RMS Error = " << RMSerror << endl;

    }
    //Training has finished.

    displayResults();

    return 0;
}

void initWeights(){
// Initialize weights to random values.

    for(int j = 0; j < numHidden; j++){

        weightsHO[j] = (getRand() - 0.5) / 2;
        for(int i = 0; i < numInputs; i++){

            weightsIH[i][j] = (getRand() - 0.5) / 5;
            cout << "Weight = " << weightsIH[i][j] << endl;
        }
    }
}

void initData(){
    // The data here is the XOR data which has been rescaled to 
    // the range -1 to 1.

    // An extra input value of 1 is also added to act as the bias.

    // The output must lie in the range -1 to 1.

    trainInputs[0][0]   =  1;
    trainInputs[0][1]   = -1;
    trainInputs[0][2]   =  1; // Bias
    trainOutput[0]      =  1;

    trainInputs[1][0]   = -1;
    trainInputs[1][1]   =  1;
    trainInputs[1][2]   =  1; // Bias
    trainOutput[1]      =  1;

    trainInputs[2][0]   =  1;
    trainInputs[2][1]   =  1;
    trainInputs[2][2]   =  1; // Bias
    trainOutput[2]      = -1;

    trainInputs[3][0]   = -1;
    trainInputs[3][1]   = -1;
    trainInputs[3][2]   =  1; // Bias
    trainOutput[3]      = -1;
}

void calcNet(){
// Calculates values for Hidden and Output nodes.
    for(int i = 0; i < numHidden; i++){
	  hiddenVal[i] = 0.0;

        for(int j = 0; j < numInputs; j++){
	        hiddenVal[i] = hiddenVal[i] + (trainInputs[patNum][j] * weightsIH[j][i]);
        }

        hiddenVal[i] = tanh(hiddenVal[i]);
    }

    outPred = 0.0;

    for(int i = 0; i < numHidden; i++){
        outPred = outPred + hiddenVal[i] * weightsHO[i];
    }
    //Calculate the error: "Expected" - "Actual"
    errThisPat = outPred - trainOutput[patNum];
}

void WeightChangesHO(){
//Adjust the Hidden to Output weights.

    for(int k = 0; k < numHidden; k++){
        double weightChange = LR_HO * errThisPat * hiddenVal[k];
        weightsHO[k] = weightsHO[k] - weightChange;

        // Regularization of the output weights.
        if (weightsHO[k] < -5){
            weightsHO[k] = -5;
        }else if (weightsHO[k] > 5){
            weightsHO[k] = 5;
        }
    }
}

void WeightChangesIH(){
// Adjust the Input to Hidden weights.

    for(int i = 0; i < numHidden; i++){

        for(int k = 0; k < numInputs; k++){

            double x = 1 - (hiddenVal[i] * hiddenVal[i]);
            x = x * weightsHO[i] * errThisPat * LR_IH;
            x = x * trainInputs[patNum][k];
            double weightChange = x;
            weightsIH[k][i] = weightsIH[k][i] - weightChange;
        }
    }
}

void calcOverallError(){
    RMSerror = 0.0;

    for(int i = 0; i < numPatterns; i++){
         patNum = i;
         calcNet();
         RMSerror = RMSerror + (errThisPat * errThisPat);
    }

    RMSerror = RMSerror / numPatterns;
    RMSerror = sqrt(RMSerror);
}

void displayResults(){
    for(int i = 0; i < numPatterns; i++){
        patNum = i;
        calcNet();
        cout << "pat = " << patNum + 1 << 
                " actual = " << trainOutput[patNum] << 
                " neural model = " << outPred << endl;
    }
}

double getRand(){
    return double(rand() / double(RAND_MAX));
}

into FB:

Code:

'' Author:		John McCullock
'' Date:		12-11-2005
'' Description:	Backpropagation XOR Example 2.
'' Sources: Dr Phil Brierley, www.philbrierley.com
'' translated from c++ to FB by bluatigro

const as integer numInputs = 3       '' Input nodes, plus the bias input.
const as integer numPatterns = 4     '' Input patterns for XOR experiment.

const as integer numHidden = 4
const as integer numEpochs = 200
const as double LR_IH = 0.7       '' Learning rate, input to hidden weights.
const as double LR_HO = 0.07      '' Learning rate, hidden to output weights.

dim shared as integer patNum = 0
dim shared as double errThisPat = 0.0
dim shared as double outPred = 0.0                  '' "Expected" output values.
dim shared as double RMSerror = 0.0                 '' Root Mean Squared error.

dim shared as double hiddenVal( numHidden )         '' Hidden node outputs.

dim shared as double weightsIH( numInputs , numHidden )  '' Input to Hidden weights.
dim shared as double weightsHO( numHidden )          '' Hidden to Output weights.

dim shared as integer trainInputs( numPatterns , numInputs )
dim shared as integer trainOutput( numPatterns )         '' "Actual" output values.

'' Function Prototypes.
declare sub initWeights()
declare sub calcNet()
declare sub WeightChangesHO()
declare sub WeightChangesIH()
declare sub calcOverallError()
declare sub initData()
declare sub displayResults()

'' main()

    randomize timer  '' Seed the generator with system time.

    initWeights

    initData

    '' Train the network.
    dim as integer i , j
    for j = 0 to numEpochs

        for i = 0 to numPatterns - 1

            ''Select a pattern at random.
            patNum = int( rnd * numPatterns )

            ''Calculate the output and error for this pattern.
            calcNet

            ''Adjust network weights.
            WeightChangesHO
            WeightChangesIH
        next i

        calcOverallError

        ''Display the overall network error after each epoch
        print "epoch = " + str(j) + " RMS Error = " + str(RMSerror)

    next j
    ''Training has finished.

    displayResults

sleep
end

function tanh( x as double ) as double
  return ( 1 - exp( -x * 2 ) ) / ( 1 + exp( -x * 2 ) )
end function

sub initWeights()
'' Initialize weights to random values.
  dim as integer i , j 
  
    for j = 0 to numHidden

        weightsHO(j) = ( rnd - 0.5 ) / 2
        for i = 0 to numInputs

            weightsIH(i,j) = ( rnd - 0.5 ) / 5
            print "Weight = " + str( weightsIH(i,j) )
        next i
    next j
end sub

sub initData()
    '' The data here is the XOR data which has been rescaled to 
    '' the range -1 to 1.

    '' An extra input value of 1 is also added to act as the bias.

    '' The output must lie in the range -1 to 1.

    trainInputs(0,0)   =  1
    trainInputs(0,1)   = -1
    trainInputs(0,2)   =  1   '' Bias
    trainOutput(0)     =  1

    trainInputs(1,0)   = -1 
    trainInputs(1,1)   =  1 
    trainInputs(1,2)   =  1  '' Bias
    trainOutput(1)     =  1

    trainInputs(2,0)   =  1
    trainInputs(2,1)   =  1
    trainInputs(2,2)   =  1  '' Bias
    trainOutput(2)     = -1

    trainInputs(3,0)   = -1
    trainInputs(3,1)   = -1
    trainInputs(3,2)   =  1  '' Bias
    trainOutput(3)     = -1
end sub

sub calcNet()
'' Calculates values for Hidden and Output nodes.
  dim as integer i , j

    for i = 0 to numHidden
	  hiddenVal(i) = 0.0

        for j = 0 to numInputs
	        hiddenVal(i) += (trainInputs(patNum,j) * weightsIH(j,i) )
        next j

        hiddenVal(i) = tanh( hiddenVal( i ) )
    next i

    outPred = 0.0

    for i = 0 to numHidden
        outPred += hiddenVal(i) * weightsHO(i)
    next i
    ''Calculate the error: "Expected" - "Actual"
    errThisPat = outPred - trainOutput( patNum )
end sub

sub WeightChangesHO()
''Adjust the Hidden to Output weights.
  dim as integer k
  
    for k = 0 to numHidden 
        dim as double weightChange = LR_HO * errThisPat * hiddenVal(k)
        weightsHO(k) = weightsHO(k) - weightChange

        '' Regularization of the output weights.
        if (weightsHO(k) < -5) then
            weightsHO(k) = -5
        end if
        if (weightsHO(k) > 5) then
            weightsHO(k) = 5
        end if
    next k
end sub

sub WeightChangesIH()
'' Adjust the Input to Hidden weights.
  dim as integer i , k

    for i = 0 to numHidden

        for k = 0 to numInputs

            dim as double x = 1 - (hiddenVal(i) * hiddenVal(i))
            x = x * weightsHO(i) * errThisPat * LR_IH
            x = x * trainInputs(patNum,k)
            dim as double weightChange = x
            weightsIH(k,i) = weightsIH(k,i) - weightChange
        next k
    next i
end sub

sub calcOverallError()
    RMSerror = 0.0
  dim as integer i
  
    for i = 0 to numPatterns - 1
         patNum = i
         calcNet
         RMSerror = RMSerror + (errThisPat * errThisPat)
    next i

    RMSerror = RMSerror / numPatterns
    RMSerror = sqr(RMSerror)
end sub

sub displayResults()
  dim as integer i
  
    for i = 0 to numPatterns - 1
        patNum = i
        calcNet
        print "pat = " + str( patNum + 1 ) _ 
        + " actual = " + str( trainOutput(patNum) ) _ 
        + " neural model = " + str( outPred )
    next i
end sub

I want to rebuild this into a general-purpose class library file.
Does anyone have a good idea how?
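
One possible starting point, as a minimal sketch only: wrap the arrays in a TYPE with variable-length array fields, so the network size becomes a constructor argument instead of a set of constants. The name NeuralNet and its members are illustrative, not taken from the code above; pattern() is expected to include the bias input, like trainInputs does.

Code:

type NeuralNet
  as integer nIn , nHid                '' input count (incl. bias) and hidden count
  as double weightsIH( any , any )     '' input -> hidden weights
  as double weightsHO( any )           '' hidden -> output weights
  as double hiddenVal( any )           '' last hidden activations
  declare constructor( byval inputs as integer , byval hidden as integer )
  declare function predict( pattern() as double ) as double
  declare sub train( pattern() as double , byval target as double , _
                     byval lr_ih as double , byval lr_ho as double )
end type

function tanh( byval x as double ) as double   '' FB has no built-in tanh
  return ( 1 - exp( -2 * x ) ) / ( 1 + exp( -2 * x ) )
end function

constructor NeuralNet( byval inputs as integer , byval hidden as integer )
  nIn = inputs : nHid = hidden
  redim weightsIH( nIn - 1 , nHid - 1 )
  redim weightsHO( nHid - 1 )
  redim hiddenVal( nHid - 1 )
  for j as integer = 0 to nHid - 1               '' same init scheme as initWeights()
    weightsHO(j) = ( rnd - 0.5 ) / 2
    for i as integer = 0 to nIn - 1
      weightsIH(i,j) = ( rnd - 0.5 ) / 5
    next i
  next j
end constructor

function NeuralNet.predict( pattern() as double ) as double
  dim as double result = 0
  for i as integer = 0 to nHid - 1               '' forward pass, as in calcNet()
    hiddenVal(i) = 0
    for j as integer = 0 to nIn - 1
      hiddenVal(i) += pattern(j) * weightsIH(j,i)
    next j
    hiddenVal(i) = tanh( hiddenVal(i) )
    result += hiddenVal(i) * weightsHO(i)
  next i
  return result
end function

sub NeuralNet.train( pattern() as double , byval target as double , _
                     byval lr_ih as double , byval lr_ho as double )
  dim as double errThis = predict( pattern() ) - target
  for k as integer = 0 to nHid - 1               '' as in WeightChangesHO()
    weightsHO(k) -= lr_ho * errThis * hiddenVal(k)
  next k
  for i as integer = 0 to nHid - 1               '' as in WeightChangesIH()
    dim as double g = ( 1 - hiddenVal(i) * hiddenVal(i) ) * weightsHO(i) * errThis * lr_ih
    for j as integer = 0 to nIn - 1
      weightsIH(j,i) -= g * pattern(j)
    next j
  next i
end sub

'' usage: dim net as NeuralNet = NeuralNet( 3 , 4 )
''        net.train( pat() , 1.0 , 0.7 , 0.07 ) : print net.predict( pat() )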
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: xor neural net

Post by bluatigro »

Update: a test with more inputs (three inputs, eight patterns).

Code:

'' Author:		John McCullock
'' Date:		12-11-2005
'' Description:	Backpropagation XOR Example 2.
'' Sources: Dr Phil Brierley, www.philbrierley.com
'' translated from c++ to FB by bluatigro
const as integer numInputs = 3       '' Input nodes, plus the bias input.
const as integer numPatterns = 8     '' Input patterns for XOR experiment.
const as integer numHidden = 4
const as integer numEpochs = 200
const as double LR_IH = 0.7       '' Learning rate, input to hidden weights.
const as double LR_HO = 0.07      '' Learning rate, hidden to output weights.
dim shared as integer patNum = 0
dim shared as double errThisPat = 0.0
dim shared as double outPred = 0.0                  '' "Expected" output values.
dim shared as double RMSerror = 0.0                 '' Root Mean Squared error.
dim shared as double hiddenVal( numHidden )         '' Hidden node outputs.
dim shared as double weightsIH( numInputs , numHidden )  '' Input to Hidden weights.
dim shared as double weightsHO( numHidden )          '' Hidden to Output weights.
dim shared as integer trainInputs( numPatterns , numInputs )
dim shared as integer trainOutput( numPatterns )         '' "Actual" output values.
'' Function Prototypes.
declare sub initWeights()
declare sub calcNet()
declare sub WeightChangesHO()
declare sub WeightChangesIH()
declare sub calcOverallError()
declare sub initData()
declare sub displayResults()

randomize timer  '' Seed the generator with system time    
initWeights
initData
'' Train the network.
dim as integer i , j
for j = 0 to numEpochs
    for i = 0 to numPatterns - 1
        ''Select a pattern at random.
        patNum = int( rnd * numPatterns )
        ''Calculate the output and error for this pattern.
        calcNet
        ''Adjust network weights.
        WeightChangesHO
        WeightChangesIH
    next i
    calcOverallError
    ''Display the overall network error after each epoch
    print "epoch = " + str(j) + " RMS Error = " + str(RMSerror)
next j
''Training has finished.
displayResults
sleep
end

function tanh( x as double ) as double
    return ( 1 - exp( -x * 2 ) ) / ( 1 + exp( -x * 2 ) )
end function

sub initWeights()
'' Initialize weights to random values.
    dim as integer i , j 
    for j = 0 to numHidden
        weightsHO(j) = ( rnd - 0.5 ) / 2
        for i = 0 to numInputs
            weightsIH(i,j) = ( rnd - 0.5 ) / 5
            print "Weight = " + str( weightsIH(i,j) )
        next i
    next j
end sub

sub setTrain( p as integer _
  , a as integer , b as integer , c as integer _
  , uit as integer )
    trainInputs(p,0)   =  a
    trainInputs(p,1)   =  b
    trainInputs(p,2)   =  c
    trainInputs(p,3)   =  1   '' Bias
    trainOutput(p)     =  uit
end sub

function xx( a as integer , b as integer , c as integer ) as integer
  if a <> b and b <> c then
    return 1
  else
    return -1
  end if
end function
  
sub initData()
    '' The data here is the xor data which has been rescaled to 
    '' the range -1 to 1.
    '' An extra input value of 1 is also added to act as the bias.
    '' The output must lie in the range -1 to 1.
    dim as integer a,b,c,tel
    for a = -1 to 1 step 2
      for b = -1 to 1 step 2
        for c = -1 to 1 step 2
          setTrain tel , a,b,c ,  xx(a,b,c)
          tel += 1
        next c
      next b
    next a
end sub

sub calcNet()
'' Calculates values for Hidden and Output nodes.
    dim as integer i , j
    for i = 0 to numHidden
    	  hiddenVal(i) = 0.0
        for j = 0 to numInputs
	        hiddenVal(i) += (trainInputs(patNum,j) * weightsIH(j,i) )
        next j
        hiddenVal(i) = tanh( hiddenVal( i ) )
    next i
    outPred = 0.0
    for i = 0 to numHidden
        outPred += hiddenVal(i) * weightsHO(i)
    next i
    ''Calculate the error: "Expected" - "Actual"
    errThisPat = outPred - trainOutput( patNum )
end sub

sub WeightChangesHO()
''Adjust the Hidden to Output weights.
    dim as integer k
    for k = 0 to numHidden 
        dim as double weightChange = LR_HO * errThisPat * hiddenVal(k)
        weightsHO(k) = weightsHO(k) - weightChange
        '' Regularization of the output weights.
        if (weightsHO(k) < -5) then
            weightsHO(k) = -5
        end if
        if (weightsHO(k) > 5) then
            weightsHO(k) = 5
        end if
    next k
end sub

sub WeightChangesIH()
'' Adjust the Input to Hidden weights.
    dim as integer i , k
    for i = 0 to numHidden
        for k = 0 to numInputs
            dim as double x = 1 - (hiddenVal(i) * hiddenVal(i))
            x = x * weightsHO(i) * errThisPat * LR_IH
            x = x * trainInputs(patNum,k)
            dim as double weightChange = x
            weightsIH(k,i) = weightsIH(k,i) - weightChange
        next k
    next i
end sub

sub calcOverallError()
    RMSerror = 0.0
    dim as integer i
    for i = 0 to numPatterns - 1
         patNum = i
         calcNet
         RMSerror = RMSerror + (errThisPat * errThisPat)
    next i
    RMSerror = RMSerror / numPatterns
    RMSerror = sqr(RMSerror)
end sub

sub displayResults()
    dim as integer i
    for i = 0 to numPatterns - 1
        patNum = i
        calcNet
        print "pat = " + str( patNum + 1 ) _ 
        + " actual = " + str( trainOutput(patNum) ) _ 
        + " neural model = " + str( outPred )
    next i
end sub

How do I rebuild this into a general-purpose class?
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: xor neural net

Post by bluatigro »

Trying to get rid of the global variables.

I got many errors.

Code:

'' Author:		John McCullock
'' Date:		12-11-2005
'' Description:	Backpropagation XOR Example 2.
'' Sources: Dr Phil Brierley, www.philbrierley.com
'' translated from c++ to FB by bluatigro
const as integer numInputs = 3       '' Input nodes, plus the bias input.
const as integer numPatterns = 4     '' Input patterns for XOR experiment.
const as integer numHidden = 4
const as integer numEpochs = 200
const as double LR_IH = 0.7       '' Learning rate, input to hidden weights.
const as double LR_HO = 0.07      '' Learning rate, hidden to output weights.
dim shared as double hiddenVal( numHidden )         '' Hidden node outputs.
dim shared as double weightsIH( numInputs , numHidden )  '' Input to Hidden weights.
dim shared as double weightsHO( numHidden )          '' Hidden to Output weights.
dim shared as integer trainInputs( numPatterns , numInputs )
dim shared as integer trainOutput( numPatterns )         '' "Actual" output values.
'' Function Prototypes.
declare sub initWeights()
declare function calcNet( p as integer , byref o as double ) as double
declare sub WeightChangesHO( e as double )
declare sub WeightChangesIH( p as integer , e as double )
declare function calcOverallError() as double
declare sub initData()
declare sub displayResults()

randomize timer  '' Seed the generator with system time    
initWeights
initData
'' Train the network.
dim as integer i , j , patNum
dim as double errThisPat , RMSerror , outPred
for j = 0 to numEpochs
    for i = 0 to numPatterns - 1
        ''Select a pattern at random.
        patNum = int( rnd * numPatterns )
        ''Calculate the output and error for this pattern.
        errThisPat = calcNet( patNum , outPred )
        ''Adjust network weights.
        WeightChangesHO errThisPat
        WeightChangesIH patNum , errThisPat
    next i
    RMSerror = calcOverallError()
    ''Display the overall network error after each epoch
    print "epoch = " + str(j) + " RMS Error = " + str(RMSerror)
next j
''Training has finished.
displayResults
sleep
end

function tanh( x as double ) as double
    return ( 1 - exp( -x * 2 ) ) / ( 1 + exp( -x * 2 ) )
end function

sub initWeights()
'' Initialize weights to random values.
    dim as integer i , j 
    for j = 0 to numHidden
        weightsHO(j) = ( rnd - 0.5 ) / 2
        for i = 0 to numInputs
            weightsIH(i,j) = ( rnd - 0.5 ) / 5
            print "Weight = " + str( weightsIH(i,j) )
        next i
    next j
end sub

sub setTrain( p as integer _
  , a as integer , b as integer , uit as integer )
    trainInputs(p,0)   =  a
    trainInputs(p,1)   =  b
    trainInputs(p,2)   =  1   '' Bias
    trainOutput(p)     =  uit
end sub
  
sub initData()
    '' The data here is the XOR data which has been rescaled to 
    '' the range -1 to 1.
    '' An extra input value of 1 is also added to act as the bias.
    '' The output must lie in the range -1 to 1.
    setTrain 0 ,  1 , -1 ,  1
    setTrain 1 , -1 ,  1 ,  1
    setTrain 2 ,  1 ,  1 , -1
    setTrain 3 , -1 , -1 , -1
end sub

function calcNet( patNum as integer _
  , byref outPred as double ) as double
'' Calculates values for Hidden and Output nodes.
    dim as integer i , j
    for i = 0 to numHidden
    	  hiddenVal(i) = 0.0
        for j = 0 to numInputs
	        hiddenVal(i) += (trainInputs(patNum,j) * weightsIH(j,i) )
        next j
        hiddenVal(i) = tanh( hiddenVal( i ) )
    next i
    outPred = 0.0
    for i = 0 to numHidden
        outPred += hiddenVal(i) * weightsHO(i)
    next i
    ''Calculate the error: "Expected" - "Actual"
    return outPred - trainOutput( patNum )
end function

sub WeightChangesHO( errThisPat as double )
''Adjust the Hidden to Output weights.
    dim as integer k
    for k = 0 to numHidden 
        dim as double weightChange = LR_HO * errThisPat * hiddenVal(k)
        weightsHO(k) = weightsHO(k) - weightChange
        '' Regularization of the output weights.
        if (weightsHO(k) < -5) then
            weightsHO(k) = -5
        end if
        if (weightsHO(k) > 5) then
            weightsHO(k) = 5
        end if
    next k
end sub

sub WeightChangesIH( patNum as integer , errThisPat as double )
'' Adjust the Input to Hidden weights.
    dim as integer i , k
    for i = 0 to numHidden
        for k = 0 to numInputs
            dim as double x = 1 - (hiddenVal(i) * hiddenVal(i))
            x = x * weightsHO(i) * errThisPat * LR_IH
            x = x * trainInputs(patNum,k)
            dim as double weightChange = x
            weightsIH(k,i) = weightsIH(k,i) - weightChange
        next k
    next i
end sub

function calcOverallError() as double
    dim as double RMSerror = 0.0 , errThisPat , outPred
    dim as integer i , patNum
    for i = 0 to numPatterns - 1
         patNum = i
         errThisPat = calcNet( patNum , outPred )
         RMSerror = RMSerror + (errThisPat * errThisPat)
    next i
    RMSerror = RMSerror / numPatterns
    return sqr(RMSerror)
end function

sub displayResults()
    dim as integer i , patNum
    dim as double errThisPat , outPred
    for i = 0 to numPatterns - 1
        patNum = i
        errThisPat = calcNet( patNum , outPred )
        print "pat = " + str( patNum + 1 ) _ 
        + " actual = " + str( trainOutput(patNum) ) _ 
        + " neural model = " + str( outPred )
    next i
end sub
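
The refactoring pattern used above, reduced to a toy example (the names here are illustrative): a value that was global either becomes a plain parameter, a ByRef out-parameter, or the return value.

Code:

'' a former global now flows through parameters and the return value:
function calcError( byval predicted as double , byval target as double ) as double
  return predicted - target
end function

'' a former global now written back through a ByRef parameter:
sub halve( byref x as double )
  x = x / 2            '' the caller's variable is updated in place
end sub

dim as double e = calcError( 0.8 , 1.0 )   '' -0.2
halve( e )
print e                                    '' -0.1
sleep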
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

char neural net

Post by bluatigro »

First try at character recognition.

Error: the "f" is not recognised; I get strange results.

Code:

'' Author:		John McCullock
'' Date:		12-11-2005
'' Description:	Backpropagation XOR Example 2.
'' Sources: Dr Phil Brierley, www.philbrierley.com
'' translated from c++ to FB by bluatigro
'' rebuild to char recognision try 20 jan 2017
const as string chars = " abcdefghjiklm"
const as integer numInputs = 8 * 16 + 1      '' Input nodes, plus the bias input.
const as integer numPatterns = len( chars )     '' Input patterns for XOR experiment.
const as integer numHidden = 100
const as integer numEpochs = 200
const as double LR_IH = 0.7       '' Learning rate, input to hidden weights.
const as double LR_HO = 0.07      '' Learning rate, hidden to output weights.
dim shared as integer patNum = 0
dim shared as double errThisPat = 0.0
dim shared as double outPred = 0.0                  '' "Expected" output values.
dim shared as double RMSerror = 0.0                 '' Root Mean Squared error.
dim shared as double hiddenVal( numHidden )         '' Hidden node outputs.
dim shared as double weightsIH( numInputs , numHidden )  '' Input to Hidden weights.
dim shared as double weightsHO( numHidden )          '' Hidden to Output weights.
dim shared as integer trainInputs( numPatterns , numInputs )
dim shared as integer trainOutput( numPatterns )         '' "Actual" output values.
'' Function Prototypes.
declare sub initWeights()
declare sub calcNet()
declare sub WeightChangesHO()
declare sub WeightChangesIH()
declare sub calcOverallError()
declare sub initData()
declare sub displayResults()

randomize timer  '' Seed the generator with system time
screen 12        '' graphics mode (8x16 font cells): POINT in getpixel() needs a screen to read the printed glyphs
initWeights
initData
'' Train the network.
dim as integer i , j
for j = 0 to numEpochs
    for i = 0 to numPatterns - 1
        ''Select a pattern at random.
        patNum = 1 + int( rnd * numPatterns )  '' patterns are stored at 1..numPatterns
        ''Calculate the output and error for this pattern.
        calcNet
        ''Adjust network weights.
        WeightChangesHO
        WeightChangesIH
    next i
    calcOverallError
    ''Display the overall network error after each epoch
    print "epoch = " + str(j) + " RMS Error = " + str(RMSerror)
next j
''Training has finished.
displayResults
sleep
end

function tanh( x as double ) as double
    return ( 1 - exp( -x * 2 ) ) / ( 1 + exp( -x * 2 ) )
end function

sub initWeights()
'' Initialize weights to random values.
    dim as integer i , j 
    for j = 0 to numHidden
        weightsHO(j) = ( rnd - 0.5 ) / 2
        for i = 0 to numInputs
            weightsIH(i,j) = ( rnd - 0.5 ) / 5
            print "Weight = " + str( weightsIH(i,j) )
        next i
    next j
end sub

const as integer no = -1
const as integer yes = 1
function getpixel( x as integer , y as integer ) as integer
''so i can read a pixel from any 24 bit bmp
  dim as ulong kl = point( x , y )
  dim as integer r , g , b
  r = kl and 255
  g = int( kl shr 8 ) and 255
  b = int( kl shr 16 ) and 255
  return iif( r + g + b > 127 * 3 , no , yes )  '' brightness threshold: light pixel -> no, dark pixel -> yes
end function

sub initData()
    '' The data here is the XOR data which has been rescaled to 
    '' the range -1 to 1.
    '' An extra input value of 1 is also added to act as the bias.
    '' The output must lie in the range -1 to 1.
    dim as integer i , p
    for i = 1 to numPatterns
      cls
      print mid( chars , i , 1 )
      for p = 0 to numInputs - 1
        TrainInputs( i , p ) = getpixel( p and 7 , int( p shr 3 ) )
      next p
      TrainInputs( i , numInputs ) = 1
      TrainOutput( i ) = iif( mid( chars , i , 1 ) = "f" , yes , no )
    next i
end sub

sub calcNet()
'' Calculates values for Hidden and Output nodes.
    dim as integer i , j
    for i = 0 to numHidden
    	  hiddenVal(i) = 0.0
        for j = 0 to numInputs
	        hiddenVal(i) += (trainInputs(patNum,j) * weightsIH(j,i) )
        next j
        hiddenVal(i) = tanh( hiddenVal( i ) )
    next i
    outPred = 0.0
    for i = 0 to numHidden
        outPred += hiddenVal(i) * weightsHO(i)
    next i
    ''Calculate the error: "Expected" - "Actual"
    errThisPat = outPred - trainOutput( patNum )
end sub

sub WeightChangesHO()
''Adjust the Hidden to Output weights.
    dim as integer k
    for k = 0 to numHidden 
        dim as double weightChange = LR_HO * errThisPat * hiddenVal(k)
        weightsHO(k) = weightsHO(k) - weightChange
        '' Regularization of the output weights.
        if (weightsHO(k) < -5) then
            weightsHO(k) = -5
        end if
        if (weightsHO(k) > 5) then
            weightsHO(k) = 5
        end if
    next k
end sub

sub WeightChangesIH()
'' Adjust the Input to Hidden weights.
    dim as integer i , k
    for i = 0 to numHidden
        for k = 0 to numInputs
            dim as double x = 1 - (hiddenVal(i) * hiddenVal(i))
            x = x * weightsHO(i) * errThisPat * LR_IH
            x = x * trainInputs(patNum,k)
            dim as double weightChange = x
            weightsIH(k,i) = weightsIH(k,i) - weightChange
        next k
    next i
end sub

sub calcOverallError()
    RMSerror = 0.0
    dim as integer i
    for i = 1 to numPatterns  '' patterns are stored at 1..numPatterns
         patNum = i
         calcNet
         RMSerror = RMSerror + (errThisPat * errThisPat)
    next i
    RMSerror = RMSerror / numPatterns
    RMSerror = sqr(RMSerror)
end sub

sub displayResults()
    dim as integer i
    for i = 1 to numPatterns
        patNum = i
        calcNet
        print "pat = " + mid( chars , patNum , 1 ) _ 
        + " actual = " + str( trainOutput(patNum) ) _ 
        + " neural model = " + str( outPred )
    next i
end sub

bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: xor neural net

Post by bluatigro »

I'm working towards picture recognition. I need an array of labeled pictures, so I decided to create my own.

Error: expected const.

Code:

''bluatigro 20 jan 2017
''picture creator for ann picture

screen 20 , 32

dim shared as any ptr img = imagecreate( 64 , 64 )

function range( l as integer , h as integer ) as integer
  return cint( rnd * ( h - l ) + l )
end function
function nr( n as integer , m as integer ) as string
  return right( "000000000" + str( n ) , m )
end function

sub shape( q as integer )
  dim as integer x , y , r = range( 10 , 25 )
  x = range( r , 64 - r )
  y = range( r , 64 - r )
  if q then 
    circle img , ( x , y ) , r , &hffffff ,,,, f
  else
    line img , ( x - r , y - r ) - ( x + r , y + r ) , &hffffff , bf
  end if
end sub

dim as integer i

for i = 0 to 999
  line img , ( 0 , 0 ) - ( 64 , 64 ) , 0 , bf
  shape i and 1
  bsave "bmp\ann" + nr ( i , 4 ) + ".bmp" , img
next i

sleep 
MrSwiss
Posts: 3910
Joined: Jun 02, 2013 9:27
Location: Switzerland

Re: xor neural net

Post by MrSwiss »

@bluatigro,

Your function range() could easily be done as a MACRO (faster than a function call):

Code:

#Define IntRange(l, h)     ( CInt(Rnd() * (h - l) + l) )
The renaming allows you to define multiple range MACROs returning different types, e.g. UL_Range() for a colour (ULong type).
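
For instance (a sketch of the renaming idea; UL_Range() and its use below are illustrative):

Code:

#Define IntRange(l, h)    ( CInt(Rnd() * ((h) - (l)) + (l)) )
#Define UL_Range(l, h)    ( CULng(Rnd() * ((h) - (l)) + (l)) )

randomize timer
dim as integer r  = IntRange(10, 25)        '' e.g. a radius
dim as ulong   kl = UL_Range(0, &hFFFFFF)   '' e.g. a random colour
print r, hex(kl, 6)
sleep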
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: xor neural net

Post by bluatigro »

I found this C++ code:

Code:


// input I[i] = any real numbers ("doubles" in C++)
// y[j]
// network output y[k] = sigmoid continuous 0 to 1
// correct output O[k] = continuous 0 to 1


// assumes throughout that all i are linked to all j, and that all j are linked to all k
// if want some NOT to be connected, will need to introduce:
//   Boolean connected [ TOTAL ] [ TOTAL ];
// initialise it, and then keep checking:
//   if (connected[i][j])
// don't really need to do this,
// since we can LEARN a weight of 0 on this link




double sigmoid ( double x ) 
{
 return 1.0 / (1 + exp(-x)); 
}



const int TOTAL = NOINPUT+NOHIDDEN+NOOUTPUT;

// units all unique ids - so no ambiguity about which we refer to:

const int loi = 0;
const int hii = NOINPUT-1;
const int loj = NOINPUT;
const int hij = NOINPUT+NOHIDDEN-1;
const int lok = NOINPUT+NOHIDDEN;
const int hik = NOINPUT+NOHIDDEN+NOOUTPUT-1;

#define for_i   for ( i=loi; i<=hii; i++ )
#define for_j   for ( j=loj; j<=hij; j++ )
#define for_k   for ( k=lok; k<=hik; k++ )

class ann
{
 int i,j,k;

 double         I [ TOTAL ];            
 double         y [ TOTAL ];            
 double         O [ TOTAL ];    
 double         w [ TOTAL ] [ TOTAL ];          // w[i][j] 
 double         wt [ TOTAL ];                   // bias weights wt[i]
                
 double         dx [ TOTAL ];                   // dE/dx[i] 
 double         dy [ TOTAL ];                   // dE/dy[i] 
};


// going to do w++ and w--
// so might think should start with all w=0
// (all y[k]=0.5 - halfway between possibles 0 and 1)
// in fact, if all w same they tend to march in step together
// need *asymmetry* if want them to specialise (form a representation scheme)
// best to start with diverse w
//
// also, large positive or negative w -> slow learning
// so start with small absolute w -> fast learning

double initw()
{
 return float_randomAtoB ( -C, C );
}

ann::init()
{
 visits = 0;

 for_i
  for_j
   w[i][j] = initw();

 for_j
  for_k
   w[j][k] = initw();

 for_j
  wt[j] = initw();

 for_k
  wt[k] = initw();
}



ann :: backpropagate()
{
 double         dw;                             // temporary variable - dE/dw[i][j]



//----- backpropagate O[k] -> dy[k] -> dx[k] -> w[j][k],wt[k] ---------------------------------
 for_k
 {
  dy[k] = y[k] - O[k];
  dx[k] = ( dy[k] ) * y[k] * (1-y[k]);
 }

//----- backpropagate dx[k],w[j][k] -> dy[j] -> dx[j] -> w[i][j],wt[j] ------------------------
//----- use OLD w values here (that's what the equations refer to) .. -------------------------
 for_j
 {
  double t = 0;
  for_k
   t = t + ( dx[k] * w[j][k] );
  dy[j] = t;
  dx[j] = ( dy[j] ) * y[j] * (1-y[j]);
 }

//----- .. do all w changes together at end ---------------------------------------------------
 for_j
  for_k 
  {
   dw = dx[k] * y[j];           
   w[j][k] = w[j][k] - ( RATE * dw );
  }

 for_i
  for_j
  {
   dw = dx[j] * I[i];           
   w[i][j] = w[i][j] - ( RATE * dw );
  }

 for_k 
 {
  dw = dx[k] * (-1);            
  wt[k] = wt[k] - ( RATE * dw );
 }

 for_j
 {
  dw = dx[j] * (-1);            
  wt[j] = wt[j] - ( RATE * dw );
 }
}

ann :: learn ( int CEILING )
{
 for ( int c=1; c<=CEILING; c++ )
 {
  newIO();

   // new I/O pair
   // put I into I[i]
   // put O into O[k]

  forwardpass();
  backpropagate();
 }
}

ann :: exploit()
{
 for ( int c=1; c<=30; c++ )            
 {
  newIO();
  forwardpass();
  reportIO ( cout  );  
 }
}

ann net;
Translated to FB:

Code:


'' input I[i] = any real numbers ("doubles" in C++)
'' y[j]
'' network output y[k] = sigmoid continuous 0 to 1
'' correct output O[k] = continuous 0 to 1


'' assumes throughout that all i are linked to all j, and that all j are linked to all k
'' if want some NOT to be connected, will need to introduce:
''   Boolean connected [ TOTAL ] [ TOTAL ];
'' initialise it, and then keep checking:
''   if (connected[i][j])
'' don't really need to do this,
'' since we can LEARN a weight of 0 on this link




function sigmoid( x as double ) as double 
  return 1.0 / (1 + exp(-x))
end function



const as integer TOTAL = NOINPUT+NOHIDDEN+NOOUTPUT

'' units all unique ids - so no ambiguity about which we refer to:

const as integer loi = 0
const as integer hii = NOINPUT-1
const as integer loj = NOINPUT
const as integer hij = NOINPUT+NOHIDDEN-1
const as integer lok = NOINPUT+NOHIDDEN
const as integer hik = NOINPUT+NOHIDDEN+NOOUTPUT-1

#define for_i   for i=loi to hii
#define for_j   for j=loj to hij
#define for_k   for k=lok to hik

type ann

 as integer i,j,k

 as double         I( TOTAL )            
 as double         y( TOTAL )            
 as double         O( TOTAL )    
 as double         w( TOTAL , TOTAL )          '' w[i][j] 
 as double         wt( TOTAL )                '' bias weights wt[i]
                
 as double         dx( TOTAL )                '' dE/dx[i] 
 as double         dy( TOTAL )                '' dE/dy[i] 
  declare sub init()
  declare sub forwardpass()
  declare sub backpropagate()
  declare sub learn( t as integer )
  declare sub exploit()
end type


'' going to do w++ and w--
'' so might think should start with all w=0
'' (all y[k]=0.5 - halfway between possibles 0 and 1)
'' in fact, if all w same they tend to march in step together
'' need *asymmetry* if want them to specialise (form a representation scheme)
'' best to start with diverse w

'' also, large positive or negative w -> slow learning
'' so start with small absolute w -> fast learning

function initw() as double
  return rnd * ( C - -C ) + -C 
end function

sub ann.init()
  
  dim as integer visits = 0

  for_i
    for_j
      w(i,j) = initw()
    next j
  next i

  for_j
    for_k
      w(j,k) = initw()
    next k
  next j

  for_j
    wt(j) = initw()
  next j

  for_k
    wt(k) = initw()
  next k
end sub



sub ann.backpropagate()
  dim as double dw                             '' temporary variable - dE/dw[i][j]



''----- backpropagate O[k] -> dy[k] -> dx[k] -> w[j][k],wt[k] ---------------------------------
  for_k  
    dy(k) = y(k) - O(k)
    dx(k) = ( dy(k) ) * y(k) * (1-y(k))
  next k

''----- backpropagate dx[k],w[j][k] -> dy[j] -> dx[j] -> w[i][j],wt[j] ------------------------
''----- use OLD w values here (that's what the equations refer to) .. -------------------------
  for_j
    dim as double t = 0
    for_k
      t = t + ( dx(k) * w(j,k) )
    next k
    dy(j) = t
    dx(j) = ( dy(j) ) * y(j) * (1-y(j))
  next j

''----- .. do all w changes together at end ---------------------------------------------------
  for_j
    for_k 
      dw = dx(k) * y(j)          
      w(j,k) = w(j,k) - ( RATE * dw )
    next k
  next j

  for_i
    for_j
      dw = dx(j) * I(i)           
      w(i,j) = w(i,j) - ( RATE * dw )
    next j
  next i
 
  for_k 
    dw = dx(k) * (-1)        
    wt(k) = wt(k) - ( RATE * dw )
  next k

  for_j
    dw = dx(j) * (-1)
    wt(j) = wt(j) - ( RATE * dw )
  next j
end sub

sub ann.learn( CEILING as integer )

  dim as integer c
  for c=1 to CEILING
    
''  newIO
''  new I/O pair
''  put I into I[i]
''  put O into O[k]

    forwardpass
    backpropagate
  next c
end sub

sub ann.exploit()
  dim as integer c
  for c=1 to 30            
    newIO
    forwardpass
    reportIO cout  
  next c
end sub

dim as ann net 
It is not complete: I am missing ann.forwardpass(), and what is C?
How do I use this?
D.J.Peters
Posts: 8586
Joined: May 28, 2005 3:28

Re: xor neural net

Post by D.J.Peters »

The basic Neural Network class
Sample implementation - As a function approximator

Code:

' input I[i] = any real numbers ("doubles" in C++)
' y[j]
' network output y[k] = sigmoid continuous 0 to 1
' correct output O[k] = continuous 0 to 1

' assumes throughout that all i are linked to all j, and that all j are linked to all k
' if want some NOT to be connected, will need to introduce:
'   Boolean connected [ TOTAL ] [ TOTAL ];
' initialise it, and then keep checking:
'   if (connected[i][j])
' don't really need to do this,
' since we can LEARN a weight of 0 on this link

const as integer NOINPUT  = 1
const as integer NOHIDDEN = 30
const as integer NOOUTPUT = 1

const as double RATE = 0.3

const as double C = 0.1    

function sigmoid (x as double) as double
  return 1.0 / (1 + exp(-x))
end function

function float_randomAtoB(lox as double, hix as double) as double
  return lox + rnd * (hix-lox)
end function

function f (x as double) as double
  return sqr(x)
  ' return sin(x)
  '  return sin(x)+sin(2*x)+sin(5*x)+cos(x)
end function

' I = x = double lox to hix
const as double lox = 0
const as double hix = 9

'/ want it to store f(x) = double lof to hif
const as double _lof = -2.5 ' approximate bounds
const as double _hif =  3.2


' O = f(x) normalised to range 0 to 1 
function normalise (t as double) as double
  return (t-_lof) / (_hif-_lof)
end function

function expand (t as double) as double ' goes the other way
  return _lof + t*(_hif-_lof)
end function




const as integer TOTAL = NOINPUT+NOHIDDEN+NOOUTPUT

' units all unique ids - so no ambiguity about which we refer to:

const as integer loi = 0
const as integer hii = NOINPUT-1
const as integer loj = NOINPUT
const as integer hij = NOINPUT+NOHIDDEN-1
const as integer lok = NOINPUT+NOHIDDEN
const as integer hik = NOINPUT+NOHIDDEN+NOOUTPUT-1

#define for_i for i as integer = loi to hii
#define for_j for j as integer = loj to hij
#define for_k for k as integer = lok to hik

type ann
  declare sub init
  declare sub forwardpass
  declare sub backpropagate
  declare sub learn(ceiling as integer)
  declare sub exploit
  declare sub newIO
  declare sub reportIO
  declare sub report
  as double  II( TOTAL - 1)
  as double  y ( TOTAL - 1)           
  as double  O ( TOTAL - 1)   
  as double  w ( TOTAL - 1, TOTAL  - 1)        ' w[i][j]
  as double  wt ( TOTAL - 1)                   ' bias weights wt[i]
  as double  dx ( TOTAL - 1)                   ' dE/dx[i]
  as double  dy ( TOTAL - 1)                   ' dE/dy[i]
  as integer visits
end type


' going to do w++ and w--
' so might think should start with all w=0
' (all y[k]=0.5 - halfway between possibles 0 and 1)
' in fact, if all w same they tend to march in step together
' need *asymmetry* if want them to specialise (form a representation scheme)
' best to start with diverse w
'
' also, large positive or negative w -> slow learning
' so start with small absolute w -> fast learning
function initw() as double
  return (rnd-rnd) * C
end function

sub ann.init()
  visits = 0
  for_i
    for_j
      w(i,j) = initw()
    next
  next
  for_j
    for_k
      w(j,k) = initw()
    next
  next 
  for_j
    wt(j) = initw()
  next
  for_k
    wt(k) = initw()
  next 
end sub



sub ann.backpropagate()
  dim as double dw ' temporary variable - dE/dw[i][j]
  '----- backpropagate O[k] -> dy[k] -> dx[k] -> w[j][k],wt[k] ---------------------------------
  for_k
    dy(k) = y(k) - O(k)
    dx(k) = ( dy(k) ) * y(k) * (1-y(k))
  next
  '----- backpropagate dx(k),w[j](k) -> dy[j] -> dx[j] -> w[i][j],wt[j] ------------------------
  '----- use OLD w values here (that's what the equations refer to) .. -------------------------
  for_j
    dim as double t = 0
    for_k
      t = t + ( dx(k) * w(j,k) )
    next  
    dy(j) = t
    dx(j) = ( dy(j) ) * y(j) * (1-y(j))
  next
  '----- .. do all w changes together at end ---------------------------------------------------
  for_j
    for_k
      dw = dx(k) * y(j)
      w(j,k) = w(j,k) - ( RATE * dw )
    next
  next
  for_i
    for_j
     dw = dx(j) * II(i)
     w(i,j) = w(i,j) - ( RATE * dw )
    next
  next
  
  for_k
    dw = dx(k) * (-1)
    wt(k) = wt(k) - ( RATE * dw )
  next

  for_j
    dw = dx(j) * (-1)
    wt(j) = wt(j) - ( RATE * dw )
  next
end sub


sub ann.newIO()
  dim as double x = float_randomAtoB ( lox, hix )      
  ' there is only one, just don't want to remember number:
  for_i
    II(i) = x
  next
  ' there is only one, just don't want to remember number:
  for_k                  
    O(k) = normalise(f(x))
  next  
end sub


' Note it never even sees the same exemplar twice!
sub ann.reportIO()
  dim as double x,_y
  for_i
    x = II(i)
  next
  for_k
    _y = expand(y(k))
  next  
  var hFile = Freefile()
  open err for output as #hFile
  print #hFile,"x    " & x
  print #hFile,"y    " & _y
  print #hFile,"f(x) " & f(x)
  close #hFile
end sub

sub ann.forwardpass
  dim as double x 'temporary variable - x[i]
  '----- forwardpass I[i] -> y[j] ------------------------------------------------
  for_j
    x = 0
    for_i         
      x = x + ( II(i) * w(i,j) )
    next  
    y(j) = sigmoid ( x - wt(j) )
  next
  '----- forwardpass y[j] -> y[k] ------------------------------------------------
  for_k
    x = 0
    for_j         
      x = x + ( y(j) * w(j,k) )
    next
    y(k) = sigmoid( x - wt(k) )
  next
end sub


sub ann.report ' report on the forwardpass we just did
  var hFile = FreeFile()
  open err for output as #hFile
  print #hFile,"[i] "
  for_i
    print #hFile,II(i) & " " ;
  next  
  print #hFile,""
  print #hFile,"y[j] "
  for_j
    print #hFile,y(j) & " ";
  next  
  print #hFile,""

  print #hFile,"y[k] "
  for_k
    print #hFile,y(k) & " "; 
  next
  print #hFile,""

  print #hFile,"O[k] "
  for_k
    print #hFile,O(k) & " "; 
  next  
  print #hFile,""

  dim as double E = 0
  for_k
    E = E + (y(k)-O(k))*(y(k)-O(k))
  next  
   E = E/2
  print #hFile,E & " E"
  close #hFile
end sub


sub ann.learn (CEILING as integer)
  for c as integer =1 to CEILING
    newIO()
    ' new I/O pair
    ' put I into I[i]
    ' put O into O(k)
    forwardpass()
    backpropagate()
 next
end sub

sub ann.exploit()
  for ic as integer =1 to 10
    newIO()
    forwardpass()
    reportIO()
  next
end sub

dim as ann net

dim as integer CEILING = 10
net.init()
net.learn ( CEILING )
net.exploit()
sleep

bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

neural net : square and circle

Post by bluatigro »

This is a try at picture recognition.

Error: I think I have too little memory for this; there must be a better way.
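
For scale: with bmpsize = 63, TOTAL = 63*63 + 30 + 1 = 4000, so the w( TOTAL - 1 , TOTAL - 1 ) field alone holds 4000 * 4000 doubles = 16,000,000 * 8 bytes, about 128 MB per ann instance. Only the input->hidden and hidden->output sub-blocks of w are ever used, so splitting w into two separate arrays would cut that to roughly a megabyte.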

Code:

' input I[i] = any real numbers ("doubles" in C++)
' y[j]
' network output y[k] = sigmoid continuous 0 to 1
' correct output O[k] = continuous 0 to 1

' assumes throughout that all i are linked to all j, and that all j are linked to all k
' if want some NOT to be connected, will need to introduce:
'   Boolean connected [ TOTAL ] [ TOTAL ];
' initialise it, and then keep checking:
'   if (connected[i][j])
' don't really need to do this,
' since we can LEARN a weight of 0 on this link

const as integer bmpsize = 63

const as integer NOINPUT  = bmpsize * bmpsize
const as integer NOHIDDEN = 30
const as integer NOOUTPUT = 1

const as double RATE = 0.3

const as double C = 0.1  

const as integer no = 0
const as integer yes = 1

function getpixel( x as integer , y as integer ) as integer
  dim as integer r,g,b 
  dim as ulong kl = point( x , y ) and &h00ffffff
  r = kl and 255
  g = int( kl shr 8 ) and 255
  b = int( kl shr 16 ) and 255
  if r+g+b > 127*3 then return yes
  return no
end function

function sigmoid (x as double) as double
  return 1.0 / (1 + exp(-x))
end function

function range( l as integer , h as integer ) as integer
  return l + rnd * ( h - l )
end function

sub shape( i as integer )
  cls
  dim as integer r = range( 10 , 30 ) 
  dim as integer x = range( r , 63 - r )
  dim as integer y = range( r , 63 - r )
  if i then
    circle( x , y ) , r , &hffffff ,,,, f
  else
    line( x - r , y - r ) - ( x + r , y + r ) , &hffffff , bf
  end if
end sub

const as integer TOTAL = NOINPUT+NOHIDDEN+NOOUTPUT

' units all unique ids - so no ambiguity about which we refer to:

const as integer loi = 0
const as integer hii = NOINPUT-1
const as integer loj = NOINPUT
const as integer hij = NOINPUT+NOHIDDEN-1
const as integer lok = NOINPUT+NOHIDDEN
const as integer hik = NOINPUT+NOHIDDEN+NOOUTPUT-1

#define for_i for i as integer = loi to hii
#define for_j for j as integer = loj to hij
#define for_k for k as integer = lok to hik

type ann
  declare sub init
  declare sub forwardpass
  declare sub backpropagate
  declare sub learn(ceiling as integer)
  declare sub exploit
  declare sub newIO( q as double )
  declare sub reportIO
  declare sub report
  as double  II( TOTAL - 1)
  as double  y ( TOTAL - 1)           
  as double  O ( TOTAL - 1)   
  as double  w ( TOTAL - 1, TOTAL  - 1)        ' w[i][j]
  as double  wt ( TOTAL - 1)                   ' bias weights wt[i]
  as double  dx ( TOTAL - 1)                   ' dE/dx[i]
  as double  dy ( TOTAL - 1)                   ' dE/dy[i]
  as integer visits
end type


' going to do w++ and w--
' so might think should start with all w=0
' (all y[k]=0.5 - halfway between possibles 0 and 1)
' in fact, if all w same they tend to march in step together
' need *asymmetry* if want them to specialise (form a representation scheme)
' best to start with diverse w
'
' also, large positive or negative w -> slow learning
' so start with small absolute w -> fast learning
function initw() as double
  return (rnd-rnd) * C
end function

sub ann.init()
  visits = 0
  for_i
    for_j
      w(i,j) = initw()
    next
  next
  for_j
    for_k
      w(j,k) = initw()
    next
  next 
  for_j
    wt(j) = initw()
  next
  for_k
    wt(k) = initw()
  next 
end sub

sub ann.backpropagate()
  dim as double dw ' temporary variable - dE/dw[i][j]
  '----- backpropagate O[k] -> dy[k] -> dx[k] -> w[j][k],wt[k] ---------------------------------
  for_k
    dy(k) = y(k) - O(k)
    dx(k) = ( dy(k) ) * y(k) * (1-y(k))
  next
  '----- backpropagate dx(k),w[j](k) -> dy[j] -> dx[j] -> w[i][j],wt[j] ------------------------
  '----- use OLD w values here (that's what the equations refer to) .. -------------------------
  for_j
    dim as double t = 0
    for_k
      t = t + ( dx(k) * w(j,k) )
    next  
    dy(j) = t
    dx(j) = ( dy(j) ) * y(j) * (1-y(j))
  next
  '----- .. do all w changes together at end ---------------------------------------------------
  for_j
    for_k
      dw = dx(k) * y(j)
      w(j,k) = w(j,k) - ( RATE * dw )
    next
  next
  for_i
    for_j
     dw = dx(j) * II(i)
     w(i,j) = w(i,j) - ( RATE * dw )
    next
  next
  
  for_k
    dw = dx(k) * (-1)
    wt(k) = wt(k) - ( RATE * dw )
  next

  for_j
    dw = dx(j) * (-1)
    wt(j) = wt(j) - ( RATE * dw )
  next
end sub


sub ann.newIO( q as double )
  ' there is only one, just don't want to remember number:
  shape cint( q )
  for_i
    II(i) = getpixel( i mod bmpsize , i \ bmpsize )   ' the bitmap cell is bmpsize x bmpsize
  next
  ' there is only one, just don't want to remember number:
  for_k                  
    O(k) = q
  next  
end sub


' Note it never even sees the same exemplar twice!
sub ann.reportIO()
  dim as double x,_y
  for_i
    x = II(i)
  next
  for_k
    _y = y(k)      ' outputs stay in 0..1 here; this program has no expand()
  next  
  var hFile = Freefile()
  open err for output as #hFile
  print #hFile,"x    " & x
  print #hFile,"y    " & _y
  print #hFile,"f(x) " & f(x)
  close #hFile
end sub

sub ann.forwardpass
  dim as double x 'temporary variable - x[i]
  '----- forwardpass I[i] -> y[j] ------------------------------------------------
  for_j
    x = 0
    for_i         
      x = x + ( II(i) * w(i,j) )
    next  
    y(j) = sigmoid ( x - wt(j) )
  next
  '----- forwardpass y[j] -> y[k] ------------------------------------------------
  for_k
    x = 0
    for_j         
      x = x + ( y(j) * w(j,k) )
    next
    y(k) = sigmoid( x - wt(k) )
  next
end sub


sub ann.report ' report on the forwardpass we just did
  var hFile = FreeFile()
  open err for output as #hFile
  print #hFile,"[i] "
  for_i
    print #hFile,II(i) & " " ;
  next  
  print #hFile,""
  print #hFile,"y[j] "
  for_j
    print #hFile,y(j) & " ";
  next  
  print #hFile,""

  print #hFile,"y[k] "
  for_k
    print #hFile,y(k) & " "; 
  next
  print #hFile,""

  print #hFile,"O[k] "
  for_k
    print #hFile,O(k) & " "; 
  next  
  print #hFile,""

  dim as double E = 0
  for_k
    E = E + (y(k)-O(k))*(y(k)-O(k))
  next  
   E = E/2
  print #hFile,E & " E"
  close #hFile
end sub


sub ann.learn (CEILING as integer)
  for c as integer =1 to CEILING
    newIO cdbl( c and 1 )
    ' new I/O pair
    ' put I into I[i]
    ' put O into O(k)
    forwardpass
    backpropagate
 next
end sub

sub ann.exploit()
  dim as integer tel = 0
  for ic as integer = 1 to 100
    newIO cdbl( ic and 1 )
    forwardpass
    if y(lok) >= .5 and ( ic and 1 ) then   ' y(lok) is the single output unit
      tel = tel + 1
    end if
    if y(lok) < .5 and ( ic and 1 ) = 0 then
      tel = tel + 1
    end if
  next
  print str( tel ) + " % corect " 
end sub

dim as ann net

screen 12  ' a graphics mode is needed: shape() draws to and getpixel() reads from the screen
net.init
net.learn 1000
net.exploit
print "[ game over : pres return ]"
sleep
greenink
Posts: 200
Joined: Jan 28, 2016 15:45

Re: xor neural net

Post by greenink »

I think you can just use mutation random hill climbing to train a neural net.
https://groups.google.com/forum/#!topic ... Vvry50HEwI
It is actually working out quite well. Just slightly alter the weights by some random process and see if that has improved the network. If so keep the altered weights, if not go back to the original ones. That is simpler even than the attempts to train neural nets by simulated annealing in the 1980's.
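
A minimal sketch of that idea in FB (the toy error function, step size, and iteration count are illustrative; a real netError() would run the net over the training set and return its RMS error):

Code:

const as integer NWEIGHTS = 16

'' stand-in fitness: lower is better
function netError( w() as double ) as double
  dim as double e = 0
  for i as integer = 0 to ubound( w )
    e += ( w(i) - 0.5 ) ^ 2     '' toy target so the sketch runs standalone
  next i
  return e
end function

randomize timer
dim as double w( NWEIGHTS - 1 )
dim as double best = netError( w() )

for trial as integer = 1 to 10000
  dim as integer k = int( rnd * NWEIGHTS )   '' pick one weight...
  dim as double old = w(k)
  w(k) += ( rnd - rnd ) * 0.1                '' ...and alter it slightly
  dim as double e = netError( w() )
  if e < best then
    best = e         '' improved: keep the altered weight
  else
    w(k) = old       '' worse: go back to the original
  end if
next trial

print "final error = " & best
sleep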
bluatigro
Posts: 660
Joined: Apr 25, 2012 10:35
Location: netherlands

Re: xor neural net

Post by bluatigro »

Every time I try to run the last one, it freezes FB and I get no error report.