Code:
// Author: John McCullock
// Date: 12-11-2005
// Description: Backpropagation XOR Example 2.
// Sources: Dr Phil Brierley, www.philbrierley.com
#include <iostream>
#include <cmath>
#include <ctime>
#include <cstdlib>
using namespace std;
const int numInputs = 3; // Input nodes, plus the bias input.
const int numPatterns = 4; // Input patterns for XOR experiment.
const int numHidden = 4;
const int numEpochs = 200;
const double LR_IH = 0.7; // Learning rate, input to hidden weights.
const double LR_HO = 0.07; // Learning rate, hidden to output weights.
int patNum = 0;
double errThisPat = 0.0;
double outPred = 0.0; // Output predicted by the network.
double RMSerror = 0.0; // Root Mean Squared error.
double hiddenVal[numHidden] = {0.0}; // Hidden node outputs.
double weightsIH[numInputs][numHidden]; // Input to Hidden weights.
double weightsHO[numHidden] = {0.0}; // Hidden to Output weights.
int trainInputs[numPatterns][numInputs];
int trainOutput[numPatterns]; // Target (desired) output values.
// Function Prototypes.
void initWeights();
void calcNet();
void WeightChangesHO();
void WeightChangesIH();
void calcOverallError();
void initData();
void displayResults();
double getRand();
int main(){
    srand((unsigned)time(0)); // Seed the generator with system time.
    initWeights();
    initData();
    // Train the network.
    for(int j = 0; j <= numEpochs; j++){
        for(int i = 0; i < numPatterns; i++){
            // Select a pattern at random.
            patNum = rand() % numPatterns;
            // Calculate the output and error for this pattern.
            calcNet();
            // Adjust network weights.
            WeightChangesHO();
            WeightChangesIH();
        }
        calcOverallError();
        // Display the overall network error after each epoch.
        cout << "epoch = " << j << " RMS Error = " << RMSerror << endl;
    }
    // Training has finished.
    displayResults();
    return 0;
}
void initWeights(){
    // Initialize weights to random values.
    for(int j = 0; j < numHidden; j++){
        weightsHO[j] = (getRand() - 0.5) / 2;
        for(int i = 0; i < numInputs; i++){
            weightsIH[i][j] = (getRand() - 0.5) / 5;
            cout << "Weight = " << weightsIH[i][j] << endl;
        }
    }
}
void initData(){
    // The data here is the XOR truth table, rescaled from {0, 1} to {-1, 1}:
    //   in1  in2  bias | target
    //    1   -1    1   |   1
    //   -1    1    1   |   1
    //    1    1    1   |  -1
    //   -1   -1    1   |  -1
    // The extra input value of 1 acts as the bias.
    // The output must lie in the range -1 to 1.
    trainInputs[0][0] = 1;
    trainInputs[0][1] = -1;
    trainInputs[0][2] = 1; // Bias
    trainOutput[0] = 1;

    trainInputs[1][0] = -1;
    trainInputs[1][1] = 1;
    trainInputs[1][2] = 1; // Bias
    trainOutput[1] = 1;

    trainInputs[2][0] = 1;
    trainInputs[2][1] = 1;
    trainInputs[2][2] = 1; // Bias
    trainOutput[2] = -1;

    trainInputs[3][0] = -1;
    trainInputs[3][1] = -1;
    trainInputs[3][2] = 1; // Bias
    trainOutput[3] = -1;
}
void calcNet(){
    // Calculate values for the hidden and output nodes.
    for(int i = 0; i < numHidden; i++){
        hiddenVal[i] = 0.0;
        for(int j = 0; j < numInputs; j++){
            hiddenVal[i] = hiddenVal[i] + (trainInputs[patNum][j] * weightsIH[j][i]);
        }
        hiddenVal[i] = tanh(hiddenVal[i]);
    }
    outPred = 0.0;
    for(int i = 0; i < numHidden; i++){
        outPred = outPred + hiddenVal[i] * weightsHO[i];
    }
    // Calculate the error: predicted output minus target.
    errThisPat = outPred - trainOutput[patNum];
}
void WeightChangesHO(){
    // Adjust the hidden-to-output weights.
    for(int k = 0; k < numHidden; k++){
        double weightChange = LR_HO * errThisPat * hiddenVal[k];
        weightsHO[k] = weightsHO[k] - weightChange;
        // Clamp the output weights to [-5, 5] (a crude form of regularization).
        if (weightsHO[k] < -5){
            weightsHO[k] = -5;
        }else if (weightsHO[k] > 5){
            weightsHO[k] = 5;
        }
    }
}
void WeightChangesIH(){
    // Adjust the input-to-hidden weights.
    for(int i = 0; i < numHidden; i++){
        for(int k = 0; k < numInputs; k++){
            // 1 - tanh(u)^2 is the derivative of tanh(u), so x becomes the
            // error signal backpropagated to hidden node i.
            double x = 1 - (hiddenVal[i] * hiddenVal[i]);
            x = x * weightsHO[i] * errThisPat * LR_IH;
            x = x * trainInputs[patNum][k];
            double weightChange = x;
            weightsIH[k][i] = weightsIH[k][i] - weightChange;
        }
    }
}
void calcOverallError(){
    RMSerror = 0.0;
    for(int i = 0; i < numPatterns; i++){
        patNum = i;
        calcNet();
        RMSerror = RMSerror + (errThisPat * errThisPat);
    }
    RMSerror = RMSerror / numPatterns;
    RMSerror = sqrt(RMSerror);
}
void displayResults(){
    for(int i = 0; i < numPatterns; i++){
        patNum = i;
        calcNet();
        cout << "pat = " << patNum + 1
             << " actual = " << trainOutput[patNum]
             << " neural model = " << outPred << endl;
    }
}
double getRand(){
    return rand() / double(RAND_MAX);
}
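For anyone who wants to try the C++ version first: it only uses the standard library, so it should build with any compiler, for example g++ -O2 bp_xor.cpp -o bp_xor (the file name is just an example). Below is my FreeBASIC translation of it.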
Code:
'' Author: John McCullock
'' Date: 12-11-2005
'' Description: Backpropagation XOR Example 2.
'' Sources: Dr Phil Brierley, www.philbrierley.com
'' Translated from C++ to FB by bluatigro.
const as integer numInputs = 3 '' Input nodes, plus the bias input.
const as integer numPatterns = 4 '' Input patterns for XOR experiment.
const as integer numHidden = 4
const as integer numEpochs = 200
const as double LR_IH = 0.7 '' Learning rate, input to hidden weights.
const as double LR_HO = 0.07 '' Learning rate, hidden to output weights.
dim shared as integer patNum = 0
dim shared as double errThisPat = 0.0
dim shared as double outPred = 0.0 '' Output predicted by the network.
dim shared as double RMSerror = 0.0 '' Root Mean Squared error.
'' NB: in FreeBASIC, dim x( n ) allocates indices 0 to n inclusive,
'' so these arrays have one spare element; all loops below run
'' 0 to n - 1 to match the C++ original.
dim shared as double hiddenVal( numHidden ) '' Hidden node outputs.
dim shared as double weightsIH( numInputs , numHidden ) '' Input to Hidden weights.
dim shared as double weightsHO( numHidden ) '' Hidden to Output weights.
dim shared as integer trainInputs( numPatterns , numInputs )
dim shared as integer trainOutput( numPatterns ) '' Target (desired) output values.
'' Function Prototypes.
declare sub initWeights()
declare sub calcNet()
declare sub WeightChangesHO()
declare sub WeightChangesIH()
declare sub calcOverallError()
declare sub initData()
declare sub displayResults()
'' main()
randomize timer '' Seed the generator with system time.
initWeights
initData
'' Train the network.
dim as integer i , j
for j = 0 to numEpochs
    for i = 0 to numPatterns - 1
        '' Select a pattern at random. int() truncates; a plain integer
        '' assignment would round and could give patNum = numPatterns.
        patNum = int( rnd * numPatterns )
        '' Calculate the output and error for this pattern.
        calcNet
        '' Adjust network weights.
        WeightChangesHO
        WeightChangesIH
    next i
    calcOverallError
    '' Display the overall network error after each epoch.
    print "epoch = " + str(j) + " RMS Error = " + str(RMSerror)
next j
'' Training has finished.
displayResults
sleep
end
'' FreeBASIC has no built-in tanh, so define it from exp.
function tanh( x as double ) as double
    return ( 1 - exp( -x * 2 ) ) / ( 1 + exp( -x * 2 ) )
end function
sub initWeights()
    '' Initialize weights to random values.
    dim as integer i , j
    for j = 0 to numHidden - 1
        weightsHO(j) = ( rnd - 0.5 ) / 2
        for i = 0 to numInputs - 1
            weightsIH(i,j) = ( rnd - 0.5 ) / 5
            print "Weight = " + str( weightsIH(i,j) )
        next i
    next j
end sub
sub initData()
    '' The data here is the XOR data which has been rescaled to
    '' the range -1 to 1.
    '' An extra input value of 1 is also added to act as the bias.
    '' The output must lie in the range -1 to 1.
    trainInputs(0,0) = 1
    trainInputs(0,1) = -1
    trainInputs(0,2) = 1 '' Bias
    trainOutput(0) = 1

    trainInputs(1,0) = -1
    trainInputs(1,1) = 1
    trainInputs(1,2) = 1 '' Bias
    trainOutput(1) = 1

    trainInputs(2,0) = 1
    trainInputs(2,1) = 1
    trainInputs(2,2) = 1 '' Bias
    trainOutput(2) = -1

    trainInputs(3,0) = -1
    trainInputs(3,1) = -1
    trainInputs(3,2) = 1 '' Bias
    trainOutput(3) = -1
end sub
sub calcNet()
    '' Calculate values for the hidden and output nodes.
    dim as integer i , j
    for i = 0 to numHidden - 1
        hiddenVal(i) = 0.0
        for j = 0 to numInputs - 1
            hiddenVal(i) += ( trainInputs(patNum,j) * weightsIH(j,i) )
        next j
        hiddenVal(i) = tanh( hiddenVal( i ) )
    next i
    outPred = 0.0
    for i = 0 to numHidden - 1
        outPred += hiddenVal(i) * weightsHO(i)
    next i
    '' Calculate the error: predicted output minus target.
    errThisPat = outPred - trainOutput( patNum )
end sub
sub WeightChangesHO()
    '' Adjust the hidden-to-output weights.
    dim as integer k
    for k = 0 to numHidden - 1
        dim as double weightChange = LR_HO * errThisPat * hiddenVal(k)
        weightsHO(k) = weightsHO(k) - weightChange
        '' Clamp the output weights to [-5, 5] (a crude form of regularization).
        if weightsHO(k) < -5 then
            weightsHO(k) = -5
        elseif weightsHO(k) > 5 then
            weightsHO(k) = 5
        end if
    next k
end sub
sub WeightChangesIH()
    '' Adjust the input-to-hidden weights.
    dim as integer i , k
    for i = 0 to numHidden - 1
        for k = 0 to numInputs - 1
            '' 1 - tanh(u)^2 is the derivative of tanh(u), so x becomes the
            '' error signal backpropagated to hidden node i.
            dim as double x = 1 - ( hiddenVal(i) * hiddenVal(i) )
            x = x * weightsHO(i) * errThisPat * LR_IH
            x = x * trainInputs(patNum,k)
            dim as double weightChange = x
            weightsIH(k,i) = weightsIH(k,i) - weightChange
        next k
    next i
end sub
sub calcOverallError()
    RMSerror = 0.0
    dim as integer i
    for i = 0 to numPatterns - 1
        patNum = i
        calcNet
        RMSerror = RMSerror + ( errThisPat * errThisPat )
    next i
    RMSerror = RMSerror / numPatterns
    RMSerror = sqr( RMSerror )
end sub
sub displayResults()
    dim as integer i
    for i = 0 to numPatterns - 1
        patNum = i
        calcNet
        print "pat = " + str( patNum + 1 ) _
            + " actual = " + str( trainOutput(patNum) ) _
            + " neural model = " + str( outPred )
    next i
end sub
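One thing to watch when translating these loops: C++ for( int i = 0 ; i < n ; i++ ) makes n passes ( i = 0 .. n-1 ), but FreeBASIC for i = 0 to n makes n+1 passes ( i = 0 .. n ), so every translated loop has to stop at n - 1. FreeBASIC also rounds when a floating-point value is assigned to an integer, so patNum = rnd * numPatterns can occasionally give patNum = numPatterns, one past the last pattern. A minimal sketch of both pitfalls:

Code:
dim as integer i , patNum
'' C++ : for( int i = 0 ; i < 4 ; i++ ) -> 4 passes , i = 0 .. 3
'' FB  : for i = 0 to 4                 -> 5 passes , i = 0 .. 4 , one too many
for i = 0 to 4 - 1 '' correct : i = 0 .. 3
    print i
next i
patNum = rnd * 4        '' implicit conversion rounds : can give 4
patNum = int( rnd * 4 ) '' int() truncates : always 0 .. 3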
has anyone a good idea how