I need some help translating this to FreeBASIC.
Code: Select all
//bluatigro 5 jun 2017
//neural net from internet :
//http://www.learnartificialneuralnetworks.com/neural-network-software/backpropagation-source-code/
#include <cmath>
#include <cstring>
#include <iostream>
using namespace std;
//A single neuron: one weight (plus a stored delta for momentum) per input,
//a bias ("gain") with its own trainable weight, and the last activation.
struct neuron
{
float *weights; //neuron input weights, one per input (synaptic connections)
float *deltavalues; //previous weight updates, kept for the momentum term
float output; //last activation produced by layer::calculate()
float gain; //bias input value (the "theta" term)
float wgain; //trainable weight applied to the gain/bias input
neuron(); //Constructor
~neuron(); //Destructor
void create(int inputcount); //Allocates memory and initializes values
};
//A fully connected layer: owns an array of neuron pointers and a copy of
//the vector that feeds the layer.
struct layer
{
neuron **neurons; //The array of neuron pointers
int neuroncount; //The total count of neurons
float *layerinput; //The layer input vector
int inputcount; //The total count of elements in layerinput
layer(); //Object constructor. Initializes all values to 0
~layer(); //Destructor. Frees the memory used by the layer
void create(int inputsize, int _neuroncount); //Creates the layer and allocates memory
void calculate(); //Calculates all neurons performing the network formula
};
//Feed-forward network trained with backpropagation: an input layer, an
//output layer, and zero or more hidden layers in between.
class bpnet
{
private:
layer m_inputlayer; //input layer of the network
layer m_outputlayer; //output layer..contains the result of applying the network
layer **m_hiddenlayers; //Additional hidden layers (may be null when count is 0)
int m_hiddenlayercount; //the count of additional hidden layers
public:
bpnet(); //Constructor..initializes all values to 0
~bpnet(); //Destructor..releases memory
//Creates the network structure in memory. hiddenlayers is an array of
//hiddenlayercount neuron counts (may be null when hiddenlayercount is 0).
void create(int inputcount,int inputneurons,int outputcount,int *hiddenlayers,int hiddenlayercount);
void propagate(const float *input); //Calculates the network values given an input pattern
//Updates the weight values of the network given a desired output, applying
//the backpropagation algorithm. Returns half the summed quadratic error.
float train(const float *desiredoutput,const float *input,float alpha, float momentum);
//Copies a layer's outputs into the next layer's input (index -1 = input layer)
void update(int layerindex);
//Returns the output layer..useful to read the network's result values
inline layer &getOutput()
{
return m_outputlayer;
}
};
void layer::calculate()
{
int i,j;
float sum;
//Apply the formula for each neuron
for(i=0;i<neuroncount;i++)
{
sum=0;//store the sum of all values here
for(j=0;j<inputcount;j++)
{
//Performing function
sum+=neurons[i]->weights[j] * layerinput[j]; //apply input * weight
}
sum+=neurons[i]->wgain * neurons[i]->gain; //apply the gain or theta multiplied by the gain weight.
//sigmoidal activation function
neurons[i]->output= 1.f/(1.f + exp(-sum));//calculate the sigmoid function
}
}
//Runs one forward pass: loads the input pattern into the input layer, then
//evaluates each layer in order, feeding its outputs to the next layer.
void bpnet::propagate(const float *input)
{
    //The caller must supply exactly m_inputlayer.inputcount values.
    memcpy(m_inputlayer.layerinput, input, sizeof(float) * m_inputlayer.inputcount);
    m_inputlayer.calculate();
    update(-1); //index -1 feeds the input layer's outputs forward
    //Evaluate the hidden layers, if the network has any.
    if(m_hiddenlayers != 0)
    {
        int idx = 0;
        while(idx < m_hiddenlayercount)
        {
            m_hiddenlayers[idx]->calculate();
            update(idx);
            ++idx;
        }
    }
    //Final stage: after this, the output layer holds the network's result.
    m_outputlayer.calculate();
}
//Main training function. Run this function in a loop as many times needed per pattern
float bpnet::train(const float *desiredoutput, const float *input, float alpha, float momentum)
{
//function train, teaches the network to recognize a pattern given a desired output
float errorg=0; //general quadratic error
float errorc; //local error;
float sum=0,csum=0;
float delta,udelta;
float output;
//first we begin by propagating the input
propagate(input);
int i,j,k;
//the backpropagation algorithm starts from the output layer propagating the error from the output
//layer to the input layer
for(i=0;i<m_outputlayer.neuroncount;i++)
{
//calculate the error value for the output layer
output=m_outputlayer.neurons[i]->output; //copy this value to facilitate calculations
//from the algorithm we can take the error value as
errorc=(desiredoutput[i] - output) * output * (1 - output);
//and the general error as the sum of delta values. Where delta is the squared difference
//of the desired value with the output value
//quadratic error
errorg+=(desiredoutput[i] - output) * (desiredoutput[i] - output) ;
//now we proceed to update the weights of the neuron
for(j=0;j<m_outputlayer.inputcount;j++)
{
//get the current delta value
delta=m_outputlayer.neurons[i]->deltavalues[j];
//update the delta value
udelta=alpha * errorc * m_outputlayer.layerinput[j] + delta * momentum;
//update the weight values
m_outputlayer.neurons[i]->weights[j]+=udelta;
m_outputlayer.neurons[i]->deltavalues[j]=udelta;
//we need this to propagate to the next layer
sum+=m_outputlayer.neurons[i]->weights[j] * errorc;
}
//calculate the weight gain
m_outputlayer.neurons[i]->wgain+= alpha * errorc * m_outputlayer.neurons[i]->gain;
}
for(i=(m_hiddenlayercount - 1);i>=0;i--)
{
for(j=0;j<m_hiddenlayers[i]->neuroncount;j++)
{
output=m_hiddenlayers[i]->neurons[j]->output;
//calculate the error for this layer
errorc= output * (1-output) * sum;
//update neuron weights
for(k=0;k<m_hiddenlayers[i]->inputcount;k++)
{
delta=m_hiddenlayers[i]->neurons[j]->deltavalues[k];
udelta= alpha * errorc * m_hiddenlayers[i]->layerinput[k] + delta * momentum;
m_hiddenlayers[i]->neurons[j]->weights[k]+=udelta;
m_hiddenlayers[i]->neurons[j]->deltavalues[k]=udelta;
csum+=m_hiddenlayers[i]->neurons[j]->weights[k] * errorc;//needed for next layer
}
m_hiddenlayers[i]->neurons[j]->wgain+=alpha * errorc * m_hiddenlayers[i]->neurons[j]->gain;
}
sum=csum;
csum=0;
}
//and finally process the input layer
for(i=0;i<m_inputlayer.neuroncount;i++)
{
output=m_inputlayer.neurons[i]->output;
errorc=output * (1 - output) * sum;
for(j=0;j<m_inputlayer.inputcount;j++)
{
delta=m_inputlayer.neurons[i]->deltavalues[j];
udelta=alpha * errorc * m_inputlayer.layerinput[j] + delta * momentum;
//update weights
m_inputlayer.neurons[i]->weights[j]+=udelta;
m_inputlayer.neurons[i]->deltavalues[j]=udelta;
}
//and update the gain weight
m_inputlayer.neurons[i]->wgain+=alpha * errorc * m_inputlayer.neurons[i]->gain;
}
//return the general error divided by 2
return errorg / 2 ;
}
//Training-set and network-shape constants for the XOR demo below.
#define PATTERN_COUNT 4 //number of training patterns
#define PATTERN_SIZE 2 //inputs per pattern
#define NETWORK_INPUTNEURONS 3 //neurons in the input layer
#define NETWORK_OUTPUT 1 //neurons in the output layer
#define HIDDEN_LAYERS 0 //count of additional hidden layers
#define EPOCHS 20000 //training passes over the whole pattern set
int main()
{
//Create some patterns
//playing with xor
//XOR input values
float pattern[PATTERN_COUNT][PATTERN_SIZE]=
{
{0,0},
{0,1},
{1,0},
{1,1}
};
//XOR desired output values
float desiredout[PATTERN_COUNT][NETWORK_OUTPUT]=
{
{0},
{1},
{1},
{0}
};
bpnet net;//Our neural network object
int i,j;
float error;
//We create the network
net.create(PATTERN_SIZE,NETWORK_INPUTNEURONS,NETWORK_OUTPUT,HIDDEN_LAYERS,HIDDEN_LAYERS);
//Start the neural network training
for(i=0;i<EPOCHS;i++)
{
error=0;
for(j=0;j<PATTERN_COUNT;j++)
{
error+=net.train(desiredout[j],pattern[j],0.2f,0.1f);
}
error/=PATTERN_COUNT;
//display error
cout << "ERROR:" << error << "\r";
}
//once trained test all patterns
for(i=0;i<PATTERN_COUNT;i++)
{
net.propagate(pattern[i]);
//display result
cout << "TESTED PATTERN " << i << " DESIRED OUTPUT: " << *desiredout[i] << " NET RESULT: "<< net.getOutput().neurons[0]->output << endl;
}
return 0;
}
Code: Select all
''bluatigro 5 jul 2017
''ann
''translated from c++ from :
''http://www.learnartificialneuralnetworks.com/neural-network-software/backpropagation-source-code/
'' One neuron: per-input weights (plus stored deltas for the momentum term),
'' the last activation, and a bias ("gain") with its own trainable weight.
'' NOTE(review): "uitvoer" is Dutch for "output" — presumably chosen because
'' OUTPUT is a FreeBASIC keyword; confirm before renaming.
type neuron
dim as double uitvoer , gain , wgain '' last activation, bias input, bias weight
dim as double ptr weights , deltavalues '' one weight and one delta per input
declare sub create( i as integer ) '' allocate weight/delta arrays for i inputs
declare constructor ()
declare destructor ()
end type
'' A fully connected layer: an array of neuron pointers plus a copy of the
'' vector that feeds the layer.
'' BUG FIX: the block was closed with "end sub" instead of "end type",
'' which does not compile.
type layer
  dim as neuron ptr ptr neurons      '' array of neuron pointers
  dim as integer neuroncount         '' number of neurons in the layer
  dim as double ptr layerinput       '' the layer's input vector
  dim as integer inputcount          '' number of elements in layerinput
  declare sub create( inputsize as integer , numneurons as integer )
  declare sub calculate()            '' run the network formula for every neuron
  declare constructor ()
  declare destructor ()
end type
'' Computes every neuron's activation:
''   sigmoid( dot( weights , layerinput ) + wgain * gain )
'' BUG FIXES vs the first draft:
''   - the name was misspelled "caluculate" (the type declares "calculate")
''   - FOR bounds are inclusive in FreeBASIC, so the loops ran one past the
''     end of the arrays; they must go to count - 1
''   - neurons[i] is a neuron PTR, so its fields need "->", and the weight
''     array is called "weights" (not "weight")
''   - the bias term (wgain * gain) from the C++ original was missing
sub layer.calculate()
  dim as integer i , j
  dim as double sum
  for i = 0 to neuroncount - 1
    sum = 0
    for j = 0 to inputcount - 1
      sum += neurons[i]->weights[j] * layerinput[j] '' input * weight
    next j
    sum += neurons[i]->wgain * neurons[i]->gain '' bias times its weight
    '' sigmoidal activation function
    neurons[i]->uitvoer = 1 / ( 1 + exp( -sum ) )
  next i
end sub
'' Backpropagation network: an input layer, an output layer, and zero or
'' more hidden layers in between.
'' NOTE(review): the C++ original spells the method "propagate"; the
'' spelling "propegate" below must be used consistently by the method
'' bodies or renamed — confirm before implementing.
'' NOTE(review): the C++ getOutput() returns a layer reference, not a
'' double ptr; getoutputlayer's return type likely needs to be layer ptr.
type bpnet
private :
dim as layer _inputlayer '' input layer of the network
dim as layer _outputlayer '' output layer..holds the network's result
dim as layer ptr ptr _hiddenlayer '' additional hidden layers (array of layer ptr)
dim as integer _hiddenlayercount '' count of additional hidden layers
public :
declare constructor ()
declare destructor ()
'' in = input count, inn = input-layer neurons, uit = output count,
'' hidden = array of per-hidden-layer neuron counts, hid = hidden layer count
declare sub create( in as integer , inn as integer _
, uit as integer , hidden as integer ptr , hid as integer )
declare sub propegate( in as double ptr ) '' forward pass for one input pattern
'' one backprop step: wish = desired output, in = input pattern,
'' a = learning rate, m = momentum; returns the quadratic error
declare function train( wish as double ptr , in as double ptr _
, a as double , m as double ) as double
declare sub update( layerindex as integer ) '' feed a layer's outputs to the next layer
declare function getoutputlayer() as double ptr
end type